rk, gpu :
author     chenzhen <chenzhen@rock-chips.com>
Thu, 11 Dec 2014 07:11:57 +0000 (15:11 +0800)
committer  chenzhen <chenzhen@rock-chips.com>
Fri, 12 Dec 2014 10:08:54 +0000 (18:08 +0800)
Source code for device/rockchip/common/gpu/libMali-T760/mali_kbase.ko
(branch rk/rk32/mid/5.0/develop,
commit 1b187041f11b7ca1d6c1490b934f09648f334a19).

114 files changed:
drivers/gpu/arm/midgard/Kbuild
drivers/gpu/arm/midgard/Kconfig
drivers/gpu/arm/midgard/mali_base_hwconfig.h
drivers/gpu/arm/midgard/mali_base_kernel.h
drivers/gpu/arm/midgard/mali_kbase.h
drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h
drivers/gpu/arm/midgard/mali_kbase_config.c
drivers/gpu/arm/midgard/mali_kbase_config.h
drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
drivers/gpu/arm/midgard/mali_kbase_context.c
drivers/gpu/arm/midgard/mali_kbase_core_linux.c
drivers/gpu/arm/midgard/mali_kbase_cpuprops.c
drivers/gpu/arm/midgard/mali_kbase_cpuprops.h
drivers/gpu/arm/midgard/mali_kbase_debug.c
drivers/gpu/arm/midgard/mali_kbase_debug.h
drivers/gpu/arm/midgard/mali_kbase_defs.h
drivers/gpu/arm/midgard/mali_kbase_devfreq.c [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_devfreq.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_device.c
drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_event.c
drivers/gpu/arm/midgard/mali_kbase_gator.h
drivers/gpu/arm/midgard/mali_kbase_gator_api.c [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_gator_api.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c
drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.h
drivers/gpu/arm/midgard/mali_kbase_gpuprops.c
drivers/gpu/arm/midgard/mali_kbase_gpuprops.h
drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h
drivers/gpu/arm/midgard/mali_kbase_hw.c
drivers/gpu/arm/midgard/mali_kbase_hw.h
drivers/gpu/arm/midgard/mali_kbase_instr.c
drivers/gpu/arm/midgard/mali_kbase_jd.c
drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_jm.c
drivers/gpu/arm/midgard/mali_kbase_jm.h
drivers/gpu/arm/midgard/mali_kbase_js.c
drivers/gpu/arm/midgard/mali_kbase_js.h
drivers/gpu/arm/midgard/mali_kbase_js_affinity.c
drivers/gpu/arm/midgard/mali_kbase_js_affinity.h
drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.c
drivers/gpu/arm/midgard/mali_kbase_js_ctx_attr.h
drivers/gpu/arm/midgard/mali_kbase_js_defs.h
drivers/gpu/arm/midgard/mali_kbase_js_policy.h
drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.c
drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.h
drivers/gpu/arm/midgard/mali_kbase_mem.c
drivers/gpu/arm/midgard/mali_kbase_mem.h
drivers/gpu/arm/midgard/mali_kbase_mem_alloc.c
drivers/gpu/arm/midgard/mali_kbase_mem_alloc.h
drivers/gpu/arm/midgard/mali_kbase_mem_alloc_carveout.c [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_mmu.c
drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_mmu_hw_direct.c [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_mmu_hw_direct.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_platform_fake.c
drivers/gpu/arm/midgard/mali_kbase_pm.c
drivers/gpu/arm/midgard/mali_kbase_pm.h
drivers/gpu/arm/midgard/mali_kbase_pm_always_on.c
drivers/gpu/arm/midgard/mali_kbase_pm_ca.c
drivers/gpu/arm/midgard/mali_kbase_pm_ca.h
drivers/gpu/arm/midgard/mali_kbase_pm_ca_fixed.c
drivers/gpu/arm/midgard/mali_kbase_pm_coarse_demand.c
drivers/gpu/arm/midgard/mali_kbase_pm_demand.c
drivers/gpu/arm/midgard/mali_kbase_pm_driver.c
drivers/gpu/arm/midgard/mali_kbase_pm_metrics.c
drivers/gpu/arm/midgard/mali_kbase_pm_metrics_dummy.c
drivers/gpu/arm/midgard/mali_kbase_pm_policy.c
drivers/gpu/arm/midgard/mali_kbase_pm_policy.h
drivers/gpu/arm/midgard/mali_kbase_power_actor.c [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_power_actor.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_kbase_replay.c
drivers/gpu/arm/midgard/mali_kbase_security.c
drivers/gpu/arm/midgard/mali_kbase_security.h
drivers/gpu/arm/midgard/mali_kbase_softjobs.c
drivers/gpu/arm/midgard/mali_kbase_sync.c
drivers/gpu/arm/midgard/mali_kbase_sync_user.c
drivers/gpu/arm/midgard/mali_kbase_trace_defs.h
drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c
drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h
drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h
drivers/gpu/arm/midgard/mali_kbase_uku.h
drivers/gpu/arm/midgard/mali_kbase_utility.c
drivers/gpu/arm/midgard/mali_linux_kbase_trace.h [new file with mode: 0755]
drivers/gpu/arm/midgard/mali_midg_regmap.h
drivers/gpu/arm/midgard/mali_timeline.h
drivers/gpu/arm/midgard/mali_uk.h
drivers/gpu/arm/midgard/malisw/mali_malisw.h
drivers/gpu/arm/midgard/malisw/mali_stdtypes.h
drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
drivers/gpu/arm/midgard/platform/rk/mali_kbase_dvfs.c
drivers/gpu/arm/midgard/platform/rk/mali_kbase_dvfs.h
drivers/gpu/arm/midgard/platform/rk/mali_kbase_platform.c
drivers/gpu/arm/midgard/platform/rk/mali_kbase_platform.h
drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h [new file with mode: 0755]
drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_vexpress.c
drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_cpu_vexpress.c
drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild [new file with mode: 0755]
drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h [new file with mode: 0755]
drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c [new file with mode: 0755]
drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h [new file with mode: 0755]
drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c
drivers/gpu/arm/midgard/platform/vexpress_virtex7_40mhz/mali_kbase_config_platform.h [new file with mode: 0755]
drivers/gpu/arm/midgard/platform/vexpress_virtex7_40mhz/mali_kbase_config_vexpress.c
drivers/gpu/arm/midgard/platform/vexpress_virtex7_40mhz/mali_kbase_cpu_vexpress.c
drivers/gpu/arm/midgard/sconscript

diff --git a/drivers/gpu/arm/midgard/Kbuild b/drivers/gpu/arm/midgard/Kbuild
index 3cf2828da12071a0b6d05043a954d762d9730a9c..9c94a424e8027c00a366ebc3b50c73592c076fc3 100755 (executable)
@@ -1,5 +1,5 @@
 #
-# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2012,2014 ARM Limited. All rights reserved.
 #
 # This program is free software and is provided to you under the terms of the
 # GNU General Public License version 2 as published by the Free Software
@@ -15,7 +15,7 @@
 
 
 # Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r4p1-00rel0"
+MALI_RELEASE_NAME ?= "r5p0-02dev0"
 
 # Paths required for build
 KBASE_PATH = $(src)
@@ -78,6 +78,7 @@ SRC := \
        mali_kbase_mem.c \
        mali_kbase_mmu.c \
        mali_kbase_jd.c \
+       mali_kbase_jd_debugfs.c \
        mali_kbase_jm.c \
        mali_kbase_cpuprops.c \
        mali_kbase_gpuprops.c \
@@ -102,7 +103,6 @@ SRC := \
        mali_kbase_10969_workaround.c \
        mali_kbase_hw.c \
        mali_kbase_utility.c \
-       mali_kbase_mem_lowlevel.c \
        mali_kbase_debug.c \
        mali_kbase_trace_timeline.c \
        mali_kbase_mem_linux.c \
@@ -110,16 +110,20 @@ SRC := \
        mali_kbase_sync.c \
        mali_kbase_sync_user.c \
        mali_kbase_replay.c \
+       mali_kbase_mem_profile_debugfs.c \
+       mali_kbase_mmu_hw_direct.c \
+       mali_kbase_disjoint_events.c \
+       mali_kbase_gator_api.c
 
 ifeq ($(CONFIG_DEBUG_FS),y)
-SRC += mali_kbase_gpu_memory_debugfs.c
+       SRC += mali_kbase_gpu_memory_debugfs.c
 endif
 
 ifeq ($(MALI_CUSTOMER_RELEASE),0)
 SRC += \
-     mali_kbase_pm_ca_random.c \
-     mali_kbase_pm_demand_always_powered.c \
-     mali_kbase_pm_fast_start.c
+       mali_kbase_pm_ca_random.c \
+       mali_kbase_pm_demand_always_powered.c \
+       mali_kbase_pm_fast_start.c
 endif
 
 # Job Scheduler Policy: Completely Fair Scheduler
@@ -169,43 +173,62 @@ ifeq ($(CONFIG_MALI_PLATFORM_FAKE),y)
        ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS),y)
                SRC += platform/vexpress/mali_kbase_config_vexpress.c \
                platform/vexpress/mali_kbase_cpu_vexpress.c
+               ccflags-y += -I$(src)/platform/vexpress
        endif
 
        ifeq ($(CONFIG_MALI_PLATFORM_RTSM_VE),y)
                SRC += platform/rtsm_ve/mali_kbase_config_vexpress.c
+               ccflags-y += -I$(src)/platform/rtsm_ve
        endif
 
        ifeq ($(CONFIG_MALI_PLATFORM_JUNO),y)
                SRC += platform/juno/mali_kbase_config_vexpress.c
+               ccflags-y += -I$(src)/platform/juno
+       endif
+
+       ifeq ($(CONFIG_MALI_PLATFORM_JUNO_SOC),y)
+               SRC += platform/juno_soc/mali_kbase_config_vexpress.c
+               ccflags-y += -I$(src)/platform/juno_soc
+       endif
+
+       ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_1XV7_A57),y)
+               SRC += platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
+               ccflags-y += -I$(src)/platform/vexpress_1xv7_a57
        endif
 
        ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ),y)
                SRC += platform/vexpress_virtex7_40mhz/mali_kbase_config_vexpress.c \
                platform/vexpress_virtex7_40mhz/mali_kbase_cpu_vexpress.c
+               ccflags-y += -I$(src)/platform/vexpress_virtex7_40mhz
        endif
 
        ifeq ($(CONFIG_MALI_PLATFORM_VEXPRESS_6XVIRTEX7_10MHZ),y)
                SRC += platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_vexpress.c \
                platform/vexpress_6xvirtex7_10mhz/mali_kbase_cpu_vexpress.c
+               ccflags-y += -I$(src)/platform/vexpress_6xvirtex7_10mhz
        endif
 
        ifeq ($(CONFIG_MALI_PLATFORM_GOLDFISH),y)
                SRC += platform/goldfish/mali_kbase_config_goldfish.c
+               ccflags-y += -I$(src)/platform/goldfish
        endif
 
        ifeq ($(CONFIG_MALI_PLATFORM_PBX),y)
                SRC += platform/pbx/mali_kbase_config_pbx.c
+               ccflags-y += -I$(src)/platform/pbx
        endif
 
        ifeq ($(CONFIG_MALI_PLATFORM_PANDA),y)
                SRC += platform/panda/mali_kbase_config_panda.c
+               ccflags-y += -I$(src)/platform/panda
        endif
 
        ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y)
-       ifeq ($(CONFIG_MALI_MIDGARD),m)
        # remove begin and end quotes from the Kconfig string type
        platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME))
        MALI_PLATFORM_THIRDPARTY_DIR := platform/$(platform_name)
+       ccflags-y += -I$(src)/$(MALI_PLATFORM_THIRDPARTY_DIR)
+       ifeq ($(CONFIG_MALI_MIDGARD),m)
        include  $(src)/platform/$(platform_name)/Kbuild
        else ifeq ($(CONFIG_MALI_MIDGARD),y)
        obj-$(CONFIG_MALI_MIDGARD) += platform/
@@ -214,10 +237,11 @@ ifeq ($(CONFIG_MALI_PLATFORM_FAKE),y)
 endif # CONFIG_MALI_PLATFORM_FAKE=y
 
 ifeq ($(CONFIG_MALI_PLATFORM_THIRDPARTY),y)
-ifeq ($(CONFIG_MALI_MIDGARD),m)
 # remove begin and end quotes from the Kconfig string type
 platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_THIRDPARTY_NAME))
 MALI_PLATFORM_THIRDPARTY_DIR := platform/$(platform_name)
+ccflags-y += -I$(src)/$(MALI_PLATFORM_THIRDPARTY_DIR)
+ifeq ($(CONFIG_MALI_MIDGARD),m)
 include  $(src)/platform/$(platform_name)/Kbuild
 else ifeq ($(CONFIG_MALI_MIDGARD),y)
 obj-$(CONFIG_MALI_MIDGARD) += platform/
@@ -230,4 +254,12 @@ obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o
 # Tell the Linux build system to enable building of our .c files
 mali_kbase-y := $(SRC:.c=.o)
 
+mali_kbase-$(CONFIG_MALI_DEVFREQ) += mali_kbase_devfreq.o
+mali_kbase-$(CONFIG_MALI_POWER_ACTOR) += mali_kbase_power_actor.o
 
+ifneq ($(wildcard $(src)/internal/Kbuild),)
+ifeq ($(MALI_CUSTOMER_RELEASE),0)
+include  $(src)/internal/Kbuild
+mali_kbase-y += $(INTERNAL:.c=.o)
+endif
+endif
diff --git a/drivers/gpu/arm/midgard/Kconfig b/drivers/gpu/arm/midgard/Kconfig
index 71be8bbe0d181981b90449d9dd765380bcd2103b..2f99fd5f1ef4220a2c8fafa2cf6c26dac98dbf51 100755 (executable)
@@ -72,6 +72,25 @@ menuconfig MALI_EXPERT
          Enabling this option and modifying the default settings may produce a driver with performance or
          other limitations.
 
+config MALI_DEVFREQ
+       bool "devfreq support for Mali"
+       depends on PM_DEVFREQ
+       help
+         Support devfreq for Mali.
+
+         Using the devfreq framework and, by default, the simpleondemand
+         governor, the frequency of Mali will be dynamically selected from the
+         available OPPs.
+
+config MALI_POWER_ACTOR
+       bool "Thermal API support for Mali"
+       depends on DEVFREQ_THERMAL && THERMAL_POWER_ACTOR
+       help
+         Support the thermal API for Mali.
+
+         This can be used with the power allocator thermal governor to
+         dynamically allocate the power budget to Mali.
+
 config MALI_DEBUG_SHADER_SPLIT_FS
        bool "Allow mapping of shader cores via sysfs"
        depends on MALI_MIDGARD && MALI_MIDGARD_DEBUG_SYS && MALI_EXPERT
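
The new MALI_DEVFREQ option hooks the GPU clock into the kernel's devfreq framework, so that (by default) the simple_ondemand governor picks an operating point from the registered OPPs according to measured load, while MALI_POWER_ACTOR additionally exposes the device to the power-allocator thermal governor so its share of the power budget can be trimmed at runtime. As a rough illustration only (this is not the driver's mali_kbase_devfreq.c, and every demo_* name is made up), a devfreq hookup usually has this shape: a target callback that programs the clock, a get_dev_status callback that reports busy/total time, and a registration call that names the governor.

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative stub: a real driver rounds *freq to a supported OPP and
 * programs the GPU clock here. */
static int demo_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	return 0;
}

/* simple_ondemand samples this periodically to estimate GPU utilisation. */
static int demo_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
{
	stat->busy_time = 0;          /* time the GPU was busy since the last sample */
	stat->total_time = 0;         /* wall time elapsed since the last sample */
	stat->current_frequency = 0;  /* current clock rate in Hz */
	return 0;
}

static struct devfreq_dev_profile demo_devfreq_profile = {
	.polling_ms     = 100,
	.target         = demo_devfreq_target,
	.get_dev_status = demo_devfreq_status,
};

/* Registration, e.g. from the platform probe path. */
static int demo_devfreq_init(struct device *dev)
{
	struct devfreq *df = devfreq_add_device(dev, &demo_devfreq_profile,
						"simple_ondemand", NULL);
	return IS_ERR(df) ? PTR_ERR(df) : 0;
}
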
diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig.h b/drivers/gpu/arm/midgard/mali_base_hwconfig.h
index 5f5c945f3f87d142305ba29f8867418166662276..a2aeb5dde56ff9f67f73116646e7371a1a900330 100755 (executable)
  * List of all hw features.
  *
  */
-typedef enum base_hw_feature {
+enum base_hw_feature {
        /* Allow soft/hard stopping of job depending on job chain flag */
        BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
 
        /* Allow writes to SHADER_PWRON and TILER_PWRON registers while these cores are currently transitioning to OFF power state */
        BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
 
+       /* 33-bit VA space of the GPU (but still 40 bit PA) */
+       BASE_HW_FEATURE_33BIT_VA,
+
        /* The BASE_HW_FEATURE_END value must be the last feature listed in this enumeration
         * and must be the last value in each array that contains the list of features
         * for a particular HW version.
         */
        BASE_HW_FEATURE_END
-} base_hw_feature;
+};
 
-static const base_hw_feature base_hw_features_generic[] = {
+static const enum base_hw_feature base_hw_features_generic[] = {
        BASE_HW_FEATURE_END
-}; 
+};
 
-static const base_hw_feature base_hw_features_t76x[] = {
+static const enum base_hw_feature base_hw_features_t72x[] = {
+       BASE_HW_FEATURE_33BIT_VA,
+       BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t76x[] = {
        BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
        BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
        BASE_HW_FEATURE_END
@@ -61,7 +69,7 @@ static const base_hw_feature base_hw_features_t76x[] = {
  *
  */
 
-typedef enum base_hw_issue {
+enum base_hw_issue {
 
        /* The current version of the model doesn't support Soft-Stop */
        BASE_HW_ISSUE_5736,
@@ -143,6 +151,10 @@ typedef enum base_hw_issue {
        /* Fragments are clamped instead of discarded when fragment depth bound op is discard and depth datum source is shader. */
        BASE_HW_ISSUE_8634,
 
+       /* Arithmetic pipe mode which uses additional hardware to
+        * suppress the generation of Inf (Inf => MAX_FLOAT) and NaN (NaN = 0.0) not supported. */ 
+       BASE_HW_ISSUE_8778,
+
        /* Livelock issue using atomic instructions (particularly when using atomic_cmpxchg as a spinlock) */
        BASE_HW_ISSUE_8791,
 
@@ -284,9 +296,30 @@ typedef enum base_hw_issue {
        /* Partial 16xMSAA support */
        BASE_HW_ISSUE_T76X_26,
 
+       /* Forward pixel kill doesn't work with MRT */
+       BASE_HW_ISSUE_T76X_2121,
+
+       /* CRC not working with MFBD and more than one render target */
+       BASE_HW_ISSUE_T76X_2315,
+
+       /* Some indexed formats not supported for MFBD preload. */
+       BASE_HW_ISSUE_T76X_2686,
+
+       /* Must disable CRC if the tile output size is 8 bytes or less. */
+       BASE_HW_ISSUE_T76X_2712,
+
+       /* DBD clean pixel enable bit is reserved */
+       BASE_HW_ISSUE_T76X_2772,
+
+       /* AFBC is not supported for T76X beta. */
+       BASE_HW_ISSUE_T76X_2906,
+
        /* RTD doesn't specify the row stride for AFBC surfaces. */
        BASE_HW_ISSUE_T76X_3086,
 
+       /* Prevent MMU deadlock for T76X beta. */
+       BASE_HW_ISSUE_T76X_3285,
+
        /* Clear encoder state for a hard stopped fragment job which is AFBC
         * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and r0p1_50rel0
         */
@@ -300,21 +333,27 @@ typedef enum base_hw_issue {
        /* T76X cannot disable uses_discard even if depth and stencil are read-only. */
        BASE_HW_ISSUE_T76X_3700,
 
+       /* ST_TILEBUFFER is not supported on T76X-r0p0-beta */
+       BASE_HW_ISSUE_T76X_3759,
+
        /* Preload ignores any size or bounding box restrictions of the output image. */
        BASE_HW_ISSUE_T76X_3793,
 
+       /* Keep tiler module clock on to prevent GPU stall */
+       BASE_HW_ISSUE_T76X_3953,
+
        /* The BASE_HW_ISSUE_END value must be the last issue listed in this enumeration
         * and must be the last value in each array that contains the list of workarounds
         * for a particular HW version.
         */
        BASE_HW_ISSUE_END
-} base_hw_issue;
+};
 
 /**
  * Workarounds configuration for each HW revision
  */
 /* Mali T60x r0p0-15dev0 - 2011-W39-stable-9 */
-static const base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
        BASE_HW_ISSUE_6367,
        BASE_HW_ISSUE_6398,
        BASE_HW_ISSUE_6402,
@@ -337,6 +376,7 @@ static const base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
        BASE_HW_ISSUE_8456,
        BASE_HW_ISSUE_8564,
        BASE_HW_ISSUE_8634,
+       BASE_HW_ISSUE_8778,
        BASE_HW_ISSUE_8791,
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_8833,
@@ -376,13 +416,14 @@ static const base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
 };
 
 /* Mali T60x r0p0-00rel0 - 2011-W46-stable-13c */
-static const base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
        BASE_HW_ISSUE_6367,
        BASE_HW_ISSUE_6402,
        BASE_HW_ISSUE_6787,
        BASE_HW_ISSUE_7027,
        BASE_HW_ISSUE_8408,
        BASE_HW_ISSUE_8564,
+       BASE_HW_ISSUE_8778,
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_8975,
        BASE_HW_ISSUE_9010,
@@ -413,13 +454,14 @@ static const base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
 };
 
 /* Mali T60x r0p1 */
-static const base_hw_issue base_hw_issues_t60x_r0p1[] = {
+static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = {
        BASE_HW_ISSUE_6367,
        BASE_HW_ISSUE_6402,
        BASE_HW_ISSUE_6787,
        BASE_HW_ISSUE_7027,
        BASE_HW_ISSUE_8408,
        BASE_HW_ISSUE_8564,
+       BASE_HW_ISSUE_8778,
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_8975,
        BASE_HW_ISSUE_9010,
@@ -447,7 +489,7 @@ static const base_hw_issue base_hw_issues_t60x_r0p1[] = {
 };
 
 /* Mali T62x r0p1 */
-static const base_hw_issue base_hw_issues_t62x_r0p1[] = {
+static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = {
        BASE_HW_ISSUE_6402,
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
@@ -478,7 +520,7 @@ static const base_hw_issue base_hw_issues_t62x_r0p1[] = {
 };
 
 /* Mali T62x r1p0 */
-static const base_hw_issue base_hw_issues_t62x_r1p0[] = {
+static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = {
        BASE_HW_ISSUE_6402,
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
@@ -499,7 +541,7 @@ static const base_hw_issue base_hw_issues_t62x_r1p0[] = {
 };
 
 /* Mali T62x r1p1 */
-static const base_hw_issue base_hw_issues_t62x_r1p1[] =
+static const enum base_hw_issue base_hw_issues_t62x_r1p1[] =
 {
        BASE_HW_ISSUE_6402,
        BASE_HW_ISSUE_8803,
@@ -518,8 +560,35 @@ static const base_hw_issue base_hw_issues_t62x_r1p1[] =
        BASE_HW_ISSUE_END
 };
 
+/* Mali T76x r0p0 beta */
+static const enum base_hw_issue base_hw_issues_t76x_r0p0_beta[] = {
+       BASE_HW_ISSUE_8803,
+       BASE_HW_ISSUE_9435,
+       BASE_HW_ISSUE_10649,
+       BASE_HW_ISSUE_10821,
+       BASE_HW_ISSUE_10883,
+       BASE_HW_ISSUE_10946,
+       BASE_HW_ISSUE_10959,
+       BASE_HW_ISSUE_11020,
+       BASE_HW_ISSUE_11024,
+       BASE_HW_ISSUE_T76X_26,
+       BASE_HW_ISSUE_T76X_2121,
+       BASE_HW_ISSUE_T76X_2315,
+       BASE_HW_ISSUE_T76X_2686,
+       BASE_HW_ISSUE_T76X_2712,
+       BASE_HW_ISSUE_T76X_2772,
+       BASE_HW_ISSUE_T76X_2906,
+       BASE_HW_ISSUE_T76X_3285,
+       BASE_HW_ISSUE_T76X_3700,
+       BASE_HW_ISSUE_T76X_3759,
+       BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
+       /* List of hardware issues must end with BASE_HW_ISSUE_END */
+       BASE_HW_ISSUE_END
+};
+
 /* Mali T76x r0p0 */
-static const base_hw_issue base_hw_issues_t76x_r0p0[] = {
+static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = {
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
        BASE_HW_ISSUE_10649,
@@ -534,12 +603,13 @@ static const base_hw_issue base_hw_issues_t76x_r0p0[] = {
        BASE_HW_ISSUE_T76X_3556,
        BASE_HW_ISSUE_T76X_3700,
        BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
        /* List of hardware issues must end with BASE_HW_ISSUE_END */
        BASE_HW_ISSUE_END
 };
 
 /* Mali T76x r0p1 */
-static const base_hw_issue base_hw_issues_t76x_r0p1[] = {
+static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = {
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
        BASE_HW_ISSUE_10649,
@@ -554,12 +624,13 @@ static const base_hw_issue base_hw_issues_t76x_r0p1[] = {
        BASE_HW_ISSUE_T76X_3556,
        BASE_HW_ISSUE_T76X_3700,
        BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
        /* List of hardware issues must end with BASE_HW_ISSUE_END */
        BASE_HW_ISSUE_END
 };
 
 /* Mali T76x r0p1_50rel0 */
-static const base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
+static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
        BASE_HW_ISSUE_10649,
@@ -572,12 +643,13 @@ static const base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
        BASE_HW_ISSUE_T76X_3556,
        BASE_HW_ISSUE_T76X_3700,
        BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
        /* List of hardware issues must end with BASE_HW_ISSUE_END */
        BASE_HW_ISSUE_END
 };
 
 /* Mali T76x r0p2 */
-static const base_hw_issue base_hw_issues_t76x_r0p2[] = {
+static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = {
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
        BASE_HW_ISSUE_10649,
@@ -592,12 +664,13 @@ static const base_hw_issue base_hw_issues_t76x_r0p2[] = {
        BASE_HW_ISSUE_T76X_3556,
        BASE_HW_ISSUE_T76X_3700,
        BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
        /* List of hardware issues must end with BASE_HW_ISSUE_END */
        BASE_HW_ISSUE_END
 };
 
 /* Mali T76x r0p3 */
-static const base_hw_issue base_hw_issues_t76x_r0p3[] = {
+static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = {
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
        BASE_HW_ISSUE_10649,
@@ -610,12 +683,13 @@ static const base_hw_issue base_hw_issues_t76x_r0p3[] = {
        BASE_HW_ISSUE_T76X_3556,
        BASE_HW_ISSUE_T76X_3700,
        BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
        /* List of hardware issues must end with BASE_HW_ISSUE_END */
        BASE_HW_ISSUE_END
 };
 
 /* Mali T76x r1p0 */
-static const base_hw_issue base_hw_issues_t76x_r1p0[] = {
+static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = {
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
        BASE_HW_ISSUE_10649,
@@ -625,13 +699,14 @@ static const base_hw_issue base_hw_issues_t76x_r1p0[] = {
        BASE_HW_ISSUE_T76X_3086,
        BASE_HW_ISSUE_T76X_3700,
        BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
        /* List of hardware issues must end with BASE_HW_ISSUE_END */
        BASE_HW_ISSUE_END
 };
 
 
 /* Mali T72x r0p0 */
-static const base_hw_issue base_hw_issues_t72x_r0p0[] = {
+static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = {
        BASE_HW_ISSUE_6402,
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
@@ -648,7 +723,24 @@ static const base_hw_issue base_hw_issues_t72x_r0p0[] = {
 };
 
 /* Mali T72x r1p0 */
-static const base_hw_issue base_hw_issues_t72x_r1p0[] = {
+static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = {
+       BASE_HW_ISSUE_6402,
+       BASE_HW_ISSUE_8803,
+       BASE_HW_ISSUE_9435,
+       BASE_HW_ISSUE_10471,
+       BASE_HW_ISSUE_10649,
+       BASE_HW_ISSUE_10684,
+       BASE_HW_ISSUE_10797,
+       BASE_HW_ISSUE_10821,
+       BASE_HW_ISSUE_10883,
+       BASE_HW_ISSUE_10931,
+       BASE_HW_ISSUE_10946,
+       /* List of hardware issues must end with BASE_HW_ISSUE_END */
+       BASE_HW_ISSUE_END
+};
+
+/* Mali T72x r1p1 */
+static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = {
        BASE_HW_ISSUE_6402,
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_9435,
@@ -664,9 +756,45 @@ static const base_hw_issue base_hw_issues_t72x_r1p0[] = {
        BASE_HW_ISSUE_END
 };
 
+#ifdef MALI_INCLUDE_TFRX
+/* Mali TFRx r0p0 */
+static const enum base_hw_issue base_hw_issues_tFRx_r0p0[] = {
+       BASE_HW_ISSUE_8803,
+       BASE_HW_ISSUE_9435,
+       BASE_HW_ISSUE_10649,
+       BASE_HW_ISSUE_10821,
+       BASE_HW_ISSUE_10883,
+       BASE_HW_ISSUE_10946,
+       BASE_HW_ISSUE_T76X_3086,
+       BASE_HW_ISSUE_T76X_3700,
+       BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
+       /* List of hardware issues must end with BASE_HW_ISSUE_END */
+       BASE_HW_ISSUE_END
+};
+#endif /* MALI_INCLUDE_TFRX */
+
+#ifdef MALI_INCLUDE_TF2X
+/* Mali TF2x r0p0 */
+static const enum base_hw_issue base_hw_issues_tF2x_r0p0[] = {
+       BASE_HW_ISSUE_8803,
+       BASE_HW_ISSUE_9435,
+       BASE_HW_ISSUE_10649,
+       BASE_HW_ISSUE_10821,
+       BASE_HW_ISSUE_10883,
+       BASE_HW_ISSUE_10946,
+       BASE_HW_ISSUE_T76X_3086,
+       BASE_HW_ISSUE_T76X_3700,
+       BASE_HW_ISSUE_T76X_3793,
+       BASE_HW_ISSUE_T76X_3953,
+       /* List of hardware issues must end with BASE_HW_ISSUE_END */
+       BASE_HW_ISSUE_END
+};
+#endif /* MALI_INCLUDE_TF2X */
+
 /* Model configuration
  */
-static const base_hw_issue base_hw_issues_model_t72x[] =
+static const enum base_hw_issue base_hw_issues_model_t72x[] =
 {
        BASE_HW_ISSUE_5736,
        BASE_HW_ISSUE_6402, /* NOTE: Fix is present in model r125162 but is not enabled until RTL is fixed */
@@ -679,7 +807,7 @@ static const base_hw_issue base_hw_issues_model_t72x[] =
        BASE_HW_ISSUE_END
 };
 
-static const base_hw_issue base_hw_issues_model_t7xx[] =
+static const enum base_hw_issue base_hw_issues_model_t7xx[] =
 {
        BASE_HW_ISSUE_5736,
        BASE_HW_ISSUE_9275,
@@ -693,10 +821,11 @@ static const base_hw_issue base_hw_issues_model_t7xx[] =
        BASE_HW_ISSUE_END
 };
 
-static const base_hw_issue base_hw_issues_model_t6xx[] =
+static const enum base_hw_issue base_hw_issues_model_t6xx[] =
 {
        BASE_HW_ISSUE_5736,
        BASE_HW_ISSUE_6402, /* NOTE: Fix is present in model r125162 but is not enabled until RTL is fixed */
+       BASE_HW_ISSUE_8778,     
        BASE_HW_ISSUE_9275,
        BASE_HW_ISSUE_9435,
        BASE_HW_ISSUE_10472,
@@ -708,4 +837,34 @@ static const base_hw_issue base_hw_issues_model_t6xx[] =
        BASE_HW_ISSUE_END
 };
 
+#ifdef MALI_INCLUDE_TFRX
+static const enum base_hw_issue base_hw_issues_model_tFRx[] =
+{
+       BASE_HW_ISSUE_5736,
+       BASE_HW_ISSUE_9275,
+       BASE_HW_ISSUE_9435,
+       BASE_HW_ISSUE_10931,
+       BASE_HW_ISSUE_T76X_3086,
+       BASE_HW_ISSUE_T76X_3700,
+       BASE_HW_ISSUE_T76X_3793,
+       /* List of hardware issues must end with BASE_HW_ISSUE_END */
+       BASE_HW_ISSUE_END
+};
+#endif /* MALI_INCLUDE_TFRX */
+
+#ifdef MALI_INCLUDE_TF2X
+static const enum base_hw_issue base_hw_issues_model_tF2x[] =
+{
+       BASE_HW_ISSUE_5736,
+       BASE_HW_ISSUE_9275,
+       BASE_HW_ISSUE_9435,
+       BASE_HW_ISSUE_10931,
+       BASE_HW_ISSUE_T76X_3086,
+       BASE_HW_ISSUE_T76X_3700,
+       BASE_HW_ISSUE_T76X_3793,
+       /* List of hardware issues must end with BASE_HW_ISSUE_END */
+       BASE_HW_ISSUE_END
+};
+#endif /* MALI_INCLUDE_TF2X */
+
 #endif                         /* _BASE_HWCONFIG_H_ */
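
Every base_hw_features_* and base_hw_issues_* table above follows the same convention: the list for a given GPU revision carries no explicit length and is instead terminated by BASE_HW_FEATURE_END or BASE_HW_ISSUE_END. A minimal sketch of how such a sentinel-terminated table can be scanned is shown below; demo_hw_has_issue is hypothetical, while the driver's real lookup (which folds the selected table into a per-device bitmask) lives in mali_kbase_hw.c.

#include <linux/types.h>

/* Assumes the enum base_hw_issue definitions from mali_base_hwconfig.h.
 * Walks a BASE_HW_ISSUE_END-terminated table and reports whether 'issue'
 * applies to the selected GPU revision. */
static bool demo_hw_has_issue(const enum base_hw_issue *table,
			      enum base_hw_issue issue)
{
	for (; *table != BASE_HW_ISSUE_END; table++)
		if (*table == issue)
			return true;
	return false;
}

/* e.g. demo_hw_has_issue(base_hw_issues_t76x_r0p0, BASE_HW_ISSUE_T76X_3953)
 * now returns true, so the "keep tiler clock on" workaround is applied. */
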
diff --git a/drivers/gpu/arm/midgard/mali_base_kernel.h b/drivers/gpu/arm/midgard/mali_base_kernel.h
index 53e643116a27121fed9e8c75a29ccd36125594d5..dfa64d8726b9d0e6043cd0eee701d21f53248623 100755 (executable)
 #ifndef _BASE_KERNEL_H_
 #define _BASE_KERNEL_H_
 
-/* For now we support the legacy API as well as the new API */
-#define BASE_LEGACY_JD_API 1
+#ifndef __user
+#define __user
+#endif
+
+/* Support UK6 IOCTLS */
+#define BASE_LEGACY_UK6_SUPPORT 1
 
 typedef mali_addr64 base_mem_handle;
 
@@ -46,11 +50,6 @@ typedef mali_addr64 base_mem_handle;
 #define BASEP_JD_SEM_MASK_IN_WORD(x)    (1 << ((x) & (BASEP_JD_SEM_PER_WORD - 1)))
 #define BASEP_JD_SEM_ARRAY_SIZE         BASEP_JD_SEM_WORD_NR(BASE_JD_ATOM_COUNT)
 
-#if BASE_LEGACY_JD_API
-/* Size of the ring buffer */
-#define BASEP_JCTX_RB_NRPAGES           4
-#endif                         /* BASE_LEGACY_JD_API */
-
 #define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3
 
 #define BASE_MAX_COHERENT_GROUPS 16
@@ -77,7 +76,7 @@ typedef mali_addr64 base_mem_handle;
 
 /** 32/64-bit neutral way to represent pointers */
 typedef union kbase_pointer {
-       void *value;      /**< client should store their pointers here */
+       void __user *value;       /**< client should store their pointers here */
        u32 compat_value; /**< 64-bit kernels should fetch value here when handling 32-bit clients */
        u64 sizer;        /**< Force 64-bit storage for all clients regardless */
 } kbase_pointer;
@@ -102,7 +101,7 @@ typedef union kbase_pointer {
  * heavily read by the CPU...
  * Other flags are only meaningful to a particular allocator.
  * More flags can be added to this list, as long as they don't clash
- * (see ::BASE_MEM_FLAGS_NR_BITS for the number of the first free bit).
+ * (see ::BASE_MEM_FLAGS_NR_TOTAL_BITS for the number of the first free bit).
  */
 typedef u32 base_mem_alloc_flags;
 
@@ -113,28 +112,57 @@ typedef u32 base_mem_alloc_flags;
  *
  */
 enum {
+/* IN */
        BASE_MEM_PROT_CPU_RD = (1U << 0),      /**< Read access CPU side */
        BASE_MEM_PROT_CPU_WR = (1U << 1),      /**< Write access CPU side */
        BASE_MEM_PROT_GPU_RD = (1U << 2),      /**< Read access GPU side */
        BASE_MEM_PROT_GPU_WR = (1U << 3),      /**< Write access GPU side */
-       BASE_MEM_PROT_GPU_EX = (1U << 4),      /**< Execute allowed on the GPU side */
+       BASE_MEM_PROT_GPU_EX = (1U << 4),      /**< Execute allowed on the GPU
+                                                   side */
 
        /* Note that the HINT flags are obsolete now. If you want the memory
         * to be cached on the CPU please use the BASE_MEM_CACHED_CPU flag
         */
-       BASE_MEM_HINT_CPU_RD = (1U << 5),      /**< Heavily read CPU side - OBSOLETE */
-       BASE_MEM_HINT_CPU_WR = (1U << 6),      /**< Heavily written CPU side - OBSOLETE */
-       BASE_MEM_HINT_GPU_RD = (1U << 7),      /**< Heavily read GPU side  - OBSOLETE */
-       BASE_MEM_HINT_GPU_WR = (1U << 8),      /**< Heavily written GPU side - OBSOLETE */
-
-       BASE_MEM_GROW_ON_GPF = (1U << 9),      /**< Grow backing store on GPU Page Fault */
+       BASE_MEM_HINT_CPU_RD = (1U << 5),      /**< Heavily read CPU side
+                                                   - OBSOLETE */
+       BASE_MEM_HINT_CPU_WR = (1U << 6),      /**< Heavily written CPU side
+                                                   - OBSOLETE */
+       BASE_MEM_HINT_GPU_RD = (1U << 7),      /**< Heavily read GPU side
+                                                   - OBSOLETE */
+       BASE_MEM_HINT_GPU_WR = (1U << 8),      /**< Heavily written GPU side
+                                                   - OBSOLETE */
+
+       BASE_MEM_GROW_ON_GPF = (1U << 9),      /**< Grow backing store on GPU
+                                                   Page Fault */
+
+       BASE_MEM_COHERENT_SYSTEM = (1U << 10), /**< Page coherence Outer
+                                                   shareable */
+       BASE_MEM_COHERENT_LOCAL = (1U << 11),  /**< Page coherence Inner
+                                                   shareable */
+       BASE_MEM_CACHED_CPU = (1U << 12),      /**< Should be cached on the
+                                                   CPU */
+
+/* IN/OUT */
+       BASE_MEM_SAME_VA = (1U << 13), /**< Must have same VA on both the GPU
+                                           and the CPU */
+/* OUT */
+       BASE_MEM_NEED_MMAP = (1U << 14) /**< Must call mmap to aquire a GPU
+                                            address for the alloc */
+};
 
-       BASE_MEM_COHERENT_SYSTEM = (1U << 10), /**< Page coherence Outer shareable */
-       BASE_MEM_COHERENT_LOCAL = (1U << 11),  /**< Page coherence Inner shareable */
-       BASE_MEM_CACHED_CPU = (1U << 12),      /**< Should be cached on the CPU */
+/**
+ * @brief Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the ::base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_INPUT_BITS  14
+#define BASE_MEM_FLAGS_NR_OUTPUT_BITS 1
+#define BASE_MEM_FLAGS_NR_TOTAL_BITS  ((BASE_MEM_FLAGS_NR_INPUT_BITS) + (BASE_MEM_FLAGS_NR_OUTPUT_BITS))
+#define BASE_MEM_FLAGS_NR_BITS 15
 
-       BASE_MEM_SAME_VA = (1U << 13) /**< Must have same VA on both the GPU and the CPU */
-};
+#if BASE_MEM_FLAGS_NR_TOTAL_BITS > BASE_MEM_FLAGS_NR_BITS
+#error "Too many flag bits, will require change in cmem"
+#endif
 
 /**
  * @brief Memory types supported by @a base_mem_import
@@ -178,12 +206,6 @@ typedef enum base_mem_import_type {
 /* Bit mask of cookies used for for memory allocation setup */
 #define KBASE_COOKIE_MASK  ~1UL /* bit 0 is reserved */
 
-/**
- * @brief Number of bits used as flags for base memory management
- *
- * Must be kept in sync with the ::base_mem_alloc_flags flags
- */
-#define BASE_MEM_FLAGS_NR_BITS  14
 
 /**
  * @brief Result codes of changing the size of the backing store allocated to a tmem region
@@ -208,7 +230,7 @@ typedef enum base_backing_threshold_status {
  * by the accessors.
  */
 typedef struct base_syncset {
-       basep_syncset basep_sset;
+       struct basep_syncset basep_sset;
 } base_syncset;
 
 /** @} end group base_user_api_memory_defered */
@@ -257,25 +279,6 @@ typedef struct base_fence {
        } basep;
 } base_fence;
 
-#if BASE_LEGACY_JD_API
-/**
- * @brief A pre- or post- dual dependency.
- *
- * This structure is used to express either
- * @li a single or dual pre-dependency (a job depending on one or two
- * other jobs),
- * @li a single or dual post-dependency (a job resolving a dependency
- * for one or two other jobs).
- *
- * The dependency itself is specified as a u8, where 0 indicates no
- * dependency. A single dependency is expressed by having one of the
- * dependencies set to 0.
- */
-typedef struct base_jd_dep {
-       u8 dep[2];      /**< pre/post dependencies */
-} base_jd_dep;
-#endif                         /* BASE_LEGACY_JD_API */
-
 /**
  * @brief Per-job data
  *
@@ -497,64 +500,55 @@ typedef u16 base_jd_core_req;
 #define BASEP_JD_REQ_ATOM_TYPE (~(BASEP_JD_REQ_RESERVED | BASE_JD_REQ_EVENT_ONLY_ON_FAILURE |\
                                BASE_JD_REQ_EXTERNAL_RESOURCES | BASEP_JD_REQ_EVENT_NEVER))
 
-#if BASE_LEGACY_JD_API
 /**
- * @brief A single job chain, with pre/post dependendencies and mem ops
- *
- * This structure is used to describe a single job-chain to be submitted
- * as part of a bag.
- * It contains all the necessary information for Base to take care of this
- * job-chain, including core requirements, priority, syncsets and
- * dependencies.
- */
-typedef struct base_jd_atom {
-       mali_addr64 jc;                     /**< job-chain GPU address */
-       base_jd_udata udata;                /**< user data */
-       base_jd_dep pre_dep;                /**< pre-dependencies */
-       base_jd_dep post_dep;               /**< post-dependencies */
-       base_jd_core_req core_req;          /**< core requirements */
-       u16 nr_syncsets;                    /**< nr of syncsets following the atom */
-       u16 nr_extres;                      /**< nr of external resources following the atom */
-
-       /** @brief Relative priority.
-        *
-        * A positive value requests a lower priority, whilst a negative value
-        * requests a higher priority. Only privileged processes may request a
-        * higher priority. For unprivileged processes, a negative priority will
-        * be interpreted as zero.
-        */
-       s8 prio;
+ * @brief States to model state machine processed by kbasep_js_job_check_ref_cores(), which
+ * handles retaining cores for power management and affinity management.
+ *
+ * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
+ * where lots of atoms could be submitted before powerup, and each has an
+ * affinity chosen that causes other atoms to have an affinity
+ * violation. Whilst the affinity was not causing violations at the time it
+ * was chosen, it could cause violations thereafter. For example, 1000 jobs
+ * could have had their affinity chosen during the powerup time, so any of
+ * those 1000 jobs could cause an affinity violation later on.
+ *
+ * The attack would otherwise occur because other atoms/contexts have to wait for:
+ * -# the currently running atoms (which are causing the violation) to
+ * finish
+ * -# and, the atoms that had their affinity chosen during powerup to
+ * finish. These are run preferrentially because they don't cause a
+ * violation, but instead continue to cause the violation in others.
+ * -# or, the attacker is scheduled out (which might not happen for just 2
+ * contexts)
+ *
+ * By re-choosing the affinity (which is designed to avoid violations at the
+ * time it's chosen), we break condition (2) of the wait, which minimizes the
+ * problem to just waiting for current jobs to finish (which can be bounded if
+ * the Job Scheduling Policy has a timer).
+ */
+enum kbase_atom_coreref_state {
+       /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
+       KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
+       /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
+       KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
+       /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
+       KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
+       /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
+       KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
+       /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
+       KBASE_ATOM_COREREF_STATE_READY
+};
 
-       /**
-        * @brief Device number to use, depending on @ref base_jd_core_req flags set.
-        *
-        * When BASE_JD_REQ_SPECIFIC_COHERENT_GROUP is set, a 'device' is one of
-        * the coherent core groups, and so this targets a particular coherent
-        * core-group. They are numbered from 0 to (mali_base_gpu_coherent_group_info::num_groups - 1),
-        * and the cores targeted by this device_nr will usually be those specified by
-        * (mali_base_gpu_coherent_group_info::group[device_nr].core_mask).
-        * Further, two atoms from different processes using the same \a device_nr
-        * at the same time will always target the same coherent core-group.
-        *
-        * There are exceptions to when the device_nr is ignored:
-        * - when any process in the system uses a BASE_JD_REQ_CS or
-        * BASE_JD_REQ_ONLY_COMPUTE atom that can run on all cores across all
-        * coherency groups (i.e. also does \b not have the
-        * BASE_JD_REQ_COHERENT_GROUP or BASE_JD_REQ_SPECIFIC_COHERENT_GROUP flags
-        * set). In this case, such atoms would block device_nr==1 being used due
-        * to restrictions on affinity, perhaps indefinitely. To ensure progress is
-        * made, the atoms targeted for device_nr 1 will instead be redirected to
-        * device_nr 0
-        * - During certain HW workarounds, such as BASE_HW_ISSUE_8987, where
-        * BASE_JD_REQ_ONLY_COMPUTE atoms must not use the same cores as other
-        * atoms. In this case, all atoms are targeted to device_nr == min( num_groups, 1 )
-        *
-        * Note that the 'device' number for a coherent coregroup cannot exceed
-        * (BASE_MAX_COHERENT_GROUPS - 1).
-        */
-       u8 device_nr;
-} base_jd_atom;
-#endif                         /* BASE_LEGACY_JD_API */
+enum kbase_jd_atom_state {
+       /** Atom is not used */
+       KBASE_JD_ATOM_STATE_UNUSED,
+       /** Atom is queued in JD */
+       KBASE_JD_ATOM_STATE_QUEUED,
+       /** Atom has been given to JS (is runnable/running) */
+       KBASE_JD_ATOM_STATE_IN_JS,
+       /** Atom has been completed, but not yet handed back to userspace */
+       KBASE_JD_ATOM_STATE_COMPLETED
+};
 
 typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
 
@@ -565,7 +559,7 @@ struct base_dependency {
 
 typedef struct base_jd_atom_v2 {
        mali_addr64 jc;                     /**< job-chain GPU address */
-       base_jd_udata udata;                /**< user data */
+       struct base_jd_udata udata;                 /**< user data */
        kbase_pointer extres_list;          /**< list of external resources */
        u16 nr_extres;                      /**< nr of external resources */
        base_jd_core_req core_req;          /**< core requirements */
@@ -577,13 +571,20 @@ typedef struct base_jd_atom_v2 {
        u8 padding[5];
 } base_jd_atom_v2;
 
-#if BASE_LEGACY_JD_API
-/* Structure definition works around the fact that C89 doesn't allow arrays of size 0 */
-typedef struct basep_jd_atom_ss {
-       base_jd_atom atom;
-       base_syncset syncsets[1];
-} basep_jd_atom_ss;
-#endif                         /* BASE_LEGACY_JD_API */
+#ifdef BASE_LEGACY_UK6_SUPPORT
+struct base_jd_atom_v2_uk6 {
+       mali_addr64 jc;                     /**< job-chain GPU address */
+       struct base_jd_udata udata;                 /**< user data */
+       kbase_pointer extres_list;          /**< list of external resources */
+       u16 nr_extres;                      /**< nr of external resources */
+       base_jd_core_req core_req;          /**< core requirements */
+       base_atom_id pre_dep[2]; /**< pre-dependencies */
+       base_atom_id atom_number;           /**< unique number to identify the atom */
+       s8 prio;                            /**< priority - smaller is higher priority */
+       u8 device_nr;                       /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
+       u8 padding[7];
+};
+#endif
 
 typedef enum base_external_resource_access {
        BASE_EXT_RES_ACCESS_SHARED,
@@ -594,61 +595,6 @@ typedef struct base_external_resource {
        u64 ext_resource;
 } base_external_resource;
 
-#if BASE_LEGACY_JD_API
-/* Structure definition works around the fact that C89 doesn't allow arrays of size 0 */
-typedef struct basep_jd_atom_ext_res {
-       base_jd_atom atom;
-       base_external_resource resources[1];
-} basep_jd_atom_ext_res;
-
-static INLINE size_t base_jd_atom_size_ex(u32 syncset_count, u32 external_res_count)
-{
-       int size;
-
-       LOCAL_ASSERT(0 == syncset_count || 0 == external_res_count);
-
-       size = syncset_count ? offsetof(basep_jd_atom_ss, syncsets[0]) + (sizeof(base_syncset) * syncset_count) : external_res_count ? offsetof(basep_jd_atom_ext_res, resources[0]) + (sizeof(base_external_resource) * external_res_count) : sizeof(base_jd_atom);
-
-       /* Atom minimum size set to 64 bytes to ensure that the maximum
-        * number of atoms in the ring buffer is limited to 256 */
-       return MAX(64, size);
-}
-
-/**
- * @brief Atom size evaluator
- *
- * This function returns the size in bytes of a ::base_jd_atom
- * containing @a n syncsets. It must be used to compute the size of a
- * bag before allocation.
- *
- * @param nr the number of syncsets for this atom
- * @return the atom size in bytes
- */
-static INLINE size_t base_jd_atom_size(u32 nr)
-{
-       return base_jd_atom_size_ex(nr, 0);
-}
-
-/**
- * @brief Atom syncset accessor
- *
- * This function returns a pointer to the nth syncset allocated
- * together with an atom.
- *
- * @param[in] atom The allocated atom
- * @param     n    The number of the syncset to be returned
- * @return a pointer to the nth syncset.
- */
-static INLINE base_syncset *base_jd_get_atom_syncset(base_jd_atom *atom, u16 n)
-{
-       LOCAL_ASSERT(atom != NULL);
-       LOCAL_ASSERT(0 == (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES));
-       LOCAL_ASSERT(n <= atom->nr_syncsets);
-       return &((basep_jd_atom_ss *) atom)->syncsets[n];
-}
-#endif                         /* BASE_LEGACY_JD_API */
-
-
 /**
  * @brief Setter for a dependency structure
  *
@@ -704,17 +650,7 @@ static INLINE void base_jd_atom_dep_copy(const struct base_dependency* const_dep
  * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom
  * @param[in] fence The base fence object to trigger.
  */
-static INLINE void base_jd_fence_trigger_setup(base_jd_atom * const atom, base_fence *fence)
-{
-       LOCAL_ASSERT(atom);
-       LOCAL_ASSERT(fence);
-       LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE);
-       LOCAL_ASSERT(fence->basep.stream_fd >= 0);
-       atom->jc = (uintptr_t) fence;
-       atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER;
-}
-
-static INLINE void base_jd_fence_trigger_setup_v2(base_jd_atom_v2 *atom, base_fence *fence)
+static INLINE void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
 {
        LOCAL_ASSERT(atom);
        LOCAL_ASSERT(fence);
@@ -743,7 +679,7 @@ static INLINE void base_jd_fence_trigger_setup_v2(base_jd_atom_v2 *atom, base_fe
  * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom
  * @param[in] fence The base fence object to wait on
  */
-static INLINE void base_jd_fence_wait_setup(base_jd_atom * const atom, base_fence *fence)
+static INLINE void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
 {
        LOCAL_ASSERT(atom);
        LOCAL_ASSERT(fence);
@@ -752,34 +688,6 @@ static INLINE void base_jd_fence_wait_setup(base_jd_atom * const atom, base_fenc
        atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
 }
 
-static INLINE void base_jd_fence_wait_setup_v2(base_jd_atom_v2 *atom, base_fence *fence)
-{
-       LOCAL_ASSERT(atom);
-       LOCAL_ASSERT(fence);
-       LOCAL_ASSERT(fence->basep.fd >= 0);
-       atom->jc = (uintptr_t) fence;
-       atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
-}
-
-#if BASE_LEGACY_JD_API
-/**
- * @brief Atom external resource accessor
- *
- * This functions returns a pointer to the nth external resource tracked by the atom.
- *
- * @param[in] atom The allocated atom
- * @param     n    The number of the external resource to return a pointer to
- * @return a pointer to the nth external resource
- */
-static INLINE base_external_resource * base_jd_get_external_resource(base_jd_atom *atom, u16 n)
-{
-       LOCAL_ASSERT(atom != NULL);
-       LOCAL_ASSERT(BASE_JD_REQ_EXTERNAL_RESOURCES == (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES));
-       LOCAL_ASSERT(n <= atom->nr_extres);
-       return &((basep_jd_atom_ext_res *) atom)->resources[n];
-}
-#endif                         /* BASE_LEGACY_JD_API */
-
 /**
  * @brief External resource info initialization.
  *
@@ -790,7 +698,7 @@ static INLINE base_external_resource * base_jd_get_external_resource(base_jd_ato
  * @param     handle  The handle to the imported memory object
  * @param     access  The type of access requested
  */
-static INLINE void base_external_resource_init(base_external_resource * res, base_import_handle handle, base_external_resource_access access)
+static INLINE void base_external_resource_init(struct base_external_resource * res, struct base_import_handle handle, base_external_resource_access access)
 {
        mali_addr64 address;
        address = handle.basep.handle;
@@ -802,24 +710,6 @@ static INLINE void base_external_resource_init(base_external_resource * res, bas
        res->ext_resource = address | (access & LOCAL_PAGE_LSB);
 }
 
-#if BASE_LEGACY_JD_API
-/**
- * @brief Next atom accessor
- *
- * This function returns a pointer to the next allocated atom. It
- * relies on the fact that the current atom has been correctly
- * initialized (relies on the base_jd_atom::nr_syncsets field).
- *
- * @param[in] atom The allocated atom
- * @return a pointer to the next atom.
- */
-static INLINE base_jd_atom *base_jd_get_next_atom(base_jd_atom *atom)
-{
-       LOCAL_ASSERT(atom != NULL);
-       return (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) ? (base_jd_atom *) base_jd_get_external_resource(atom, atom->nr_extres) : (base_jd_atom *) base_jd_get_atom_syncset(atom, atom->nr_syncsets);
-}
-#endif                         /* BASE_LEGACY_JD_API */
-
 /**
  * @brief Job chain event code bits
  * Defines the bits used to create ::base_jd_event_code
@@ -976,21 +866,14 @@ typedef enum base_jd_event_code {
  * been completed (ie all contained job-chains have been completed).
  * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used
  */
-#if BASE_LEGACY_JD_API
-typedef struct base_jd_event {
-       base_jd_event_code event_code;      /**< event code */
-       void *data;                         /**< event specific data */
-} base_jd_event;
-#endif
-
 typedef struct base_jd_event_v2 {
-       base_jd_event_code event_code;      /**< event code */
-       base_atom_id atom_number;           /**< the atom number that has completed */
-       base_jd_udata udata;                /**< user data */
+       base_jd_event_code event_code;  /**< event code */
+       base_atom_id atom_number;       /**< the atom number that has completed */
+       struct base_jd_udata udata;     /**< user data */
 } base_jd_event_v2;
 
 /**
- * Padding required to ensure that the @ref base_dump_cpu_gpu_counters structure fills
+ * Padding required to ensure that the @ref struct base_dump_cpu_gpu_counters structure fills
  * a full cache line.
  */
 
@@ -1656,7 +1539,7 @@ typedef struct base_cpu_id_props
        u8 valid;  
 
        u8 padding[1];
-}base_cpu_id_props;
+} base_cpu_id_props;
 
 
 /** @brief Platform Dynamic CPU properties structure */
@@ -1716,7 +1599,7 @@ typedef struct base_cpu_props {
        /**
         * CPU ID detailed info
         */
-       base_cpu_id_props cpu_id;
+       struct base_cpu_id_props cpu_id;
 
        u32 padding;
 } base_cpu_props;
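
Beyond the struct-tag cleanups, two changes in this header are worth calling out: kbase_pointer's value member is now tagged __user, and the BASE_MEM_FLAGS_NR_BITS accounting is split into input and output bit counts with a compile-time overflow check. The union's comment also says that 64-bit kernels should fetch compat_value when serving 32-bit clients; a minimal sketch of that fetch, assuming the kbase_pointer definition above, is shown below (demo_get_user_pointer is hypothetical, not the driver's actual compat handling).

#include <linux/compat.h>
#include <linux/types.h>

/* Assumes the 32/64-bit neutral kbase_pointer union from mali_base_kernel.h. */
static void __user *demo_get_user_pointer(const kbase_pointer *p)
{
#ifdef CONFIG_COMPAT
	/* 32-bit client on a 64-bit kernel: only the low 32 bits are valid. */
	if (is_compat_task())
		return compat_ptr(p->compat_value);
#endif
	return p->value;
}
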
diff --git a/drivers/gpu/arm/midgard/mali_kbase.h b/drivers/gpu/arm/midgard/mali_kbase.h
index 5fd4b847a86d41bb15d799ef9f76bd6f65df5b4d..f1894c22906d38a28b55423263f91d998c9de9db 100755 (executable)
@@ -53,7 +53,9 @@
 #include "mali_kbase_mem.h"
 #include "mali_kbase_security.h"
 #include "mali_kbase_utility.h"
-#include <mali_kbase_gpu_memory_debugfs.h>
+#include "mali_kbase_gpu_memory_debugfs.h"
+#include "mali_kbase_mem_profile_debugfs.h"
+#include "mali_kbase_jd_debugfs.h"
 #include "mali_kbase_cpuprops.h"
 #include "mali_kbase_gpuprops.h"
 #ifdef CONFIG_GPU_TRACEPOINTS
@@ -70,7 +72,7 @@
  * @defgroup base_kbase_api Kernel-side Base (KBase) APIs
  */
 
-kbase_device *kbase_device_alloc(void);
+struct kbase_device *kbase_device_alloc(void);
 /*
 * note: configuration attributes member of kbdev needs to have
 * been setup before calling kbase_device_init
@@ -84,12 +86,12 @@ const struct list_head *kbase_dev_list_get(void);
 /* API to release the device list semaphore */
 void kbase_dev_list_put(const struct list_head *dev_list);
 
-mali_error kbase_device_init(kbase_device * const kbdev);
-void kbase_device_term(kbase_device *kbdev);
-void kbase_device_free(kbase_device *kbdev);
-int kbase_device_has_feature(kbase_device *kbdev, u32 feature);
-kbase_midgard_type kbase_device_get_type(kbase_device *kbdev);
+mali_error kbase_device_init(struct kbase_device * const kbdev);
+void kbase_device_term(struct kbase_device *kbdev);
+void kbase_device_free(struct kbase_device *kbdev);
+int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
 struct kbase_device *kbase_find_device(int minor);     /* Only needed for gator integration */
+void kbase_release_device(struct kbase_device *kbdev);
 
 void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value);
 
@@ -100,78 +102,96 @@ u32 kbase_get_profiling_control(struct kbase_device *kbdev, u32 control);
  *
  * @param kbdev     The kbase device
  */
-void kbase_synchronize_irqs(kbase_device *kbdev);
-
-kbase_context *kbase_create_context(kbase_device *kbdev);
-void kbase_destroy_context(kbase_context *kctx);
-mali_error kbase_context_set_create_flags(kbase_context *kctx, u32 flags);
-
-mali_error kbase_instr_hwcnt_setup(kbase_context *kctx, kbase_uk_hwcnt_setup *setup);
-mali_error kbase_instr_hwcnt_enable(kbase_context *kctx, kbase_uk_hwcnt_setup *setup);
-mali_error kbase_instr_hwcnt_disable(kbase_context *kctx);
-mali_error kbase_instr_hwcnt_clear(kbase_context *kctx);
-mali_error kbase_instr_hwcnt_dump(kbase_context *kctx);
-mali_error kbase_instr_hwcnt_dump_irq(kbase_context *kctx);
-mali_bool kbase_instr_hwcnt_dump_complete(kbase_context *kctx, mali_bool * const success);
-void kbase_instr_hwcnt_suspend(kbase_device *kbdev);
-void kbase_instr_hwcnt_resume(kbase_device *kbdev);
+void kbase_synchronize_irqs(struct kbase_device *kbdev);
+void kbase_synchronize_irqs(struct kbase_device *kbdev);
+
+struct kbase_context *kbase_create_context(struct kbase_device *kbdev);
+void kbase_destroy_context(struct kbase_context *kctx);
+mali_error kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);
+
+mali_error kbase_instr_hwcnt_setup(struct kbase_context *kctx, struct kbase_uk_hwcnt_setup *setup);
+mali_error kbase_instr_hwcnt_enable(struct kbase_context *kctx, struct kbase_uk_hwcnt_setup *setup);
+mali_error kbase_instr_hwcnt_disable(struct kbase_context *kctx);
+mali_error kbase_instr_hwcnt_clear(struct kbase_context *kctx);
+mali_error kbase_instr_hwcnt_dump(struct kbase_context *kctx);
+mali_error kbase_instr_hwcnt_dump_irq(struct kbase_context *kctx);
+mali_bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx, mali_bool * const success);
+void kbase_instr_hwcnt_suspend(struct kbase_device *kbdev);
+void kbase_instr_hwcnt_resume(struct kbase_device *kbdev);
 
 void kbasep_cache_clean_worker(struct work_struct *data);
-void kbase_clean_caches_done(kbase_device *kbdev);
+void kbase_clean_caches_done(struct kbase_device *kbdev);
 
 /**
  * The GPU has completed performance count sampling successfully.
  */
-void kbase_instr_hwcnt_sample_done(kbase_device *kbdev);
-
-mali_error kbase_jd_init(kbase_context *kctx);
-void kbase_jd_exit(kbase_context *kctx);
-mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *user_bag);
-void kbase_jd_done(kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev);
+
+mali_error kbase_jd_init(struct kbase_context *kctx);
+void kbase_jd_exit(struct kbase_context *kctx);
+#ifdef BASE_LEGACY_UK6_SUPPORT
+mali_error kbase_jd_submit(struct kbase_context *kctx,
+               const struct kbase_uk_job_submit *submit_data,
+               int uk6_atom);
+#else
+mali_error kbase_jd_submit(struct kbase_context *kctx,
+               const struct kbase_uk_job_submit *submit_data);
+#endif
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
                    kbasep_js_atom_done_code done_code);
-void kbase_jd_cancel(kbase_device *kbdev, kbase_jd_atom *katom);
-void kbase_jd_zap_context(kbase_context *kctx);
-mali_bool jd_done_nolock(kbase_jd_atom *katom);
-void kbase_jd_free_external_resources(kbase_jd_atom *katom);
-mali_bool jd_submit_atom(kbase_context *kctx,
-                        const base_jd_atom_v2 *user_atom,
-                        kbase_jd_atom *katom);
-
-mali_error kbase_job_slot_init(kbase_device *kbdev);
-void kbase_job_slot_halt(kbase_device *kbdev);
-void kbase_job_slot_term(kbase_device *kbdev);
-void kbase_job_done(kbase_device *kbdev, u32 done);
-void kbase_job_zap_context(kbase_context *kctx);
-
-void kbase_job_slot_softstop(kbase_device *kbdev, int js, kbase_jd_atom *target_katom);
-void kbase_job_slot_hardstop(kbase_context *kctx, int js, kbase_jd_atom *target_katom);
-
-void kbase_event_post(kbase_context *ctx, kbase_jd_atom *event);
-int kbase_event_dequeue(kbase_context *ctx, base_jd_event_v2 *uevent);
-int kbase_event_pending(kbase_context *ctx);
-mali_error kbase_event_init(kbase_context *kctx);
-void kbase_event_close(kbase_context *kctx);
-void kbase_event_cleanup(kbase_context *kctx);
-void kbase_event_wakeup(kbase_context *kctx);
-
-int kbase_process_soft_job(kbase_jd_atom *katom);
-mali_error kbase_prepare_soft_job(kbase_jd_atom *katom);
-void kbase_finish_soft_job(kbase_jd_atom *katom);
-void kbase_cancel_soft_job(kbase_jd_atom *katom);
-void kbase_resume_suspended_soft_jobs(kbase_device *kbdev);
-
-int kbase_replay_process(kbase_jd_atom *katom);
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
+void kbase_jd_zap_context(struct kbase_context *kctx);
+mali_bool jd_done_nolock(struct kbase_jd_atom *katom);
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
+mali_bool jd_submit_atom(struct kbase_context *kctx,
+                        const struct base_jd_atom_v2 *user_atom,
+                        struct kbase_jd_atom *katom);
+
+mali_error kbase_job_slot_init(struct kbase_device *kbdev);
+void kbase_job_slot_halt(struct kbase_device *kbdev);
+void kbase_job_slot_term(struct kbase_device *kbdev);
+void kbase_job_done(struct kbase_device *kbdev, u32 done);
+void kbase_job_zap_context(struct kbase_context *kctx);
+
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
+               struct kbase_jd_atom *target_katom);
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+               struct kbase_jd_atom *target_katom, u32 sw_flags);
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+               struct kbase_jd_atom *target_katom);
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+               u16 core_reqs, struct kbase_jd_atom *target_katom);
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+               struct kbase_jd_atom *target_katom);
+
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
+int kbase_event_pending(struct kbase_context *ctx);
+mali_error kbase_event_init(struct kbase_context *kctx);
+void kbase_event_close(struct kbase_context *kctx);
+void kbase_event_cleanup(struct kbase_context *kctx);
+void kbase_event_wakeup(struct kbase_context *kctx);
+
+int kbase_process_soft_job(struct kbase_jd_atom *katom);
+mali_error kbase_prepare_soft_job(struct kbase_jd_atom *katom);
+void kbase_finish_soft_job(struct kbase_jd_atom *katom);
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
+
+bool kbase_replay_process(struct kbase_jd_atom *katom);
 
 /* api used internally for register access. Contains validation and tracing */
-void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *kctx);
-u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx);
-void kbase_device_trace_register_access(kbase_context *kctx, kbase_reg_access_type type, u16 reg_offset, u32 reg_value);
-void kbase_device_trace_buffer_install(kbase_context *kctx, u32 *tb, size_t size);
-void kbase_device_trace_buffer_uninstall(kbase_context *kctx);
+void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value, struct kbase_context *kctx);
+u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset, struct kbase_context *kctx);
+void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value);
+void kbase_device_trace_buffer_install(struct kbase_context *kctx, u32 *tb, size_t size);
+void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx);
 
 /* api to be ported per OS, only need to do the raw register access */
-void kbase_os_reg_write(kbase_device *kbdev, u16 offset, u32 value);
-u32 kbase_os_reg_read(kbase_device *kbdev, u16 offset);
+void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value);
+u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset);
+
+void kbasep_as_do_poke(struct work_struct *work);
 
 /** Report a GPU fault.
  *
@@ -181,7 +201,7 @@ u32 kbase_os_reg_read(kbase_device *kbdev, u16 offset);
  * @param kbdev     The kbase device that the GPU fault occurred from.
  * @param multiple  Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS was also set
  */
-void kbase_report_gpu_fault(kbase_device *kbdev, int multiple);
+void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple);
 
 /** Kill all jobs that are currently running from a context
  *
@@ -189,7 +209,7 @@ void kbase_report_gpu_fault(kbase_device *kbdev, int multiple);
  *
  * @param kctx      The context to kill jobs from
  */
-void kbase_job_kill_jobs_from_context(kbase_context *kctx);
+void kbase_job_kill_jobs_from_context(struct kbase_context *kctx);
 
 /**
  * GPU interrupt handler
@@ -199,7 +219,7 @@ void kbase_job_kill_jobs_from_context(kbase_context *kctx);
  * @param kbdev The kbase device to handle an IRQ for
  * @param val   The value of the GPU IRQ status register which triggered the call
  */
-void kbase_gpu_interrupt(kbase_device *kbdev, u32 val);
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val);
 
 /**
  * Prepare for resetting the GPU.
@@ -211,7 +231,7 @@ void kbase_gpu_interrupt(kbase_device *kbdev, u32 val);
  *
  * @return See description
  */
-mali_bool kbase_prepare_to_reset_gpu(kbase_device *kbdev);
+mali_bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);
 
 /**
  * Pre-locked version of @a kbase_prepare_to_reset_gpu.
@@ -221,7 +241,7 @@ mali_bool kbase_prepare_to_reset_gpu(kbase_device *kbdev);
  *
  * @see kbase_prepare_to_reset_gpu
  */
-mali_bool kbase_prepare_to_reset_gpu_locked(kbase_device *kbdev);
+mali_bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);
 
 /** Reset the GPU
  *
@@ -231,7 +251,7 @@ mali_bool kbase_prepare_to_reset_gpu_locked(kbase_device *kbdev);
  * After this function is called (or not called if kbase_prepare_to_reset_gpu returned MALI_FALSE),
  * the caller should wait for kbdev->reset_waitq to be signalled to know when the reset has completed.
  */
-void kbase_reset_gpu(kbase_device *kbdev);
+void kbase_reset_gpu(struct kbase_device *kbdev);
 
 /**
  * Pre-locked version of @a kbase_reset_gpu.
@@ -241,7 +261,7 @@ void kbase_reset_gpu(kbase_device *kbdev);
  *
  * @see kbase_reset_gpu
  */
-void kbase_reset_gpu_locked(kbase_device *kbdev);
+void kbase_reset_gpu_locked(struct kbase_device *kbdev);
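
A sketch of the documented reset sequence (based on the comments above, not part of the patch); the surrounding caller context is assumed:

	/* Illustrative only: reset flow as described by the comments above */
	if (kbase_prepare_to_reset_gpu(kbdev))
		kbase_reset_gpu(kbdev);
	/* in either case, wait for kbdev->reset_waitq to be signalled
	 * to know when the reset has completed */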
 
 /** Returns the name associated with a Mali exception code
  *
@@ -268,7 +288,7 @@ static INLINE mali_bool kbase_pm_is_suspending(struct kbase_device *kbdev) {
  * Return the atom's ID, as was originally supplied by userspace in
  * base_jd_atom_v2::atom_number
  */
-static INLINE int kbase_jd_atom_id(kbase_context *kctx, kbase_jd_atom *katom)
+static INLINE int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
        int result;
        KBASE_DEBUG_ASSERT(kctx);
@@ -280,7 +300,90 @@ static INLINE int kbase_jd_atom_id(kbase_context *kctx, kbase_jd_atom *katom)
        return result;
 }
 
-#if KBASE_TRACE_ENABLE != 0
+/**
+ * Initialize the disjoint state
+ *
+ * The disjoint event count and state are both set to zero.
+ *
+ * Disjoint functions usage:
+ *
+ * The disjoint event count should be incremented whenever a disjoint event occurs.
+ *
+ * There are several cases which are regarded as disjoint behavior. Rather than just increment
+ * the counter during disjoint events we also increment the counter when jobs may be affected
+ * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
+ *
+ * Disjoint state is entered during GPU reset and for the entire time that an atom is replaying
+ * (as part of the replay workaround). Increasing the disjoint state also increases the count of
+ * disjoint events.
+ *
+ * The disjoint state is then used to increase the count of disjoint events during job submission
+ * and job completion. Any atom submitted or completed while the disjoint state is greater than
+ * zero is regarded as a disjoint event.
+ *
+ * The disjoint event counter is also incremented immediately whenever a job is soft stopped
+ * and during context creation.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_init(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events.
+ * Called when a disjoint event has happened.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event(struct kbase_device *kbdev);
+
+/**
+ * Increase the count of disjoint events only if the GPU is in a disjoint state
+ *
+ * This should be called when something happens which could be disjoint if the GPU
+ * is in a disjoint state. The state refcount keeps track of this.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev);
+
+/**
+ * Returns the count of disjoint events
+ *
+ * @param kbdev The kbase device
+ * @return the count of disjoint events
+ */
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev);
+
+/**
+ * Increment the refcount state indicating that the GPU is in a disjoint state.
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
+ * Once the disjoint state has completed, @ref kbase_disjoint_state_down
+ * should be called.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_up(struct kbase_device *kbdev);
+
+/**
+ * Decrement the refcount state.
+ *
+ * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
+ *
+ * Called after @ref kbase_disjoint_state_up once the disjoint state is over.
+ *
+ * @param kbdev The kbase device
+ */
+void kbase_disjoint_state_down(struct kbase_device *kbdev);
+
+/**
+ * If a job is soft-stopped and the number of contexts is >= this value,
+ * it is reported as a disjoint event.
+ */
+#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2
+
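
As a sketch of the intended usage (not part of this patch), the disjoint helpers declared above combine as follows; example_gpu_reset() and example_submit() are hypothetical call sites:

static void example_gpu_reset(struct kbase_device *kbdev)
{
	/* Entering the disjoint state also bumps the event count */
	kbase_disjoint_state_up(kbdev);

	/* ... perform the reset ... */

	/* Leaving the state bumps the count again */
	kbase_disjoint_state_down(kbdev);
}

static void example_submit(struct kbase_device *kbdev)
{
	/* Counted as a disjoint event only while the state refcount is > 0 */
	kbase_disjoint_event_potential(kbdev);

	/* ... submit the atom ... */
}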
+#if KBASE_TRACE_ENABLE
+#ifndef CONFIG_MALI_SYSTEM_TRACE
 /** Add trace values about a job-slot
  *
  * @note Any functions called through this macro will still be evaluated in
@@ -349,42 +452,42 @@ static INLINE int kbase_jd_atom_id(kbase_context *kctx, kbase_jd_atom *katom)
        kbasep_trace_dump(kbdev)
 
 /** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
-void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
 /** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
-void kbasep_trace_clear(kbase_device *kbdev);
-#else
-#ifdef CONFIG_MALI_SYSTEM_TRACE
+void kbasep_trace_clear(struct kbase_device *kbdev);
+#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
 /* Dispatch kbase trace events as system trace events */
 #include <mali_linux_kbase_trace.h>
-#define KBASE_TRACE_ADD_SLOT( kbdev, code, ctx, katom, gpu_addr, jobslot )\
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
        trace_mali_##code(jobslot, 0)
 
-#define KBASE_TRACE_ADD_SLOT_INFO( kbdev, code, ctx, katom, gpu_addr, jobslot, info_val )\
+#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
        trace_mali_##code(jobslot, info_val)
 
-#define KBASE_TRACE_ADD_REFCOUNT( kbdev, code, ctx, katom, gpu_addr, refcount )\
+#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
        trace_mali_##code(refcount, 0)
 
-#define KBASE_TRACE_ADD_REFCOUNT_INFO( kbdev, code, ctx, katom, gpu_addr, refcount, info_val )\
+#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
        trace_mali_##code(refcount, info_val)
 
-#define KBASE_TRACE_ADD( kbdev, code, ctx, katom, gpu_addr, info_val )\
+#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
        trace_mali_##code(gpu_addr, info_val)
 
-#define KBASE_TRACE_CLEAR( kbdev )\
-       do{\
+#define KBASE_TRACE_CLEAR(kbdev)\
+       do {\
                CSTD_UNUSED(kbdev);\
                CSTD_NOP(0);\
-       }while(0)
-#define KBASE_TRACE_DUMP( kbdev )\
-       do{\
+       } while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+       do {\
                CSTD_UNUSED(kbdev);\
                CSTD_NOP(0);\
-       }while(0)
+       } while (0)
 
-#else /* CONFIG_MALI_SYSTEM_TRACE */
-#define KBASE_TRACE_ADD_SLOT( kbdev, code, ctx, katom, gpu_addr, jobslot )\
-       do{\
+#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
+#else
+#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
+       do {\
                CSTD_UNUSED(kbdev);\
                CSTD_NOP(code);\
                CSTD_UNUSED(ctx);\
@@ -436,20 +539,19 @@ void kbasep_trace_clear(kbase_device *kbdev);
                CSTD_UNUSED(katom);\
                CSTD_UNUSED(val);\
                CSTD_NOP(0);\
-       }while(0)
+       } while (0)
 
-#define KBASE_TRACE_CLEAR( kbdev )\
-       do{\
+#define KBASE_TRACE_CLEAR(kbdev)\
+       do {\
                CSTD_UNUSED(kbdev);\
                CSTD_NOP(0);\
-       }while(0)
-#define KBASE_TRACE_DUMP( kbdev )\
-       do{\
+       } while (0)
+#define KBASE_TRACE_DUMP(kbdev)\
+       do {\
                CSTD_UNUSED(kbdev);\
                CSTD_NOP(0);\
-       }while(0)
-#endif /* CONFIG_MALI_SYSTEM_TRACE */
-#endif
+       } while (0)
+#endif /* KBASE_TRACE_ENABLE */
 /** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
-void kbasep_trace_dump(kbase_device *kbdev);
+void kbasep_trace_dump(struct kbase_device *kbdev);
 #endif
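
For orientation (not part of the patch), a hedged example of how the trace macros above are typically invoked; JM_SUBMIT is used here as an illustrative trace code and js as a job-slot index. Depending on KBASE_TRACE_ENABLE and CONFIG_MALI_SYSTEM_TRACE, the call expands to an internal ring-buffer entry, a Linux system trace event, or a no-op:

	/* Record that 'katom' was submitted to job slot 'js' (illustrative only) */
	KBASE_TRACE_ADD_SLOT(kbdev, JM_SUBMIT, kctx, katom, katom->jc, js);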
index 6d52dcfcf362487898887d038b1eca671dff93be..8048fca78952da59228bb0b83575c4cd1473c21e 100755 (executable)
@@ -14,8 +14,9 @@
  */
 
 
-
+#include <linux/dma-mapping.h>
 #include <mali_kbase.h>
+#include <mali_kbase_10969_workaround.h>
 
 /* This function is used to solve an HW issue with single iterator GPUs.
  * If a fragment job is soft-stopped on the edge of its bounding box, can happen that the
 /* Word 9: Maximum Tile Coordinates */
 #define FRAG_JOB_DESC_MAX_TILE_COORD_WORD 9
 
-int kbasep_10969_workaround_clamp_coordinates(kbase_jd_atom *katom)
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
 {
        struct device *dev = katom->kctx->kbdev->dev;
        u32   clamped = 0;
+       struct kbase_va_region *region;
+       phys_addr_t *page_array;
+       u64 page_index;
+       u32 offset = katom->jc & (~PAGE_MASK);
+       u32 *page_1 = NULL;
+       u32 *page_2 = NULL;
+       u32   job_header[JOB_HEADER_SIZE_IN_WORDS];
+       void *dst = job_header;
+       u32 minX, minY, maxX, maxY;
+       u32 restartX, restartY;
+       struct page *p;
+       u32 copy_size;
+
        dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n");
-       if (katom->core_req & BASE_JD_REQ_FS){
-               kbase_va_region *region;
-
-               kbase_gpu_vm_lock(katom->kctx);
-               region = kbase_region_tracker_find_region_enclosing_address(katom->kctx, katom->jc);
-
-               if (region){
-                       phys_addr_t * page_array = kbase_get_phy_pages(region);
-
-                       if (page_array){
-                               u64 page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;
-                               u32 offset = katom->jc & (~PAGE_MASK);
-                               u32 * page_1 = NULL;
-                               u32 * page_2 = NULL;
-                               u32   job_header[JOB_HEADER_SIZE_IN_WORDS];
-                               void* dst = job_header;
-
-                               /* we need the first 10 words of the fragment shader job descriptor. We need to check
-                                * that the offset + 10 words is less that the page size otherwise we need to load the next
-                                * page. page_size_overflow will be equal to 0 in case the whole descriptor is within the page
-                                * >0 otherwise.
-                                */
-                               u32 copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE);
-
-                               page_1 = kmap_atomic(pfn_to_page(PFN_DOWN(page_array[page_index])));
-
-                               /* page_1 is a u32 pointer, offset is expressed in bytes */
-                               page_1 += offset>>2;
-                               kbase_sync_to_cpu(page_array[page_index] + offset, page_1, copy_size);
-                               memcpy(dst, page_1, copy_size);
-
-                               /* The data needed overflows page the dimension, need to map the subsequent page */
-                               if (copy_size < JOB_HEADER_SIZE){
-                                       page_2 = kmap_atomic(pfn_to_page(PFN_DOWN(page_array[page_index + 1])));
-
-                                       kbase_sync_to_cpu(page_array[page_index + 1], page_2, JOB_HEADER_SIZE - copy_size);
-                                       memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size);
-                               }
-
-                               /* We managed to correctly map one or two pages (in case of overflow ) */
-                               {
-                                       u32 minX,minY,maxX,maxY;
-                                       u32 restartX,restartY;
-
-                                       /* Get Bounding Box data and restart index from fault address low word*/
-                                       minX     = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK;
-                                       minY     = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK;
-                                       maxX     = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK;
-                                       maxY     = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK;
-                                       restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK;
-                                       restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK;
-
-                                       dev_warn(dev, "Before Clamping: \n" \
-                                                     "Jobstatus: %08x  \n" \
-                                                     "restartIdx: %08x  \n" \
-                                                     "Fault_addr_low: %08x \n" \
-                                                     "minCoordsX: %08x minCoordsY: %08x \n" \
-                                                     "maxCoordsX: %08x maxCoordsY: %08x \n", 
-                                                     job_header[JOB_DESC_STATUS_WORD],
-                                                     job_header[JOB_DESC_RESTART_INDEX_WORD],
-                                                     job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
-                                                     minX,minY,
-                                                     maxX,maxY );
-
-                                       /* Set the restart index to the one which generated the fault*/
-                                       job_header[JOB_DESC_RESTART_INDEX_WORD] = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD];
-
-                                       if (restartX < minX){
-                                               job_header[JOB_DESC_RESTART_INDEX_WORD] = (minX) | restartY;
-                                               dev_warn(dev,
-                                                        "Clamping restart X index to minimum. %08x clamped to %08x \n",
-                                                        restartX, minX );
-                                               clamped =  1;
-                                       }
-                                       if (restartY < minY){
-                                               job_header[JOB_DESC_RESTART_INDEX_WORD] = (minY) | restartX;
-                                               dev_warn(dev,
-                                                        "Clamping restart Y index to minimum. %08x clamped to %08x \n",
-                                                        restartY, minY );
-                                               clamped =  1;
-                                       }
-                                       if (restartX > maxX){
-                                               job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxX) | restartY;
-                                               dev_warn(dev,
-                                                        "Clamping restart X index to maximum. %08x clamped to %08x \n",
-                                                        restartX, maxX );
-                                               clamped =  1;
-                                       }
-                                       if (restartY > maxY){
-                                               job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxY) | restartX;
-                                               dev_warn(dev,
-                                                        "Clamping restart Y index to maximum. %08x clamped to %08x \n",
-                                                        restartY, maxY );
-                                               clamped =  1;
-                                       }
-
-                                       if (clamped){
-                                               /* Reset the fault address low word and set the job status to STOPPED */
-                                               job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0;
-                                               job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED;
-                                               dev_warn(dev, "After Clamping: \n"                   \
-                                                             "Jobstatus: %08x  \n"                  \
-                                                             "restartIdx: %08x  \n"                 \
-                                                             "Fault_addr_low: %08x \n"              \
-                                                             "minCoordsX: %08x minCoordsY: %08x \n" \
-                                                             "maxCoordsX: %08x maxCoordsY: %08x \n", 
-                                                             job_header[JOB_DESC_STATUS_WORD],
-                                                             job_header[JOB_DESC_RESTART_INDEX_WORD],
-                                                             job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
-                                                             minX,minY,
-                                                             maxX,maxY );
-
-                                               /* Flush CPU cache to update memory for future GPU reads*/
-                                               memcpy(page_1, dst, copy_size);
-                                               kbase_sync_to_memory(page_array[page_index] + offset, page_1, copy_size);
-
-                                               if (copy_size < JOB_HEADER_SIZE){
-                                                        memcpy(page_2, dst + copy_size, JOB_HEADER_SIZE - copy_size);
-                                                        kbase_sync_to_memory(page_array[page_index + 1], page_2, JOB_HEADER_SIZE - copy_size);
-                                               }
-
-                                       }
-                               }
-                               if (copy_size < JOB_HEADER_SIZE) 
-                                       kunmap_atomic(page_2);
-
-                               kunmap_atomic(page_1);
-                       }
+       if (!(katom->core_req & BASE_JD_REQ_FS))
+               return 0;
+
+       kbase_gpu_vm_lock(katom->kctx);
+       region = kbase_region_tracker_find_region_enclosing_address(katom->kctx,
+                       katom->jc);
+       if (!region || (region->flags & KBASE_REG_FREE))
+               goto out_unlock;
+
+       page_array = kbase_get_phy_pages(region);
+       if (!page_array)
+               goto out_unlock;
+
+       page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;
+
+       p = pfn_to_page(PFN_DOWN(page_array[page_index]));
+
+       /* We need the first 10 words of the fragment shader job descriptor.
+        * We need to check that offset + 10 words is less than the page
+        * size, otherwise we need to load the next page as well.
+        * copy_size equals JOB_HEADER_SIZE when the whole descriptor is
+        * within this page, and is smaller otherwise.
+        */
+       copy_size = MIN(PAGE_SIZE - offset, JOB_HEADER_SIZE);
+
+       page_1 = kmap_atomic(p);
+
+       /* page_1 is a u32 pointer, offset is expressed in bytes */
+       page_1 += offset>>2;
+       dma_sync_single_for_cpu(katom->kctx->kbdev->dev,
+                       page_private(p) + offset,
+                       copy_size, DMA_BIDIRECTIONAL);
+       memcpy(dst, page_1, copy_size);
+
+       /* The data needed overflows the page boundary,
+        * so map the subsequent page as well */
+       if (copy_size < JOB_HEADER_SIZE) {
+               p = pfn_to_page(PFN_DOWN(page_array[page_index + 1]));
+               page_2 = kmap_atomic(p);
+
+               dma_sync_single_for_cpu(katom->kctx->kbdev->dev,
+                               page_private(p),
+                               JOB_HEADER_SIZE - copy_size, DMA_BIDIRECTIONAL);
+               memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size);
+       }
+
+       /* We managed to correctly map one or two pages (in case of overflow) */
+       /* Get Bounding Box data and restart index from fault address low word */
+       minX = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & X_COORDINATE_MASK;
+       minY = job_header[FRAG_JOB_DESC_MIN_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+       maxX = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & X_COORDINATE_MASK;
+       maxY = job_header[FRAG_JOB_DESC_MAX_TILE_COORD_WORD] & Y_COORDINATE_MASK;
+       restartX = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & X_COORDINATE_MASK;
+       restartY = job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] & Y_COORDINATE_MASK;
+
+       dev_warn(dev, "Before Clamping:\n"
+                       "Jobstatus: %08x\n"
+                       "restartIdx: %08x\n"
+                       "Fault_addr_low: %08x\n"
+                       "minCoordsX: %08x minCoordsY: %08x\n"
+                       "maxCoordsX: %08x maxCoordsY: %08x\n",
+                       job_header[JOB_DESC_STATUS_WORD],
+                       job_header[JOB_DESC_RESTART_INDEX_WORD],
+                       job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+                       minX, minY,
+                       maxX, maxY);
+
+       /* Set the restart index to the one which generated the fault*/
+       job_header[JOB_DESC_RESTART_INDEX_WORD] =
+                       job_header[JOB_DESC_FAULT_ADDR_LOW_WORD];
+
+       if (restartX < minX) {
+               job_header[JOB_DESC_RESTART_INDEX_WORD] = (minX) | restartY;
+               dev_warn(dev,
+                       "Clamping restart X index to minimum. %08x clamped to %08x\n",
+                       restartX, minX);
+               clamped =  1;
+       }
+       if (restartY < minY) {
+               job_header[JOB_DESC_RESTART_INDEX_WORD] = (minY) | restartX;
+               dev_warn(dev,
+                       "Clamping restart Y index to minimum. %08x clamped to %08x\n",
+                       restartY, minY);
+               clamped =  1;
+       }
+       if (restartX > maxX) {
+               job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxX) | restartY;
+               dev_warn(dev,
+                       "Clamping restart X index to maximum. %08x clamped to %08x\n",
+                       restartX, maxX);
+               clamped =  1;
+       }
+       if (restartY > maxY) {
+               job_header[JOB_DESC_RESTART_INDEX_WORD] = (maxY) | restartX;
+               dev_warn(dev,
+                       "Clamping restart Y index to maximum. %08x clamped to %08x\n",
+                       restartY, maxY);
+               clamped =  1;
+       }
+
+       if (clamped) {
+               /* Reset the fault address low word
+                * and set the job status to STOPPED */
+               job_header[JOB_DESC_FAULT_ADDR_LOW_WORD] = 0x0;
+               job_header[JOB_DESC_STATUS_WORD] = BASE_JD_EVENT_STOPPED;
+               dev_warn(dev, "After Clamping:\n"
+                               "Jobstatus: %08x\n"
+                               "restartIdx: %08x\n"
+                               "Fault_addr_low: %08x\n"
+                               "minCoordsX: %08x minCoordsY: %08x\n"
+                               "maxCoordsX: %08x maxCoordsY: %08x\n",
+                               job_header[JOB_DESC_STATUS_WORD],
+                               job_header[JOB_DESC_RESTART_INDEX_WORD],
+                               job_header[JOB_DESC_FAULT_ADDR_LOW_WORD],
+                               minX, minY,
+                               maxX, maxY);
+
+               /* Flush CPU cache to update memory for future GPU reads*/
+               memcpy(page_1, dst, copy_size);
+               p = pfn_to_page(PFN_DOWN(page_array[page_index]));
+               dma_sync_single_for_device(katom->kctx->kbdev->dev,
+                               page_private(p) + offset,
+                               copy_size, DMA_TO_DEVICE);
+
+               if (copy_size < JOB_HEADER_SIZE) {
+                       memcpy(page_2, dst + copy_size,
+                                       JOB_HEADER_SIZE - copy_size);
+                       p = pfn_to_page(PFN_DOWN(page_array[page_index + 1]));
+                       dma_sync_single_for_device(katom->kctx->kbdev->dev,
+                                       page_private(p),
+                                       JOB_HEADER_SIZE - copy_size,
+                                       DMA_TO_DEVICE);
                }
-               kbase_gpu_vm_unlock(katom->kctx);
        }
+       if (copy_size < JOB_HEADER_SIZE)
+               kunmap_atomic(page_2);
+
+       kunmap_atomic(page_1);
+
+out_unlock:
+       kbase_gpu_vm_unlock(katom->kctx);
        return clamped;
 }
index 85184c9f316311d2910b72fd10238dd747f51665..90bd027be30af875b64bf1af23d3595accd051e1 100755 (executable)
@@ -18,6 +18,6 @@
 #ifndef _KBASE_10969_WORKAROUND_
 #define _KBASE_10969_WORKAROUND_
 
-int kbasep_10969_workaround_clamp_coordinates( kbase_jd_atom * katom );
+int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom);
 
 #endif /* _KBASE_10969_WORKAROUND_ */
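
A sketch of the expected caller (a fragment, not shown in this patch): when a fragment job reports a tile-range fault on hardware affected by the 10969 erratum, the restart coordinates are clamped and, if anything was clamped, the job is treated as soft-stopped so it can be resubmitted. The exact shape of the caller is an approximation:

	if (katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT &&
	    kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) &&
	    (katom->core_req & BASE_JD_REQ_FS)) {
		if (kbasep_10969_workaround_clamp_coordinates(katom)) {
			/* The descriptor was patched; mark the job as stopped
			 * so it can re-run from the clamped restart index. */
			katom->event_code = BASE_JD_EVENT_STOPPED;
		}
	}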
index fe9f027645592d0dbaf38e7775e3f0e6f987660a..a5ca26fad6063af28f8e587fa6d13d089dde3cac 100755 (executable)
  * changed if we need to introduce more attributes or many memory regions need to be defined */
 #define ATTRIBUTE_COUNT_MAX 32
 
-/* Limits for gpu frequency configuration parameters. These will use for config validation. */
-#define MAX_GPU_ALLOWED_FREQ_KHZ 1000000
-#define MIN_GPU_ALLOWED_FREQ_KHZ 1
-
-int kbasep_get_config_attribute_count(const kbase_attribute *attributes)
+int kbasep_get_config_attribute_count(const struct kbase_attribute *attributes)
 {
        int count = 1;
 
@@ -46,7 +42,7 @@ int kbasep_get_config_attribute_count(const kbase_attribute *attributes)
        return count;
 }
 
-const kbase_attribute *kbasep_get_next_attribute(const kbase_attribute *attributes, int attribute_id)
+const struct kbase_attribute *kbasep_get_next_attribute(const struct kbase_attribute *attributes, int attribute_id)
 {
        KBASE_DEBUG_ASSERT(attributes != NULL);
 
@@ -61,9 +57,9 @@ const kbase_attribute *kbasep_get_next_attribute(const kbase_attribute *attribut
 
 KBASE_EXPORT_TEST_API(kbasep_get_next_attribute)
 
-uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const kbase_attribute *attributes, int attribute_id)
+uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const struct kbase_attribute *attributes, int attribute_id)
 {
-       const kbase_attribute *attr;
+       const struct kbase_attribute *attr;
 
        KBASE_DEBUG_ASSERT(attributes != NULL);
 
@@ -73,8 +69,6 @@ uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const kbase_attrib
 
        /* default values */
        switch (attribute_id) {
-       case KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US:
-               return DEFAULT_IRQ_THROTTLE_TIME_US;
                /* Begin scheduling defaults */
        case KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS:
                return DEFAULT_JS_SCHEDULING_TICK_NS;
@@ -93,10 +87,6 @@ uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const kbase_attrib
                return DEFAULT_JS_HARD_STOP_TICKS_NSS;
        case KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS:
                return DEFAULT_JS_CTX_TIMESLICE_NS;
-       case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES:
-               return DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES;
-       case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES:
-               return DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES;
        case KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS:
                if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8408))
                        return DEFAULT_JS_RESET_TICKS_SS_HW_ISSUE_8408;
@@ -113,16 +103,10 @@ uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const kbase_attrib
                return 0;
        case KBASE_CONFIG_ATTR_PLATFORM_FUNCS:
                return 0;
-       case KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE:
-               return DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
        case KBASE_CONFIG_ATTR_CPU_SPEED_FUNC:
                return DEFAULT_CPU_SPEED_FUNC;
        case KBASE_CONFIG_ATTR_GPU_SPEED_FUNC:
                return 0;
-       case KBASE_CONFIG_ATTR_ARID_LIMIT:
-               return DEFAULT_ARID_LIMIT;
-       case KBASE_CONFIG_ATTR_AWID_LIMIT:
-               return DEFAULT_AWID_LIMIT;
        case KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ:
                return DEFAULT_PM_DVFS_FREQ;
        case KBASE_CONFIG_ATTR_PM_GPU_POWEROFF_TICK_NS:
@@ -131,6 +115,8 @@ uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const kbase_attrib
                return DEFAULT_PM_POWEROFF_TICK_SHADER;
        case KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_GPU:
                return DEFAULT_PM_POWEROFF_TICK_GPU;
+       case KBASE_CONFIG_ATTR_POWER_MODEL_CALLBACKS:
+               return 0;
 
        default:
                dev_err(kbdev->dev, "kbasep_get_config_value. Cannot get value of attribute with id=%d and no default value defined", attribute_id);
@@ -140,11 +126,11 @@ uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const kbase_attrib
 
 KBASE_EXPORT_TEST_API(kbasep_get_config_value)
 
-mali_bool kbasep_platform_device_init(kbase_device *kbdev)
+mali_bool kbasep_platform_device_init(struct kbase_device *kbdev)
 {
-       kbase_platform_funcs_conf *platform_funcs;
+       struct kbase_platform_funcs_conf *platform_funcs;
 
-       platform_funcs = (kbase_platform_funcs_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PLATFORM_FUNCS);
+       platform_funcs = (struct kbase_platform_funcs_conf *)kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PLATFORM_FUNCS);
        if (platform_funcs) {
                if (platform_funcs->platform_init_func)
                        return platform_funcs->platform_init_func(kbdev);
@@ -152,31 +138,18 @@ mali_bool kbasep_platform_device_init(kbase_device *kbdev)
        return MALI_TRUE;
 }
 
-void kbasep_platform_device_term(kbase_device *kbdev)
+void kbasep_platform_device_term(struct kbase_device *kbdev)
 {
-       kbase_platform_funcs_conf *platform_funcs;
+       struct kbase_platform_funcs_conf *platform_funcs;
 
-       platform_funcs = (kbase_platform_funcs_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PLATFORM_FUNCS);
+       platform_funcs = (struct kbase_platform_funcs_conf *)kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PLATFORM_FUNCS);
        if (platform_funcs) {
                if (platform_funcs->platform_term_func)
                        platform_funcs->platform_term_func(kbdev);
        }
 }
 
-static mali_bool kbasep_validate_gpu_clock_freq(kbase_device *kbdev, const kbase_attribute *attributes)
-{
-       uintptr_t freq_min = kbasep_get_config_value(kbdev, attributes, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN);
-       uintptr_t freq_max = kbasep_get_config_value(kbdev, attributes, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX);
-
-       if ((freq_min > MAX_GPU_ALLOWED_FREQ_KHZ) || (freq_min < MIN_GPU_ALLOWED_FREQ_KHZ) || (freq_max > MAX_GPU_ALLOWED_FREQ_KHZ) || (freq_max < MIN_GPU_ALLOWED_FREQ_KHZ) || (freq_min > freq_max)) {
-               dev_warn(kbdev->dev, "Invalid GPU frequencies found in configuration: min=%ldkHz, max=%ldkHz.", freq_min, freq_max);
-               return MALI_FALSE;
-       }
-
-       return MALI_TRUE;
-}
-
-static mali_bool kbasep_validate_pm_callback(const kbase_pm_callback_conf *callbacks, const kbase_device * kbdev )
+static mali_bool kbasep_validate_pm_callback(const struct kbase_pm_callback_conf *callbacks, const struct kbase_device *kbdev)
 {
        if (callbacks == NULL) {
                /* Having no callbacks is valid */
@@ -195,10 +168,9 @@ static mali_bool kbasep_validate_cpu_speed_func(kbase_cpuprops_clock_speed_funct
        return fcn != NULL;
 }
 
-mali_bool kbasep_validate_configuration_attributes(kbase_device *kbdev, const kbase_attribute *attributes)
+mali_bool kbasep_validate_configuration_attributes(struct kbase_device *kbdev, const struct kbase_attribute *attributes)
 {
        int i;
-       mali_bool had_gpu_freq_min = MALI_FALSE, had_gpu_freq_max = MALI_FALSE;
 
        KBASE_DEBUG_ASSERT(attributes);
 
@@ -209,30 +181,13 @@ mali_bool kbasep_validate_configuration_attributes(kbase_device *kbdev, const kb
                }
 
                switch (attributes[i].id) {
-               case KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN:
-                       had_gpu_freq_min = MALI_TRUE;
-                       if (MALI_FALSE == kbasep_validate_gpu_clock_freq(kbdev, attributes)) {
-                               /* Warning message handled by kbasep_validate_gpu_clock_freq() */
-                               return MALI_FALSE;
-                       }
-                       break;
-
-               case KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX:
-                       had_gpu_freq_max = MALI_TRUE;
-                       if (MALI_FALSE == kbasep_validate_gpu_clock_freq(kbdev, attributes)) {
-                               /* Warning message handled by kbasep_validate_gpu_clock_freq() */
-                               return MALI_FALSE;
-                       }
-                       break;
-
                        /* Only non-zero unsigned 32-bit values accepted */
                case KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS:
 #if CSTD_CPU_64BIT
-                       if (attributes[i].data == 0u || (u64) attributes[i].data > (u64) U32_MAX)
+                       if (attributes[i].data == 0u || (u64) attributes[i].data > (u64) U32_MAX) {
 #else
-                       if (attributes[i].data == 0u)
+                       if (attributes[i].data == 0u) {
 #endif
-                       {
                                dev_warn(kbdev->dev, "Invalid Job Scheduling Configuration attribute for " "KBASE_CONFIG_ATTR_JS_SCHEDULING_TICKS_NS: %d", (int)attributes[i].data);
                                return MALI_FALSE;
                        }
@@ -249,8 +204,6 @@ mali_bool kbasep_validate_configuration_attributes(kbase_device *kbdev, const kb
                case KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS:
                case KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS:
                case KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS:
-               case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES:
-               case KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES:
 #if CSTD_CPU_64BIT
                        if ((u64) attributes[i].data > (u64) U32_MAX) {
                                dev_warn(kbdev->dev, "Job Scheduling Configuration attribute exceeds 32-bits: " "id==%d val==%d", attributes[i].id, (int)attributes[i].data);
@@ -259,29 +212,13 @@ mali_bool kbasep_validate_configuration_attributes(kbase_device *kbdev, const kb
 #endif
                        break;
 
-               case KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US:
-#if CSTD_CPU_64BIT
-                       if ((u64) attributes[i].data > (u64) U32_MAX) {
-                               dev_warn(kbdev->dev, "IRQ throttle time attribute exceeds 32-bits: " "id==%d val==%d", attributes[i].id, (int)attributes[i].data);
-                               return MALI_FALSE;
-                       }
-#endif
-                       break;
-
                case KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS:
-                       if (MALI_FALSE == kbasep_validate_pm_callback((kbase_pm_callback_conf *) attributes[i].data, kbdev)) {
+                       if (MALI_FALSE == kbasep_validate_pm_callback((struct kbase_pm_callback_conf *)attributes[i].data, kbdev)) {
                                /* Warning message handled by kbasep_validate_pm_callback() */
                                return MALI_FALSE;
                        }
                        break;
 
-               case KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE:
-                       if (attributes[i].data != MALI_TRUE && attributes[i].data != MALI_FALSE) {
-                               dev_warn(kbdev->dev, "Value for KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE was not " "MALI_TRUE or MALI_FALSE: %u", (unsigned int)attributes[i].data);
-                               return MALI_FALSE;
-                       }
-                       break;
-
                case KBASE_CONFIG_ATTR_CPU_SPEED_FUNC:
                        if (MALI_FALSE == kbasep_validate_cpu_speed_func((kbase_cpuprops_clock_speed_function) attributes[i].data)) {
                                dev_warn(kbdev->dev, "Invalid function pointer in KBASE_CONFIG_ATTR_CPU_SPEED_FUNC");
@@ -300,14 +237,6 @@ mali_bool kbasep_validate_configuration_attributes(kbase_device *kbdev, const kb
                        /* any value is allowed */
                        break;
 
-               case KBASE_CONFIG_ATTR_AWID_LIMIT:
-               case KBASE_CONFIG_ATTR_ARID_LIMIT:
-                       if ((u32) attributes[i].data > 0x3) {
-                               dev_warn(kbdev->dev, "Invalid AWID or ARID limit");
-                               return MALI_FALSE;
-                       }
-                       break;
-
                case KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ:
 #if CSTD_CPU_64BIT
                        if ((u64) attributes[i].data > (u64) U32_MAX) {
@@ -338,21 +267,19 @@ mali_bool kbasep_validate_configuration_attributes(kbase_device *kbdev, const kb
 #endif
                        break;
 
+               case KBASE_CONFIG_ATTR_POWER_MODEL_CALLBACKS:
+                       if (0 == attributes[i].data) {
+                               dev_warn(kbdev->dev, "Power model callbacks is specified but NULL: " "id==%d val==%d",
+                                               attributes[i].id, (int)attributes[i].data);
+                               return MALI_FALSE;
+                       }
+                       break;
+
                default:
                        dev_warn(kbdev->dev, "Invalid attribute found in configuration: %d", attributes[i].id);
                        return MALI_FALSE;
                }
        }
 
-       if (!had_gpu_freq_min) {
-               dev_warn(kbdev->dev, "Configuration does not include mandatory attribute KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN");
-               return MALI_FALSE;
-       }
-
-       if (!had_gpu_freq_max) {
-               dev_warn(kbdev->dev, "Configuration does not include mandatory attribute KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX");
-               return MALI_FALSE;
-       }
-
        return MALI_TRUE;
 }
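
As a sketch (not part of the patch) of how the attribute helpers above are used: a platform supplies a struct kbase_attribute table terminated by KBASE_CONFIG_ATTR_END, and kbasep_get_config_value() falls back to the compiled-in defaults for anything not listed. The values below are placeholders, not recommendations:

static struct kbase_attribute example_config_attributes[] = {
	{
		KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ,
		500 /* placeholder value */
	},
	{
		KBASE_CONFIG_ATTR_END,
		0
	}
};

static uintptr_t example_read_dvfs_freq(struct kbase_device *kbdev)
{
	/* Attributes not present in the table fall back to the defaults
	 * compiled into kbasep_get_config_value() */
	return kbasep_get_config_value(kbdev, kbdev->config_attributes,
			KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ);
}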
index 52d9eda78575e6feecf39ceef48d156c7ec62478..6e51210d5171c7c50b8f559a0375877f395cbe8a 100755 (executable)
  * @{
  */
 
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
 /* This flag is set for internal builds so we can run tests without credentials. */
 #define KBASE_HWCNT_DUMP_BYPASS_ROOT 1
 #else
 #define KBASE_HWCNT_DUMP_BYPASS_ROOT 0
 #endif
 
+/* Enable power management API, note that KBASE_PM_EN==0 is not supported */
+#define KBASE_PM_EN 1
+/* Enable GPU reset API, note that KBASE_GPU_RESET_EN==0 is not supported */
+#define KBASE_GPU_RESET_EN 1
+/* Enable HW MMU backend, note that KBASE_MMU_HW_BACKEND==0 is not supported */
+#define KBASE_MMU_HW_BACKEND 1
+
+
 #include <linux/rbtree.h>
 
+
 /**
  * Device wide configuration
  */
@@ -63,35 +72,6 @@ enum {
         * */
        KBASE_CONFIG_ATTR_INVALID,
 
-       /**
-        * Maximum frequency GPU will be clocked at. Given in kHz.
-        * This must be specified as there is no default value.
-        *
-        * Attached value: number in kHz
-        * Default value: NA
-        */
-       KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX,
-
-       /**
-        * Minimum frequency GPU will be clocked at. Given in kHz.
-        * This must be specified as there is no default value.
-        *
-        * Attached value: number in kHz
-        * Default value: NA
-        */
-       KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN,
-
-       /**
-        * Irq throttle. It is the minimum desired time in between two
-        * consecutive gpu interrupts (given in 'us'). The irq throttle
-        * gpu register will be configured after this, taking into
-        * account the configured max frequency.
-        *
-        * Attached value: number in micro seconds
-        * Default value: see DEFAULT_IRQ_THROTTLE_TIME_US
-        */
-       KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US,
-
        /*** Begin Job Scheduling Configs ***/
        /**
         * Job Scheduler scheduling tick granuality. This is in nanoseconds to
@@ -262,9 +242,9 @@ enum {
         * often used by the OS.
         *
         * This value controls affects the actual time defined by the following
-        * config values:
-        * - @ref KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES
-        * - @ref KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES
+        * defaults:
+        * - @ref DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES
+        * - @ref DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES
         *
         * This value is supported by the following scheduling policies:
         * - The Completely Fair Share (CFS) policy
@@ -274,57 +254,14 @@ enum {
         * Default value: @ref DEFAULT_JS_CTX_TIMESLICE_NS
         *
         * @note a value of zero models a "Round Robin" scheduling policy, and
-        * disables @ref KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES
+        * disables @ref DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES
         * (initially causing LIFO scheduling) and
-        * @ref KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES (allowing
+        * @ref DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES (allowing
         * not-run-often contexts to get scheduled in quickly, but to only use
         * a single timeslice when they get scheduled in).
         */
        KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS,
 
-       /**
-        * Job Scheduler initial runtime of a context for the CFS Policy, in time-slices.
-        *
-        * This value is relative to that of the least-run context, and defines
-        * where in the CFS queue a new context is added. A value of 1 means 'after
-        * the least-run context has used its timeslice'. Therefore, when all
-        * contexts consistently use the same amount of time, a value of 1 models a
-        * FIFO. A value of 0 would model a LIFO.
-        *
-        * The value is represented in "numbers of time slices". Multiply this
-        * value by that defined in @ref KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS to get
-        * the time value for this in nanoseconds.
-        *
-        * Attached value: unsigned 32-bit kbasep_js_device_data::cfs_ctx_runtime_init_slices<br>
-        * Default value: @ref DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES
-        */
-       KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES,
-
-       /**
-        * Job Scheduler minimum runtime value of a context for CFS, in time_slices
-        * relative to that of the least-run context.
-        *
-        * This is a measure of how much preferrential treatment is given to a
-        * context that is not run very often.
-        *
-        * Specficially, this value defines how many timeslices such a context is
-        * (initially) allowed to use at once. Such contexts (e.g. 'interactive'
-        * processes) will appear near the front of the CFS queue, and can initially
-        * use more time than contexts that run continuously (e.g. 'batch'
-        * processes).
-        *
-        * This limit \b prevents a "stored-up timeslices" DoS attack, where a ctx
-        * not run for a long time attacks the system by using a very large initial
-        * number of timeslices when it finally does run.
-        *
-        * Attached value: unsigned 32-bit kbasep_js_device_data::cfs_ctx_runtime_min_slices<br>
-        * Default value: @ref DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES
-        *
-        * @note A value of zero allows not-run-often contexts to get scheduled in
-        * quickly, but to only use a single timeslice when they get scheduled in.
-        */
-       KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES,
-
        /**
         * Job Scheduler minimum number of scheduling ticks before non-CL jobs
         * cause the GPU to be reset.
@@ -399,30 +336,6 @@ enum {
         */
        KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS,
 
-       /**
-        * Boolean indicating whether the driver is configured to be secure at
-        * a potential loss of performance.
-        *
-        * This currently affects only r0p0-15dev0 HW and earlier.
-        *
-        * On r0p0-15dev0 HW and earlier, there are tradeoffs between security and
-        * performance:
-        *
-        * - When this is set to MALI_TRUE, the driver remains fully secure,
-        * but potentially loses performance compared with setting this to
-        * MALI_FALSE.
-        * - When set to MALI_FALSE, the driver is open to certain security
-        * attacks.
-        *
-        * From r0p0-00rel0 and onwards, there is no security loss by setting
-        * this to MALI_FALSE, and no performance loss by setting it to
-        * MALI_TRUE.
-        *
-        * Attached value: mali_bool value
-        * Default value: @ref DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE
-        */
-       KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE,
-
        /**
         * A pointer to a function that calculates the CPU clock
         * speed of the platform in MHz - see
@@ -442,8 +355,8 @@ enum {
         * prototype.
         *
         * Attached value: A @ref kbase_gpuprops_clock_speed_function.
-        * Default Value:  NULL (in which case the driver assumes a current
-        *                 GPU frequency specified by KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX)
+        * Default Value:  NULL (in which case the driver assumes maximum
+        *                 GPU frequency stored in gpu_freq_khz_max)
         */
        KBASE_CONFIG_ATTR_GPU_SPEED_FUNC,
 
@@ -455,32 +368,6 @@ enum {
         */
        KBASE_CONFIG_ATTR_PLATFORM_FUNCS,
 
-       /**
-        * Limit ARID width on the AXI bus.
-        *
-        * Attached value: u32 register value
-        *    KBASE_AID_32 - use the full 32 IDs (5 ID bits)
-        *    KBASE_AID_16 - use 16 IDs (4 ID bits)
-        *    KBASE_AID_8  - use 8 IDs (3 ID bits)
-        *    KBASE_AID_4  - use 4 IDs (2 ID bits)
-        * Default value: KBASE_AID_32 (no limit). Note hardware implementation
-        * may limit to a lower value.
-        */
-       KBASE_CONFIG_ATTR_ARID_LIMIT,
-
-       /**
-        * Limit AWID width on the AXI bus.
-        *
-        * Attached value: u32 register value
-        *    KBASE_AID_32 - use the full 32 IDs (5 ID bits)
-        *    KBASE_AID_16 - use 16 IDs (4 ID bits)
-        *    KBASE_AID_8  - use 8 IDs (3 ID bits)
-        *    KBASE_AID_4  - use 4 IDs (2 ID bits)
-        * Default value: KBASE_AID_32 (no limit). Note hardware implementation
-        * may limit to a lower value.
-        */
-       KBASE_CONFIG_ATTR_AWID_LIMIT,
-
        /**
         * Rate at which dvfs data should be collected.
         *
@@ -522,6 +409,12 @@ enum {
         */
        KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_GPU,
 
+       /** Power model for IPA
+        *
+        * Attached value: pointer to @ref mali_pa_model_ops
+        */
+       KBASE_CONFIG_ATTR_POWER_MODEL_CALLBACKS,
+
        /**
         * End of attribute list indicator.
         * The configuration loader will stop processing any more elements
@@ -532,31 +425,6 @@ enum {
        KBASE_CONFIG_ATTR_END = 0x1FFFUL
 };
 
-enum {
-       /**
-        * Use unrestricted Address ID width on the AXI bus.
-        */
-       KBASE_AID_32 = 0x0,
-
-       /**
-        * Restrict GPU to a half of maximum Address ID count.
-        * This will reduce performance, but reduce bus load due to GPU.
-        */
-       KBASE_AID_16 = 0x3,
-
-       /**
-        * Restrict GPU to a quarter of maximum Address ID count.
-        * This will reduce performance, but reduce bus load due to GPU.
-        */
-       KBASE_AID_8  = 0x2,
-
-       /**
-        * Restrict GPU to an eighth of maximum Address ID count.
-        * This will reduce performance, but reduce bus load due to GPU.
-        */
-       KBASE_AID_4  = 0x1
-};
-
 /*
  * @brief specifies a single attribute
  *
@@ -567,7 +435,7 @@ typedef struct kbase_attribute {
        uintptr_t data;
 } kbase_attribute;
 
-/* Forward declaration of kbase_device */
+/* Forward declaration of struct kbase_device */
 struct kbase_device;
 
 /*
@@ -578,18 +446,18 @@ struct kbase_device;
 typedef struct kbase_platform_funcs_conf {
        /**
         * Function pointer for platform specific initialization or NULL if no initialization function is required.
-        * This function will be called \em before any other callbacks listed in the kbase_attribute struct (such as
+        * This function will be called \em before any other callbacks listed in struct kbase_attribute (such as
         * Power Management callbacks).
         * The platform specific private pointer kbase_device::platform_context can be accessed (and possibly initialized) in here.
         */
        mali_bool(*platform_init_func) (struct kbase_device *kbdev);
        /**
         * Function pointer for platform specific termination or NULL if no termination function is required.
-        * This function will be called \em after any other callbacks listed in the kbase_attribute struct (such as
+        * This function will be called \em after any other callbacks listed in struct kbase_attribute (such as
         * Power Management callbacks).
         * The platform specific private pointer kbase_device::platform_context can be accessed (and possibly terminated) in here.
         */
-       void (*platform_term_func) (struct kbase_device *kbdev);
+       void (*platform_term_func)(struct kbase_device *kbdev);
 
 } kbase_platform_funcs_conf;
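
A sketch (not part of the patch) of how a platform might fill in kbase_platform_funcs_conf and attach it through the attribute list; the names and function bodies are placeholders:

static mali_bool example_platform_init(struct kbase_device *kbdev)
{
	/* set up clocks/regulators; may stash state in kbdev->platform_context */
	return MALI_TRUE;
}

static void example_platform_term(struct kbase_device *kbdev)
{
	/* undo example_platform_init() */
}

static struct kbase_platform_funcs_conf example_platform_funcs = {
	.platform_init_func = example_platform_init,
	.platform_term_func = example_platform_term,
};

/* Attached via the attribute list, e.g.:
 * { KBASE_CONFIG_ATTR_PLATFORM_FUNCS, (uintptr_t)&example_platform_funcs },
 */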
 
@@ -606,7 +474,7 @@ typedef struct kbase_pm_callback_conf {
         * The platform specific private pointer kbase_device::platform_context can be accessed and modified in here. It is the
         * platform \em callbacks responsiblity to initialize and terminate this pointer if used (see @ref kbase_platform_funcs_conf).
         */
-       void (*power_off_callback) (struct kbase_device *kbdev);
+       void (*power_off_callback)(struct kbase_device *kbdev);
 
        /** Callback for when the GPU is about to become active and power must be supplied.
         *
@@ -620,7 +488,7 @@ typedef struct kbase_pm_callback_conf {
         *
         * @return 1 if the GPU state may have been lost, 0 otherwise.
         */
-       int (*power_on_callback) (struct kbase_device *kbdev);
+       int (*power_on_callback)(struct kbase_device *kbdev);
 
        /** Callback for when the system is requesting a suspend and GPU power
         * must be switched off.
@@ -635,7 +503,7 @@ typedef struct kbase_pm_callback_conf {
         * callbacks responsibility to initialize and terminate this pointer if
         * used (see @ref kbase_platform_funcs_conf).
         */
-       void (*power_suspend_callback) (struct kbase_device *kbdev);
+       void (*power_suspend_callback)(struct kbase_device *kbdev);
 
        /** Callback for when the system is resuming from a suspend and GPU
         * power must be switched on.
@@ -650,7 +518,7 @@ typedef struct kbase_pm_callback_conf {
         * callbacks responsibility to initialize and terminate this pointer if
         * used (see @ref kbase_platform_funcs_conf).
         */
-       void (*power_resume_callback) (struct kbase_device *kbdev);
+       void (*power_resume_callback)(struct kbase_device *kbdev);
 
        /** Callback for handling runtime power management initialization.
         *
@@ -669,7 +537,7 @@ typedef struct kbase_pm_callback_conf {
         * should no longer be called by the OS on completion of this function.
         * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
         */
-       void (*power_runtime_term_callback) (struct kbase_device *kbdev);
+       void (*power_runtime_term_callback)(struct kbase_device *kbdev);
 
        /** Callback for runtime power-off power management callback
         *
@@ -678,14 +546,14 @@ typedef struct kbase_pm_callback_conf {
         *
         * @return 0 on success, else OS error code.
         */
-       void (*power_runtime_off_callback) (struct kbase_device *kbdev);
+       void (*power_runtime_off_callback)(struct kbase_device *kbdev);
 
        /** Callback for runtime power-on power management callback
         *
         * For linux this callback will be called by the kernel runtime_resume callback.
         * Note: for linux the kernel must have CONFIG_PM_RUNTIME enabled to use this feature.
         */
-       int (*power_runtime_on_callback) (struct kbase_device *kbdev);
+       int (*power_runtime_on_callback)(struct kbase_device *kbdev);
 
 } kbase_pm_callback_conf;
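
/* A hedged sketch of how the callbacks above are typically wired up; only
 * the power on/off pair is filled in, the remaining members are left NULL
 * ("nothing special to do"), and the function names are invented for the
 * example: */
static void example_pm_power_off(struct kbase_device *kbdev)
{
        /* gate GPU clocks / regulators here */
}

static int example_pm_power_on(struct kbase_device *kbdev)
{
        /* ungate power; report that register state may have been lost */
        return 1;
}

static struct kbase_pm_callback_conf pm_callbacks = {
        .power_off_callback = example_pm_power_off,
        .power_on_callback = example_pm_power_on,
        /* suspend/resume and runtime members default to NULL */
};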
 
@@ -706,14 +574,14 @@ typedef int (*kbase_cpuprops_clock_speed_function) (u32 *clock_speed);
  *                          If the system timer is not available then this function is required
  *                          for the OpenCL queue profiling to return correct timing information.
  *
- * @return 0 on success, 1 on error. When an error is returned the caller assumes a current
- * GPU speed as specified by KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX.
+ * @return 0 on success, 1 on error. When an error is returned the caller assumes the maximum
+ * GPU speed stored in gpu_freq_khz_max.
  */
 typedef int (*kbase_gpuprops_clock_speed_function) (u32 *clock_speed);
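
/* A sketch of such a function using the common clock framework
 * (<linux/clk.h>, <linux/err.h>); the clock name is hypothetical and the
 * value is assumed to be reported in kHz, matching the gpu_freq_khz_max
 * fallback described above: */
static int example_gpu_clock_speed(u32 *clock_speed)
{
        struct clk *clk = clk_get(NULL, "clk_gpu");

        if (IS_ERR(clk))
                return 1;       /* caller falls back to gpu_freq_khz_max */

        *clock_speed = clk_get_rate(clk) / 1000;
        clk_put(clk);
        return 0;
}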
 
 #ifdef CONFIG_OF
 typedef struct kbase_platform_config {
-       const kbase_attribute *attributes;
+       const struct kbase_attribute *attributes;
        u32 midgard_type;
 } kbase_platform_config;
 #else
@@ -730,15 +598,16 @@ typedef struct kbase_io_memory_region {
  * @brief Specifies I/O related resources like IRQs and memory region for I/O operations.
  */
 typedef struct kbase_io_resources {
-       u32 job_irq_number;
-       u32 mmu_irq_number;
-       u32 gpu_irq_number;
-       kbase_io_memory_region io_memory_region;
+
+       u32                      job_irq_number;
+       u32                      mmu_irq_number;
+       u32                      gpu_irq_number;
+       struct kbase_io_memory_region io_memory_region;
 } kbase_io_resources;
 
 typedef struct kbase_platform_config {
-       const kbase_attribute *attributes;
-       const kbase_io_resources *io_resources;
+       const struct kbase_attribute *attributes;
+       const struct kbase_io_resources *io_resources;
        u32 midgard_type;
 } kbase_platform_config;
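
/* A sketch of a complete (non device-tree) platform description; the IRQ
 * numbers, register base and region size are invented, and the start/end
 * member names of struct kbase_io_memory_region are assumed from context: */
static struct kbase_io_resources io_resources = {
        .job_irq_number = 68,
        .mmu_irq_number = 69,
        .gpu_irq_number = 70,
        .io_memory_region = {
                .start = 0xFFA30000,
                .end = 0xFFA30000 + 0x4000 - 1,
        },
};

static struct kbase_platform_config example_platform_config = {
        .attributes = config_attributes,        /* e.g. the array sketched earlier */
        .io_resources = &io_resources,
        /* .midgard_type is filled in with the platform's GPU id */
};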
 
@@ -764,7 +633,7 @@ const char *kbasep_midgard_type_to_string(u32 midgard_type);
  *
  * @return  Pointer to the first attribute matching id or NULL if none is found.
  */
-const kbase_attribute *kbasep_get_next_attribute(const kbase_attribute *attributes, int attribute_id);
+const struct kbase_attribute *kbasep_get_next_attribute(const struct kbase_attribute *attributes, int attribute_id);
 
 /**
  * @brief Gets the value of a single config attribute.
@@ -778,28 +647,27 @@ const kbase_attribute *kbasep_get_next_attribute(const kbase_attribute *attribut
  *
  * @return Value of attribute with the given id
  */
-uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const kbase_attribute *attributes, int attribute_id);
+uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const struct kbase_attribute *attributes, int attribute_id);
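
/* An illustrative call site; the DVFS attribute id used here is not shown
 * in this excerpt and is assumed to exist in the full enum: */
static u32 example_read_dvfs_period(struct kbase_device *kbdev,
                const struct kbase_attribute *attributes)
{
        return (u32)kbasep_get_config_value(kbdev, attributes,
                        KBASE_CONFIG_ATTR_POWER_MANAGEMENT_DVFS_FREQ);
}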
 
 /**
  * @brief Validates configuration attributes
  *
  * Function checks validity of given configuration attributes. It will fail on any attribute with unknown id, attribute
- * with invalid value or attribute list that is not correctly terminated. It will also fail if
- * KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN or KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX are not specified.
+ * with invalid value or attribute list that is not correctly terminated.
  *
  * @param[in]  kbdev       Kbase device pointer
  * @param[in]  attributes  Array of attributes to validate
  *
  * @return   MALI_TRUE if no errors have been found in the config. MALI_FALSE otherwise.
  */
-mali_bool kbasep_validate_configuration_attributes(struct kbase_device *kbdev, const kbase_attribute *attributes);
+mali_bool kbasep_validate_configuration_attributes(struct kbase_device *kbdev, const struct kbase_attribute *attributes);
 
 /**
  * @brief Gets the pointer to platform config.
  *
  * @return Pointer to the platform config
  */
-kbase_platform_config *kbase_get_platform_config(void);
+struct kbase_platform_config *kbase_get_platform_config(void);
 
 /**
  * @brief Gets the count of attributes in array
@@ -810,7 +678,7 @@ kbase_platform_config *kbase_get_platform_config(void);
  *
  * @return  Number of attributes in the array including end of list indicator.
  */
-int kbasep_get_config_attribute_count(const kbase_attribute *attributes);
+int kbasep_get_config_attribute_count(const struct kbase_attribute *attributes);
 
 /**
  * @brief Platform specific call to initialize hardware
index 30fdb584bacfea2f92bc57f18e7bc237e2473d1d..d7b466580468c201054c3113e5c4584f3ac709de 100755 (executable)
 #ifndef _KBASE_CONFIG_DEFAULTS_H_
 #define _KBASE_CONFIG_DEFAULTS_H_
 
-/* Default irq throttle time. This is the default desired minimum time in
- * between two consecutive interrupts from the gpu. The irq throttle gpu
- * register is set after this value. */
-#define DEFAULT_IRQ_THROTTLE_TIME_US 20
+/* Include mandatory definitions per platform */
 
+/**
+ * Irq throttle. This is the minimum desired time between two
+ * consecutive gpu interrupts (given in 'us'). The irq throttle
+ * gpu register will be configured after this, taking into
+ * account the configured max frequency.
+ *
+ * Attached value: number in microseconds
+ */
+#define DEFAULT_IRQ_THROTTLE_TIME_US 20
+#define GPU_FREQ_KHZ_MAX               500000
+#define GPU_FREQ_KHZ_MIN               100000
 /*** Begin Scheduling defaults ***/
 
 /**
 #define DEFAULT_JS_CTX_TIMESLICE_NS 50000000
 
 /**
- * Default initial runtime of a context for CFS, in ticks.
+ *  Default Job Scheduler initial runtime of a context for the CFS Policy,
+ *  in time-slices.
+ *
+ * This value is relative to that of the least-run context, and defines
+ * where in the CFS queue a new context is added. A value of 1 means 'after
+ * the least-run context has used its timeslice'. Therefore, when all
+ * contexts consistently use the same amount of time, a value of 1 models a
+ * FIFO. A value of 0 would model a LIFO.
  *
- * This value is relative to that of the least-run context, and defines where
- * in the CFS queue a new context is added.
+ * The value is represented in "numbers of time slices". Multiply this
+ * value by that defined in @ref DEFAULT_JS_CTX_TIMESLICE_NS to get
+ * the time value for this in nanoseconds.
  */
 #define DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES 1
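
/* Worked example with the defaults in this file:
 *   initial runtime = DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES
 *                     * DEFAULT_JS_CTX_TIMESLICE_NS
 *                   = 1 * 50000000 ns = 50 ms,
 * i.e. a new context is queued as if it had already run 50 ms more than the
 * least-run context (the FIFO-like behaviour described above). */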
 
 /**
- * Default minimum runtime value of a context for CFS, in ticks.
+ * Default Job Scheduler minimum runtime value of a context for CFS, in
+ * time_slices relative to that of the least-run context.
+ *
+ * This is a measure of how much preferential treatment is given to a
+ * context that is not run very often.
+ *
+ * Specifically, this value defines how many timeslices such a context is
+ * (initially) allowed to use at once. Such contexts (e.g. 'interactive'
+ * processes) will appear near the front of the CFS queue, and can initially
+ * use more time than contexts that run continuously (e.g. 'batch'
+ * processes).
  *
- * This value is relative to that of the least-run context. This prevents
- * "stored-up timeslices" DoS attacks.
+ * This limit \b prevents a "stored-up timeslices" DoS attack, where a ctx
+ * not run for a long time attacks the system by using a very large initial
+ * number of timeslices when it finally does run.
+ *
+ * @note A value of zero allows not-run-often contexts to get scheduled in
+ * quickly, but to only use a single timeslice when they get scheduled in.
  */
 #define DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES 2
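
/* Worked example: with the 50 ms timeslice above, a context that has not
 * run for a long time is initially allowed at most
 *   2 * 50000000 ns = 100 ms
 * in one go, which is the cap on "stored-up timeslices" mentioned above. */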
 
 /**
- * Default setting for whether to prefer security or performance.
- *
- * Currently affects only r0p0-15dev0 HW and earlier.
- */
+ * Boolean indicating whether the driver is configured to be secure at
+ * a potential loss of performance.
+ *
+ * This currently affects only r0p0-15dev0 HW and earlier.
+ *
+ * On r0p0-15dev0 HW and earlier, there are tradeoffs between security and
+ * performance:
+ *
+ * - When this is set to MALI_TRUE, the driver remains fully secure,
+ *   but potentially loses performance compared with setting this to
+ *   MALI_FALSE.
+ * - When set to MALI_FALSE, the driver is open to certain security
+ *   attacks.
+ *
+ * From r0p0-00rel0 and onwards, there is no security loss by setting
+ * this to MALI_FALSE, and no performance loss by setting it to
+ * MALI_TRUE.
+ */
 #define DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE MALI_FALSE
 
+enum {
+       /**
+        * Use unrestricted Address ID width on the AXI bus.
+        */
+       KBASE_AID_32 = 0x0,
+
+       /**
+        * Restrict GPU to a half of maximum Address ID count.
+        * This will reduce performance, but reduce bus load due to GPU.
+        */
+       KBASE_AID_16 = 0x3,
+
+       /**
+        * Restrict GPU to a quarter of maximum Address ID count.
+        * This will reduce performance, but reduce bus load due to GPU.
+        */
+       KBASE_AID_8  = 0x2,
+
+       /**
+        * Restrict GPU to an eighth of maximum Address ID count.
+        * This will reduce performance, but reduce bus load due to GPU.
+        */
+       KBASE_AID_4  = 0x1
+};
+
 /**
- * Default setting for read Address ID limiting on AXI.
+ * Default setting for read Address ID limiting on AXI bus.
+ *
+ * Attached value: u32 register value
+ *    KBASE_AID_32 - use the full 32 IDs (5 ID bits)
+ *    KBASE_AID_16 - use 16 IDs (4 ID bits)
+ *    KBASE_AID_8  - use 8 IDs (3 ID bits)
+ *    KBASE_AID_4  - use 4 IDs (2 ID bits)
+ * Default value: KBASE_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
  */
 #define DEFAULT_ARID_LIMIT KBASE_AID_32
 
 /**
  * Default setting for write Address ID limiting on AXI.
+ *
+ * Attached value: u32 register value
+ *    KBASE_AID_32 - use the full 32 IDs (5 ID bits)
+ *    KBASE_AID_16 - use 16 IDs (4 ID bits)
+ *    KBASE_AID_8  - use 8 IDs (3 ID bits)
+ *    KBASE_AID_4  - use 4 IDs (2 ID bits)
+ * Default value: KBASE_AID_32 (no limit). Note hardware implementation
+ * may limit to a lower value.
  */
 #define DEFAULT_AWID_LIMIT KBASE_AID_32
 
 /*** Begin Power Manager defaults */
 
 /* Milliseconds */
-#define DEFAULT_PM_DVFS_FREQ 20
+#define DEFAULT_PM_DVFS_FREQ 500
 
 /**
  * Default poweroff tick granularity, in nanoseconds
 
 /*** End Power Manager defaults ***/
 
+
 /**
  * Default UMP device mapping. A UMP_DEVICE_<device>_SHIFT value which
  * defines which UMP device this GPU should be mapped to.
index 3320e846a55c988dba7d07cf1a198eaaeb5f563d..3149fd196790b1cd93d7b9370bad853f53a78874 100755 (executable)
@@ -33,9 +33,9 @@
  *
  * Allocate and init a kernel base context.
  */
-kbase_context *kbase_create_context(kbase_device *kbdev)
+struct kbase_context *kbase_create_context(struct kbase_device *kbdev)
 {
-       kbase_context *kctx;
+       struct kbase_context *kctx;
        mali_error mali_err;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -46,6 +46,9 @@ kbase_context *kbase_create_context(kbase_device *kbdev)
        if (!kctx)
                goto out;
 
+       /* creating a context is considered a disjoint event */
+       kbase_disjoint_event(kbdev);
+
        kctx->kbdev = kbdev;
        kctx->as_nr = KBASEP_AS_NR_INVALID;
 #ifdef CONFIG_MALI_TRACE_TIMELINE
@@ -58,7 +61,9 @@ kbase_context *kbase_create_context(kbase_device *kbdev)
        kctx->process_mm = NULL;
        atomic_set(&kctx->nonmapped_pages, 0);
 
-       if (MALI_ERROR_NONE != kbase_mem_allocator_init(&kctx->osalloc, MEMPOOL_PAGES))
+       if (MALI_ERROR_NONE != kbase_mem_allocator_init(&kctx->osalloc,
+                                                       MEMPOOL_PAGES,
+                                                       kctx->kbdev))
                goto free_kctx;
 
        kctx->pgd_allocator = &kctx->osalloc;
@@ -109,8 +114,19 @@ kbase_context *kbase_create_context(kbase_device *kbdev)
        atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
 #endif
 
+       kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
+
+       mali_err = kbasep_mem_profile_debugfs_add(kctx);
+       if (MALI_ERROR_NONE != mali_err)
+               goto no_region_tracker;
+
+       if (kbasep_jd_debugfs_ctx_add(kctx))
+               goto free_mem_profile;
+
        return kctx;
 
+free_mem_profile:
+       kbasep_mem_profile_debugfs_remove(kctx);
 no_region_tracker:
 no_sink_page:
        kbase_mem_allocator_free(&kctx->osalloc, 1, &kctx->aliasing_sink_page, 0);
@@ -146,9 +162,9 @@ static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
  * Destroy a kernel base context. Calls kbase_destroy_os_context() to
  * free OS specific structures. Will release all outstanding regions.
  */
-void kbase_destroy_context(kbase_context *kctx)
+void kbase_destroy_context(struct kbase_context *kctx)
 {
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        int pages;
        unsigned long pending_regions_to_clean;
 
@@ -159,6 +175,10 @@ void kbase_destroy_context(kbase_context *kctx)
 
        KBASE_TRACE_ADD(kbdev, CORE_CTX_DESTROY, kctx, NULL, 0u, 0u);
 
+       kbasep_jd_debugfs_ctx_remove(kctx);
+
+       kbasep_mem_profile_debugfs_remove(kctx);
+
        /* Ensure the core is powered up for the destroy process */
        /* A suspend won't happen here, because we're in a syscall from a userspace
         * thread. */
@@ -217,6 +237,7 @@ void kbase_destroy_context(kbase_context *kctx)
 
        kbase_mem_allocator_term(&kctx->osalloc);
        WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);
+
        vfree(kctx);
 }
 KBASE_EXPORT_SYMBOL(kbase_destroy_context)
@@ -224,10 +245,10 @@ KBASE_EXPORT_SYMBOL(kbase_destroy_context)
 /**
  * Set creation flags on a context
  */
-mali_error kbase_context_set_create_flags(kbase_context *kctx, u32 flags)
+mali_error kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
 {
        mali_error err = MALI_ERROR_NONE;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_kctx_info *js_kctx_info;
        KBASE_DEBUG_ASSERT(NULL != kctx);
 
        js_kctx_info = &kctx->jctx.sched_info;
index 635c0fd3482b9bb497a63d792a1dc8574d38ef3b..d63b3e560add8b9ce6fc64fa513bb359795bc546 100755 (executable)
 
 
 
-
 /**
  * @file mali_kbase_core_linux.c
  * Base kernel driver init.
  */
 
 #include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
 #include <mali_kbase_uku.h>
 #include <mali_midg_regmap.h>
 #include <mali_kbase_gator.h>
 #include <mali_kbase_mem_linux.h>
+#ifdef CONFIG_MALI_DEVFREQ
+#include "mali_kbase_devfreq.h"
+#endif /* CONFIG_MALI_DEVFREQ */
 #ifdef CONFIG_MALI_NO_MALI
 #include "mali_kbase_model_linux.h"
 #endif /* CONFIG_MALI_NO_MALI */
 #include <linux/uaccess.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/mm.h>
 #include <linux/compat.h>      /* is_compat_task */
+#include <linux/version.h>
 #include <mali_kbase_hw.h>
 #include <platform/mali_kbase_platform_common.h>
 #ifdef CONFIG_SYNC
 #include <mali_kbase_sync.h>
 #endif /* CONFIG_SYNC */
+#ifdef CONFIG_PM_DEVFREQ
+#include <linux/devfreq.h>
+#endif /* CONFIG_PM_DEVFREQ */
+#include <linux/clk.h>
 
 /*
  * This file is included since when we support device tree we don't
 #include <plat/devs.h>
 #endif
 
+/* GPU IRQ Tags */
 #define        JOB_IRQ_TAG     0
 #define MMU_IRQ_TAG    1
 #define GPU_IRQ_TAG    2
 
+
 struct kbase_irq_table {
        u32 tag;
        irq_handler_t handler;
 };
 #if MALI_UNIT_TEST
-kbase_exported_test_data shared_kernel_test_data;
+static struct kbase_exported_test_data shared_kernel_test_data;
 EXPORT_SYMBOL(shared_kernel_test_data);
 #endif /* MALI_UNIT_TEST */
 
 #define KBASE_DRV_NAME "mali"
 #define ROCKCHIP_VERSION 0x0b
+
+/** process name + ( statistics in a single bin * number of bins + histogram header ) * number of histograms + total size
+ * @note Must be kept in sync with CCTX
+ */
+#define KBASE_MEM_PROFILE_MAX_BUF_SIZE (64 + (24 * 32 + 64) * 16 + 40)
+
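
/* For reference, the macro above expands to
 *   64 + (24 * 32 + 64) * 16 + 40
 * = 64 + 832 * 16 + 40
 * = 13416 bytes. */
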
 static const char kbase_drv_name[] = KBASE_DRV_NAME;
 
 static int kbase_dev_nr;
@@ -103,9 +120,9 @@ static INLINE void __compile_time_asserts(void)
 
 #ifdef CONFIG_KDS
 
-typedef struct kbasep_kds_resource_set_file_data {
+struct kbasep_kds_resource_set_file_data {
        struct kds_resource_set *lock;
-} kbasep_kds_resource_set_file_data;
+};
 
 static int kds_resource_release(struct inode *inode, struct file *file);
 
@@ -113,11 +130,11 @@ static const struct file_operations kds_resource_fops = {
        .release = kds_resource_release
 };
 
-typedef struct kbase_kds_resource_list_data {
+struct kbase_kds_resource_list_data {
        struct kds_resource **kds_resources;
        unsigned long *kds_access_bitmap;
        int num_elems;
-} kbase_kds_resource_list_data;
+};
 
 static int kds_resource_release(struct inode *inode, struct file *file)
 {
@@ -133,21 +150,25 @@ static int kds_resource_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-mali_error kbasep_kds_allocate_resource_list_data(kbase_context *kctx, base_external_resource *ext_res, int num_elems, kbase_kds_resource_list_data *resources_list)
+static mali_error kbasep_kds_allocate_resource_list_data(struct kbase_context *kctx, struct base_external_resource *ext_res, int num_elems, struct kbase_kds_resource_list_data *resources_list)
 {
-       base_external_resource *res = ext_res;
+       struct base_external_resource *res = ext_res;
        int res_id;
 
        /* assume we have to wait for all */
 
        KBASE_DEBUG_ASSERT(0 != num_elems);
-       resources_list->kds_resources = kmalloc(sizeof(struct kds_resource *) * num_elems, GFP_KERNEL);
+       resources_list->kds_resources = kmalloc_array(num_elems,
+                       sizeof(struct kds_resource *), GFP_KERNEL);
 
        if (NULL == resources_list->kds_resources)
                return MALI_ERROR_OUT_OF_MEMORY;
 
        KBASE_DEBUG_ASSERT(0 != num_elems);
-       resources_list->kds_access_bitmap = kzalloc(sizeof(unsigned long) * ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG), GFP_KERNEL);
+       resources_list->kds_access_bitmap = kzalloc(
+                       sizeof(unsigned long) *
+                       ((num_elems + BITS_PER_LONG - 1) / BITS_PER_LONG),
+                       GFP_KERNEL);
 
        if (NULL == resources_list->kds_access_bitmap) {
                kfree(resources_list->kds_access_bitmap);
@@ -157,14 +178,14 @@ mali_error kbasep_kds_allocate_resource_list_data(kbase_context *kctx, base_exte
        kbase_gpu_vm_lock(kctx);
        for (res_id = 0; res_id < num_elems; res_id++, res++) {
                int exclusive;
-               kbase_va_region *reg;
+               struct kbase_va_region *reg;
                struct kds_resource *kds_res = NULL;
 
                exclusive = res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE;
                reg = kbase_region_tracker_find_region_enclosing_address(kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
 
                /* did we find a matching region object? */
-               if (NULL == reg)
+               if (NULL == reg || (reg->flags & KBASE_REG_FREE))
                        break;
 
                /* no need to check reg->alloc as only regions with an alloc has
@@ -202,7 +223,7 @@ mali_error kbasep_kds_allocate_resource_list_data(kbase_context *kctx, base_exte
        return MALI_ERROR_FUNCTION_FAILED;
 }
 
-mali_bool kbasep_validate_kbase_pointer(kbase_pointer *p)
+static mali_bool kbasep_validate_kbase_pointer(union kbase_pointer *p)
 {
 #ifdef CONFIG_COMPAT
        if (is_compat_task()) {
@@ -218,28 +239,28 @@ mali_bool kbasep_validate_kbase_pointer(kbase_pointer *p)
        return MALI_TRUE;
 }
 
-mali_error kbase_external_buffer_lock(kbase_context *kctx, kbase_uk_ext_buff_kds_data *args, u32 args_size)
+static mali_error kbase_external_buffer_lock(struct kbase_context *kctx, struct kbase_uk_ext_buff_kds_data *args, u32 args_size)
 {
-       base_external_resource *ext_res_copy;
+       struct base_external_resource *ext_res_copy;
        size_t ext_resource_size;
        mali_error return_error = MALI_ERROR_FUNCTION_FAILED;
        int fd;
 
-       if (args_size != sizeof(kbase_uk_ext_buff_kds_data))
+       if (args_size != sizeof(struct kbase_uk_ext_buff_kds_data))
                return MALI_ERROR_FUNCTION_FAILED;
 
        /* Check user space has provided valid data */
        if (!kbasep_validate_kbase_pointer(&args->external_resource) || !kbasep_validate_kbase_pointer(&args->file_descriptor) || (0 == args->num_res) || (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
                return MALI_ERROR_FUNCTION_FAILED;
 
-       ext_resource_size = sizeof(base_external_resource) * args->num_res;
+       ext_resource_size = sizeof(struct base_external_resource) * args->num_res;
 
        KBASE_DEBUG_ASSERT(0 != ext_resource_size);
        ext_res_copy = kmalloc(ext_resource_size, GFP_KERNEL);
 
        if (NULL != ext_res_copy) {
-               base_external_resource *__user ext_res_user;
-               int *__user file_descriptor_user;
+               struct base_external_resource __user *ext_res_user;
+               int __user *file_descriptor_user;
 #ifdef CONFIG_COMPAT
                if (is_compat_task()) {
                        ext_res_user = compat_ptr(args->external_resource.compat_value);
@@ -254,13 +275,13 @@ mali_error kbase_external_buffer_lock(kbase_context *kctx, kbase_uk_ext_buff_kds
 
                /* Copy the external resources to lock from user space */
                if (0 == copy_from_user(ext_res_copy, ext_res_user, ext_resource_size)) {
-                       kbasep_kds_resource_set_file_data *fdata;
+                       struct kbasep_kds_resource_set_file_data *fdata;
 
                        /* Allocate data to be stored in the file */
-                       fdata = kmalloc(sizeof(kbasep_kds_resource_set_file_data), GFP_KERNEL);
+                       fdata = kmalloc(sizeof(*fdata), GFP_KERNEL);
 
                        if (NULL != fdata) {
-                               kbase_kds_resource_list_data resource_list_data;
+                               struct kbase_kds_resource_list_data resource_list_data;
                                /* Parse given elements and create resource and access lists */
                                return_error = kbasep_kds_allocate_resource_list_data(kctx, ext_res_copy, args->num_res, &resource_list_data);
                                if (MALI_ERROR_NONE == return_error) {
@@ -278,7 +299,9 @@ mali_error kbase_external_buffer_lock(kbase_context *kctx, kbase_uk_ext_buff_kds
                                        if ((fd >= 0) && (0 == err)) {
                                                struct kds_resource_set *lock;
 
-                                               lock = kds_waitall(args->num_res, resource_list_data.kds_access_bitmap, resource_list_data.kds_resources, KDS_WAIT_BLOCKING);
+                                               lock = kds_waitall(args->num_res, resource_list_data.kds_access_bitmap,
+                                                               resource_list_data.kds_resources,
+                                                               KDS_WAIT_BLOCKING);
 
                                                if (IS_ERR_OR_NULL(lock)) {
                                                        return_error = MALI_ERROR_FUNCTION_FAILED;
@@ -312,10 +335,10 @@ mali_error kbase_external_buffer_lock(kbase_context *kctx, kbase_uk_ext_buff_kds
 }
 #endif /* CONFIG_KDS */
 
-static mali_error kbase_dispatch(kbase_context *kctx, void * const args, u32 args_size)
+static mali_error kbase_dispatch(struct kbase_context *kctx, void * const args, u32 args_size)
 {
        struct kbase_device *kbdev;
-       uk_header *ukh = args;
+       union uk_header *ukh = args;
        u32 id;
 
        KBASE_DEBUG_ASSERT(ukh != NULL);
@@ -325,11 +348,27 @@ static mali_error kbase_dispatch(kbase_context *kctx, void * const args, u32 arg
        ukh->ret = MALI_ERROR_NONE;     /* Be optimistic */
 
        if (UKP_FUNC_ID_CHECK_VERSION == id) {
-               if (args_size == sizeof(uku_version_check_args)) {
-                       uku_version_check_args *version_check = (uku_version_check_args *)args;
-
+               if (args_size == sizeof(struct uku_version_check_args)) {
+                       struct uku_version_check_args *version_check = (struct uku_version_check_args *)args;
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+                       if (version_check->major == 6) {
+                               /* We are backwards compatible with version 6,
+                                * so pretend to be the old version */
+                               version_check->major = 6;
+                               version_check->minor = 1;
+                       } else {
+                               /* We return our actual version regardless if it
+                                * matches the version returned by userspace -
+                                * userspace can bail if it can't handle this
+                                * version */
+                               version_check->major = BASE_UK_VERSION_MAJOR;
+                               version_check->minor = BASE_UK_VERSION_MINOR;
+                       }
+#else
                        version_check->major = BASE_UK_VERSION_MAJOR;
                        version_check->minor = BASE_UK_VERSION_MINOR;
+#endif /* BASE_LEGACY_UK6_SUPPORT */
 
                        ukh->ret = MALI_ERROR_NONE;
                } else {
@@ -350,7 +389,7 @@ static mali_error kbase_dispatch(kbase_context *kctx, void * const args, u32 arg
 
                /* is it the only call we accept? */
                if (id == KBASE_FUNC_SET_FLAGS) {
-                       kbase_uk_set_flags *kbase_set_flags = (kbase_uk_set_flags *) args;
+                       struct kbase_uk_set_flags *kbase_set_flags = (struct kbase_uk_set_flags *)args;
 
                        if (sizeof(*kbase_set_flags) != args_size) {
                                /* not matching the expected call, stay stuck in setup mode */
@@ -376,7 +415,7 @@ static mali_error kbase_dispatch(kbase_context *kctx, void * const args, u32 arg
        switch (id) {
        case KBASE_FUNC_MEM_ALLOC:
                {
-                       kbase_uk_mem_alloc *mem = args;
+                       struct kbase_uk_mem_alloc *mem = args;
                        struct kbase_va_region *reg;
 
                        if (sizeof(*mem) != args_size)
@@ -389,8 +428,8 @@ static mali_error kbase_dispatch(kbase_context *kctx, void * const args, u32 arg
                }
        case KBASE_FUNC_MEM_IMPORT:
                {
-                       kbase_uk_mem_import *mem_import = args;
-                       int *__user phandle;
+                       struct kbase_uk_mem_import *mem_import = args;
+                       int __user *phandle;
                        int handle;
 
                        if (sizeof(*mem_import) != args_size)
@@ -421,14 +460,14 @@ bad_type:
                        break;
                }
        case KBASE_FUNC_MEM_ALIAS: {
-                       kbase_uk_mem_alias *alias = args;
-                       struct base_mem_aliasing_info *__user user_ai;
+                       struct kbase_uk_mem_alias *alias = args;
+                       struct base_mem_aliasing_info __user *user_ai;
                        struct base_mem_aliasing_info *ai;
 
                        if (sizeof(*alias) != args_size)
                                goto bad_size;
 
-                       if (alias->nents > 4) {
+                       if (alias->nents > 2048) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                                break;
                        }
@@ -440,7 +479,8 @@ bad_type:
 #endif
                                user_ai = alias->ai.value;
 
-                       ai = kmalloc(GFP_KERNEL, sizeof(*ai) * alias->nents);
+                       ai = vmalloc(sizeof(*ai) * alias->nents);
+
                        if (!ai) {
                                ukh->ret = MALI_ERROR_OUT_OF_MEMORY;
                                break;
@@ -462,12 +502,12 @@ bad_type:
                        }
 no_alias:
 copy_failed:
-                       kfree(ai);
+                       vfree(ai);
                        break;
                }
        case KBASE_FUNC_MEM_COMMIT:
                {
-                       kbase_uk_mem_commit *commit = args;
+                       struct kbase_uk_mem_commit *commit = args;
 
                        if (sizeof(*commit) != args_size)
                                goto bad_size;
@@ -478,14 +518,18 @@ copy_failed:
                                break;
                        }
 
-                       if (kbase_mem_commit(kctx, commit->gpu_addr, commit->pages, (base_backing_threshold_status*)&commit->result_subcode))
+                       if (kbase_mem_commit(kctx, commit->gpu_addr,
+                                       commit->pages,
+                                       (base_backing_threshold_status *)&commit->result_subcode))
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+
                        break;
                }
 
        case KBASE_FUNC_MEM_QUERY:
                {
-                       kbase_uk_mem_query *query = args;
+                       struct kbase_uk_mem_query *query = args;
+
                        if (sizeof(*query) != args_size)
                                goto bad_size;
 
@@ -509,7 +553,8 @@ copy_failed:
 
        case KBASE_FUNC_MEM_FLAGS_CHANGE:
                {
-                       kbase_uk_mem_flags_change * fc = args;
+                       struct kbase_uk_mem_flags_change *fc = args;
+
                        if (sizeof(*fc) != args_size)
                                goto bad_size;
 
@@ -526,7 +571,7 @@ copy_failed:
                }
        case KBASE_FUNC_MEM_FREE:
                {
-                       kbase_uk_mem_free *mem = args;
+                       struct kbase_uk_mem_free *mem = args;
 
                        if (sizeof(*mem) != args_size)
                                goto bad_size;
@@ -544,19 +589,37 @@ copy_failed:
 
        case KBASE_FUNC_JOB_SUBMIT:
                {
-                       kbase_uk_job_submit *job = args;
+                       struct kbase_uk_job_submit *job = args;
 
                        if (sizeof(*job) != args_size)
                                goto bad_size;
 
+#ifdef BASE_LEGACY_UK6_SUPPORT
+                       if (MALI_ERROR_NONE != kbase_jd_submit(kctx, job, 0))
+#else
                        if (MALI_ERROR_NONE != kbase_jd_submit(kctx, job))
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+                               ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+                       break;
+               }
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+       case KBASE_FUNC_JOB_SUBMIT_UK6:
+               {
+                       struct kbase_uk_job_submit *job = args;
+
+                       if (sizeof(*job) != args_size)
+                               goto bad_size;
+
+                       if (MALI_ERROR_NONE != kbase_jd_submit(kctx, job, 1))
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
                        break;
                }
+#endif
 
        case KBASE_FUNC_SYNC:
                {
-                       kbase_uk_sync_now *sn = args;
+                       struct kbase_uk_sync_now *sn = args;
 
                        if (sizeof(*sn) != args_size)
                                goto bad_size;
@@ -572,6 +635,18 @@ copy_failed:
                        break;
                }
 
+       case KBASE_FUNC_DISJOINT_QUERY:
+               {
+                       struct kbase_uk_disjoint_query *dquery = args;
+
+                       if (sizeof(*dquery) != args_size)
+                               goto bad_size;
+
+                       /* Get the disjointness counter value. */
+                       dquery->counter = kbase_disjoint_event_get(kctx->kbdev);
+                       break;
+               }
+
        case KBASE_FUNC_POST_TERM:
                {
                        kbase_event_close(kctx);
@@ -580,7 +655,7 @@ copy_failed:
 
        case KBASE_FUNC_HWCNT_SETUP:
                {
-                       kbase_uk_hwcnt_setup *setup = args;
+                       struct kbase_uk_hwcnt_setup *setup = args;
 
                        if (sizeof(*setup) != args_size)
                                goto bad_size;
@@ -608,7 +683,7 @@ copy_failed:
 
        case KBASE_FUNC_CPU_PROPS_REG_DUMP:
                {
-                       kbase_uk_cpuprops *setup = args;
+                       struct kbase_uk_cpuprops *setup = args;
 
                        if (sizeof(*setup) != args_size)
                                goto bad_size;
@@ -620,7 +695,7 @@ copy_failed:
 
        case KBASE_FUNC_GPU_PROPS_REG_DUMP:
                {
-                       kbase_uk_gpuprops *setup = args;
+                       struct kbase_uk_gpuprops *setup = args;
 
                        if (sizeof(*setup) != args_size)
                                goto bad_size;
@@ -631,21 +706,19 @@ copy_failed:
                }
        case KBASE_FUNC_FIND_CPU_OFFSET:
                {
-                       kbase_uk_find_cpu_offset *find = args;
+                       struct kbase_uk_find_cpu_offset *find = args;
 
                        if (sizeof(*find) != args_size)
                                goto bad_size;
 
                        if (find->gpu_addr & ~PAGE_MASK) {
-                               dev_warn(kbdev->dev,    
-                                       "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET:"
-                                       "find->gpu_addr: passed parameter is invalid");
+                               dev_warn(kbdev->dev, "kbase_dispatch case KBASE_FUNC_FIND_CPU_OFFSET: find->gpu_addr: passed parameter is invalid");
                                goto out_bad;
                        }
 
-                       if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX)
+                       if (find->size > SIZE_MAX || find->cpu_addr > ULONG_MAX) {
                                ukh->ret = MALI_ERROR_FUNCTION_FAILED;
-                       else {
+                       } else {
+                       else {
                                mali_error err;
 
                                err = kbasep_find_enclosing_cpu_mapping_offset(
@@ -662,7 +735,7 @@ copy_failed:
                }
        case KBASE_FUNC_GET_VERSION:
                {
-                       kbase_uk_get_ddk_version *get_version = (kbase_uk_get_ddk_version *) args;
+                       struct kbase_uk_get_ddk_version *get_version = (struct kbase_uk_get_ddk_version *)args;
 
                        if (sizeof(*get_version) != args_size)
                                goto bad_size;
@@ -677,7 +750,7 @@ copy_failed:
        case KBASE_FUNC_STREAM_CREATE:
                {
 #ifdef CONFIG_SYNC
-                       kbase_uk_stream_create *screate = (kbase_uk_stream_create *) args;
+                       struct kbase_uk_stream_create *screate = (struct kbase_uk_stream_create *)args;
 
                        if (sizeof(*screate) != args_size)
                                goto bad_size;
@@ -697,7 +770,8 @@ copy_failed:
        case KBASE_FUNC_FENCE_VALIDATE:
                {
 #ifdef CONFIG_SYNC
-                       kbase_uk_fence_validate *fence_validate = (kbase_uk_fence_validate *) args;
+                       struct kbase_uk_fence_validate *fence_validate = (struct kbase_uk_fence_validate *)args;
+
                        if (sizeof(*fence_validate) != args_size)
                                goto bad_size;
 
@@ -709,7 +783,7 @@ copy_failed:
        case KBASE_FUNC_EXT_BUFFER_LOCK:
                {
 #ifdef CONFIG_KDS
-                       ukh->ret = kbase_external_buffer_lock(kctx, (kbase_uk_ext_buff_kds_data *) args, args_size);
+                       ukh->ret = kbase_external_buffer_lock(kctx, (struct kbase_uk_ext_buff_kds_data *)args, args_size);
 #endif /* CONFIG_KDS */
                        break;
                }
@@ -717,11 +791,11 @@ copy_failed:
        case KBASE_FUNC_SET_TEST_DATA:
                {
 #if MALI_UNIT_TEST
-                       kbase_uk_set_test_data *set_data = args;
+                       struct kbase_uk_set_test_data *set_data = args;
 
                        shared_kernel_test_data = set_data->test_data;
-                       shared_kernel_test_data.kctx.value = kctx;
-                       shared_kernel_test_data.mm.value = (void *)current->mm;
+                       shared_kernel_test_data.kctx.value = (void __user *)kctx;
+                       shared_kernel_test_data.mm.value = (void __user *)current->mm;
                        ukh->ret = MALI_ERROR_NONE;
 #endif /* MALI_UNIT_TEST */
                        break;
@@ -731,7 +805,8 @@ copy_failed:
                {
 #ifdef CONFIG_MALI_ERROR_INJECT
                        unsigned long flags;
-                       kbase_error_params params = ((kbase_uk_error_params *) args)->params;
+                       struct kbase_error_params params = ((struct kbase_uk_error_params *)args)->params;
+
                        /*mutex lock */
                        spin_lock_irqsave(&kbdev->reg_op_lock, flags);
                        ukh->ret = job_atom_inject_error(&params);
@@ -745,7 +820,9 @@ copy_failed:
                {
 #ifdef CONFIG_MALI_NO_MALI
                        unsigned long flags;
-                       kbase_model_control_params params = ((kbase_uk_model_control_params *) args)->params;
+                       struct kbase_model_control_params params =
+                                       ((struct kbase_uk_model_control_params *)args)->params;
+
                        /*mutex lock */
                        spin_lock_irqsave(&kbdev->reg_op_lock, flags);
                        ukh->ret = midg_model_control(kbdev->model, &params);
@@ -757,7 +834,9 @@ copy_failed:
 
        case KBASE_FUNC_KEEP_GPU_POWERED:
                {
-                       kbase_uk_keep_gpu_powered *kgp = (kbase_uk_keep_gpu_powered *) args;
+                       struct kbase_uk_keep_gpu_powered *kgp =
+                                       (struct kbase_uk_keep_gpu_powered *)args;
+
                        /* A suspend won't happen here, because we're in a syscall from a
                         * userspace thread.
                         *
@@ -778,26 +857,25 @@ copy_failed:
                        break;
                }
 
-       case KBASE_FUNC_GET_PROFILING_CONTROLS :
+       case KBASE_FUNC_GET_PROFILING_CONTROLS:
                {
-                       struct kbase_uk_profiling_controls *controls = \
+                       struct kbase_uk_profiling_controls *controls =
                                        (struct kbase_uk_profiling_controls *)args;
                        u32 i;
 
                        if (sizeof(*controls) != args_size)
                                goto bad_size;
 
-                       for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++) {
+                       for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
                                controls->profiling_controls[i] = kbase_get_profiling_control(kbdev, i);
-                       }
 
                        break;
                }
 
        /* used only for testing purposes; these controls are to be set by gator through gator API */
-       case KBASE_FUNC_SET_PROFILING_CONTROLS :
+       case KBASE_FUNC_SET_PROFILING_CONTROLS:
                {
-                       struct kbase_uk_profiling_controls *controls = \
+                       struct kbase_uk_profiling_controls *controls =
                                        (struct kbase_uk_profiling_controls *)args;
                        u32 i;
 
@@ -805,10 +883,45 @@ copy_failed:
                                goto bad_size;
 
                        for (i = FBDUMP_CONTROL_MIN; i < FBDUMP_CONTROL_MAX; i++)
-                       {
                                _mali_profiling_control(i, controls->profiling_controls[i]);
+
+                       break;
+               }
+
+       case KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD:
+               {
+                       struct kbase_uk_debugfs_mem_profile_add *add_data =
+                                       (struct kbase_uk_debugfs_mem_profile_add *)args;
+                       char *buf;
+                       char __user *user_buf;
+
+                       if (sizeof(*add_data) != args_size)
+                               goto bad_size;
+
+                       if (add_data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
+                               dev_err(kbdev->dev, "buffer too big");
+                               goto out_bad;
                        }
 
+#ifdef CONFIG_64BIT
+                       if (is_compat_task())
+                               user_buf = compat_ptr(add_data->buf.compat_value);
+                       else
+#endif
+                               user_buf = add_data->buf.value;
+
+                       buf = kmalloc(add_data->len, GFP_KERNEL);
+                       if (!buf)
+                               goto out_bad;
+
+                       if (0 != copy_from_user(buf, user_buf, add_data->len)) {
+                               ukh->ret = MALI_ERROR_FUNCTION_FAILED;
+                               kfree(buf);
+                               goto out_bad;
+                       }
+                       kbasep_mem_profile_debugfs_insert(kctx, buf,
+                                       add_data->len);
+
                        break;
                }
 
@@ -878,7 +991,7 @@ EXPORT_SYMBOL(kbase_release_device);
 static int kbase_open(struct inode *inode, struct file *filp)
 {
        struct kbase_device *kbdev = NULL;
-       kbase_context *kctx;
+       struct kbase_context *kctx;
        int ret = 0;
 
        kbdev = kbase_find_device(iminor(inode));
@@ -898,9 +1011,9 @@ static int kbase_open(struct inode *inode, struct file *filp)
        dev_dbg(kbdev->dev, "created base context\n");
 
        {
-               kbasep_kctx_list_element *element;
+               struct kbasep_kctx_list_element *element;
 
-               element = kzalloc(sizeof(kbasep_kctx_list_element), GFP_KERNEL);
+               element = kzalloc(sizeof(*element), GFP_KERNEL);
                if (element) {
                        mutex_lock(&kbdev->kctx_list_lock);
                        element->kctx = kctx;
@@ -920,11 +1033,11 @@ static int kbase_open(struct inode *inode, struct file *filp)
 
 static int kbase_release(struct inode *inode, struct file *filp)
 {
-       kbase_context *kctx = filp->private_data;
+       struct kbase_context *kctx = filp->private_data;
        struct kbase_device *kbdev = kctx->kbdev;
-       kbasep_kctx_list_element *element, *tmp;
+       struct kbasep_kctx_list_element *element, *tmp;
        mali_bool found_element = MALI_FALSE;
-       
+
        mutex_lock(&kbdev->kctx_list_lock);
        list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
                if (element->kctx == kctx) {
@@ -951,12 +1064,12 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        u64 msg[(CALL_MAX_SIZE + 7) >> 3] = { 0xdeadbeefdeadbeefull };  /* alignment fixup */
        u32 size = _IOC_SIZE(cmd);
-       kbase_context *kctx = filp->private_data;
+       struct kbase_context *kctx = filp->private_data;
 
        if (size > CALL_MAX_SIZE)
                return -ENOTTY;
 
-       if (0 != copy_from_user(&msg, (void *)arg, size)) {
+       if (0 != copy_from_user(&msg, (void __user *)arg, size)) {
                dev_err(kctx->kbdev->dev, "failed to copy ioctl argument into kernel space\n");
                return -EFAULT;
        }
@@ -964,7 +1077,7 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        if (MALI_ERROR_NONE != kbase_dispatch(kctx, &msg, size))
                return -EFAULT;
 
-       if (0 != copy_to_user((void *)arg, &msg, size)) {
+       if (0 != copy_to_user((void __user *)arg, &msg, size)) {
                dev_err(kctx->kbdev->dev, "failed to copy results of UK call back to user space\n");
                return -EFAULT;
        }
@@ -973,8 +1086,8 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
 {
-       kbase_context *kctx = filp->private_data;
-       base_jd_event_v2 uevent;
+       struct kbase_context *kctx = filp->private_data;
+       struct base_jd_event_v2 uevent;
        int out_count = 0;
 
        if (count < sizeof(uevent))
@@ -1011,7 +1124,7 @@ static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, lof
 
 static unsigned int kbase_poll(struct file *filp, poll_table *wait)
 {
-       kbase_context *kctx = filp->private_data;
+       struct kbase_context *kctx = filp->private_data;
 
        poll_wait(filp, &kctx->event_queue, wait);
        if (kbase_event_pending(kctx))
@@ -1020,7 +1133,7 @@ static unsigned int kbase_poll(struct file *filp, poll_table *wait)
        return 0;
 }
 
-void kbase_event_wakeup(kbase_context *kctx)
+void kbase_event_wakeup(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kctx);
 
@@ -1029,7 +1142,7 @@ void kbase_event_wakeup(kbase_context *kctx)
 
 KBASE_EXPORT_TEST_API(kbase_event_wakeup)
 
-int kbase_check_flags(int flags)
+static int kbase_check_flags(int flags)
 {
        /* Enforce that the driver keeps the O_CLOEXEC flag so that execve() always
         * closes the file descriptor in a child process.
@@ -1040,6 +1153,100 @@ int kbase_check_flags(int flags)
        return 0;
 }
 
+static unsigned long kbase_get_unmapped_area(struct file *filp,
+               const unsigned long addr, const unsigned long len,
+               const unsigned long pgoff, const unsigned long flags)
+{
+#ifdef CONFIG_64BIT
+       /* based on get_unmapped_area, but simplified slightly because
+        * some values are known in advance */
+       struct kbase_context *kctx = filp->private_data;
+
+       if (!is_compat_task() && !addr &&
+               kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
+               struct mm_struct *mm = current->mm;
+               struct vm_area_struct *vma;
+               unsigned long low_limit, high_limit, gap_start, gap_end;
+
+               /* Hardware has smaller VA than userspace, ensure the page
+                * comes from a VA which can be used on the GPU */
+
+               gap_end = (1UL<<33);
+               if (gap_end < len)
+                       return -ENOMEM;
+               high_limit = gap_end - len;
+               low_limit = PAGE_SIZE + len;
+
+               gap_start = mm->highest_vm_end;
+               if (gap_start <= high_limit)
+                       goto found_highest;
+
+               if (RB_EMPTY_ROOT(&mm->mm_rb))
+                       return -ENOMEM;
+               vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+               if (vma->rb_subtree_gap < len)
+                       return -ENOMEM;
+
+               while (true) {
+                       gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+                       if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+                               struct vm_area_struct *right =
+                                       rb_entry(vma->vm_rb.rb_right,
+                                                struct vm_area_struct, vm_rb);
+                               if (right->rb_subtree_gap >= len) {
+                                       vma = right;
+                                       continue;
+                               }
+                       }
+check_current:
+                       gap_end = vma->vm_start;
+                       if (gap_end < low_limit)
+                               return -ENOMEM;
+                       if (gap_start <= high_limit &&
+                           gap_end - gap_start >= len)
+                               goto found;
+
+                       if (vma->vm_rb.rb_left) {
+                               struct vm_area_struct *left =
+                                       rb_entry(vma->vm_rb.rb_left,
+                                                struct vm_area_struct, vm_rb);
+
+                               if (left->rb_subtree_gap >= len) {
+                                       vma = left;
+                                       continue;
+                               }
+                       }
+                       while (true) {
+                               struct rb_node *prev = &vma->vm_rb;
+
+                               if (!rb_parent(prev))
+                                       return -ENOMEM;
+                               vma = rb_entry(rb_parent(prev),
+                                               struct vm_area_struct, vm_rb);
+                               if (prev == vma->vm_rb.rb_right) {
+                                       gap_start = vma->vm_prev ?
+                                               vma->vm_prev->vm_end : 0;
+                                       goto check_current;
+                               }
+                       }
+               }
+
+found:
+               if (gap_end > (1UL<<33))
+                       gap_end = (1UL<<33);
+
+found_highest:
+               gap_end -= len;
+
+               VM_BUG_ON(gap_end < PAGE_SIZE);
+               VM_BUG_ON(gap_end < gap_start);
+               return gap_end;
+       }
+#endif
+       /* No special requirements - fallback to the default version */
+       return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
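/* The loop above is a top-down gap search with the extra constraint that the
 * mapping must end below the 33-bit VA ceiling. The acceptance test it
 * applies to each candidate gap can be summarised by the hypothetical helper
 * below (not part of the driver; assumes the usual kernel headers for
 * PAGE_SIZE): */
static int example_gap_fits(unsigned long gap_start, unsigned long gap_end,
                unsigned long len)
{
        /* clamp the top of the gap to the 33-bit limit first */
        if (gap_end > (1UL << 33))
                gap_end = (1UL << 33);

        /* the mapping is placed at gap_end - len, so that address must stay
         * above PAGE_SIZE and must not dip below the start of the gap */
        return gap_end >= len &&
               gap_end - len >= PAGE_SIZE &&
               gap_end - len >= gap_start;
}
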
 static const struct file_operations kbase_fops = {
        .owner = THIS_MODULE,
        .open = kbase_open,
@@ -1050,18 +1257,22 @@ static const struct file_operations kbase_fops = {
        .compat_ioctl = kbase_ioctl,
        .mmap = kbase_mmap,
        .check_flags = kbase_check_flags,
+       .get_unmapped_area = kbase_get_unmapped_area,
 };
 
 #ifndef CONFIG_MALI_NO_MALI
-void kbase_os_reg_write(kbase_device *kbdev, u16 offset, u32 value)
+void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value)
 {
        writel(value, kbdev->reg + offset);
 }
 
-u32 kbase_os_reg_read(kbase_device *kbdev, u16 offset)
+u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset)
 {
        return readl(kbdev->reg + offset);
 }
+#endif
+
+#ifndef CONFIG_MALI_NO_MALI
 
 static void *kbase_tag(void *ptr, u32 tag)
 {
@@ -1073,6 +1284,9 @@ static void *kbase_untag(void *ptr)
        return (void *)(((uintptr_t) ptr) & ~3);
 }
 
+
+
+
 static irqreturn_t kbase_job_irq_handler(int irq, void *data)
 {
        unsigned long flags;
@@ -1092,7 +1306,7 @@ static irqreturn_t kbase_job_irq_handler(int irq, void *data)
 #ifdef CONFIG_MALI_DEBUG
        if (!kbdev->pm.driver_ready_for_irqs)
                dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
-                               __func__, irq, val );
+                               __func__, irq, val);
 #endif /* CONFIG_MALI_DEBUG */
        spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
 
@@ -1127,7 +1341,7 @@ static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
 #ifdef CONFIG_MALI_DEBUG
        if (!kbdev->pm.driver_ready_for_irqs)
                dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
-                               __func__, irq, val );
+                               __func__, irq, val);
 #endif /* CONFIG_MALI_DEBUG */
        spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
 
@@ -1160,7 +1374,7 @@ static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
 #ifdef CONFIG_MALI_DEBUG
        if (!kbdev->pm.driver_ready_for_irqs)
                dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
-                               __func__, irq, val );
+                               __func__, irq, val);
 #endif /* CONFIG_MALI_DEBUG */
        spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
 
@@ -1173,13 +1387,13 @@ static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
 
        return IRQ_HANDLED;
 }
-
 static irq_handler_t kbase_handler_table[] = {
        [JOB_IRQ_TAG] = kbase_job_irq_handler,
        [MMU_IRQ_TAG] = kbase_mmu_irq_handler,
        [GPU_IRQ_TAG] = kbase_gpu_irq_handler,
 };
 
+
 #ifdef CONFIG_MALI_DEBUG
 #define  JOB_IRQ_HANDLER JOB_IRQ_TAG
 #define  MMU_IRQ_HANDLER MMU_IRQ_TAG
@@ -1194,10 +1408,11 @@ static irq_handler_t kbase_handler_table[] = {
  * @param[in] irq_type        - Interrupt type
  * @return     MALI_ERROR_NONE case success, MALI_ERROR_FUNCTION_FAILED otherwise
  */
-static mali_error kbase_set_custom_irq_handler(kbase_device *kbdev, irq_handler_t custom_handler, int irq_type)
+static mali_error kbase_set_custom_irq_handler(struct kbase_device *kbdev, irq_handler_t custom_handler, int irq_type)
 {
        mali_error result = MALI_ERROR_NONE;
        irq_handler_t requested_irq_handler = NULL;
+
        KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) && (GPU_IRQ_HANDLER >= irq_type));
 
        /* Release previous handler */
@@ -1206,7 +1421,10 @@ static mali_error kbase_set_custom_irq_handler(kbase_device *kbdev, irq_handler_
 
        requested_irq_handler = (NULL != custom_handler) ? custom_handler : kbase_handler_table[irq_type];
 
-       if (0 != request_irq(kbdev->irqs[irq_type].irq, requested_irq_handler, kbdev->irqs[irq_type].flags | IRQF_SHARED, dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
+       if (0 != request_irq(kbdev->irqs[irq_type].irq,
+                       requested_irq_handler,
+                       kbdev->irqs[irq_type].flags | IRQF_SHARED,
+                       dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
                result = MALI_ERROR_FUNCTION_FAILED;
                dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n", kbdev->irqs[irq_type].irq, irq_type);
 #ifdef CONFIG_SPARSE_IRQ
@@ -1220,14 +1438,14 @@ static mali_error kbase_set_custom_irq_handler(kbase_device *kbdev, irq_handler_
 KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler)
 
 /* test correct interrupt assignment and reception by cpu */
-typedef struct kbasep_irq_test {
+struct kbasep_irq_test {
        struct hrtimer timer;
        wait_queue_head_t wait;
        int triggered;
        u32 timeout;
-} kbasep_irq_test;
+};
 
-static kbasep_irq_test kbasep_irq_test_data;
+static struct kbasep_irq_test kbasep_irq_test_data;
 
 #define IRQ_TEST_TIMEOUT    500
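The struct above backs the interrupt self-test: the test IRQ handler sets triggered when the interrupt arrives, while the hrtimer callback sets both timeout and triggered if nothing arrives within IRQ_TEST_TIMEOUT ms. The waiting side of kbasep_common_test_interrupt() then reduces to the following pattern (a sketch, not the literal function body):

        kbasep_irq_test_data.triggered = 0;
        kbasep_irq_test_data.timeout = 0;
        hrtimer_start(&kbasep_irq_test_data.timer,
                        HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
                        HRTIMER_MODE_REL);

        /* ... write the relevant RAWSTAT register to raise the interrupt ... */

        wait_event(kbasep_irq_test_data.wait,
                        kbasep_irq_test_data.triggered != 0);

        if (kbasep_irq_test_data.timeout)
                err = MALI_ERROR_FUNCTION_FAILED; /* interrupt never arrived */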
 
@@ -1295,7 +1513,7 @@ static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
 
 static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
 {
-       kbasep_irq_test *test_data = container_of(timer, kbasep_irq_test, timer);
+       struct kbasep_irq_test *test_data = container_of(timer, struct kbasep_irq_test, timer);
 
        test_data->timeout = 1;
        test_data->triggered = 1;
@@ -1303,7 +1521,7 @@ static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-static mali_error kbasep_common_test_interrupt(kbase_device * const kbdev, u32 tag)
+static mali_error kbasep_common_test_interrupt(struct kbase_device * const kbdev, u32 tag)
 {
        mali_error err = MALI_ERROR_NONE;
        irq_handler_t test_handler;
@@ -1380,7 +1598,7 @@ static mali_error kbasep_common_test_interrupt(kbase_device * const kbdev, u32 t
        return err;
 }
 
-static mali_error kbasep_common_test_interrupt_handlers(kbase_device * const kbdev)
+static mali_error kbasep_common_test_interrupt_handlers(struct kbase_device * const kbdev)
 {
        mali_error err;
 
@@ -1402,17 +1620,16 @@ static mali_error kbasep_common_test_interrupt_handlers(kbase_device * const kbd
                goto out;
        }
 
-       dev_err(kbdev->dev, "Interrupts are correctly assigned.\n");
+       dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n");
 
  out:
        kbase_pm_context_idle(kbdev);
 
        return err;
-
 }
 #endif /* CONFIG_MALI_DEBUG */
 
-static int kbase_install_interrupts(kbase_device *kbdev)
+static int kbase_install_interrupts(struct kbase_device *kbdev)
 {
        u32 nr = ARRAY_SIZE(kbase_handler_table);
        int err;
@@ -1438,7 +1655,7 @@ static int kbase_install_interrupts(kbase_device *kbdev)
        return err;
 }
 
-static void kbase_release_interrupts(kbase_device *kbdev)
+static void kbase_release_interrupts(struct kbase_device *kbdev)
 {
        u32 nr = ARRAY_SIZE(kbase_handler_table);
        u32 i;
@@ -1449,7 +1666,7 @@ static void kbase_release_interrupts(kbase_device *kbdev)
        }
 }
 
-void kbase_synchronize_irqs(kbase_device *kbdev)
+void kbase_synchronize_irqs(struct kbase_device *kbdev)
 {
        u32 nr = ARRAY_SIZE(kbase_handler_table);
        u32 i;
@@ -1459,10 +1676,9 @@ void kbase_synchronize_irqs(kbase_device *kbdev)
                        synchronize_irq(kbdev->irqs[i].irq);
        }
 }
-
 #endif /* CONFIG_MALI_NO_MALI */
 
-
+#if KBASE_PM_EN
 /** Show callback for the @c power_policy sysfs file.
  *
  * This function is called to get the contents of the @c power_policy sysfs
@@ -1563,7 +1779,7 @@ static ssize_t set_policy(struct device *dev, struct device_attribute *attr, con
  * determining which policy is currently active, and changing the active
  * policy.
  */
-DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
+static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
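power_policy follows the sysfs attribute pattern used for every file in this section: a show callback formats state into the PAGE_SIZE buffer, a store callback parses user input and returns the number of bytes consumed, and DEVICE_ATTR() binds the pair to a file mode. A stripped-down sketch of that shape, with foo/show_foo/set_foo as placeholder names rather than driver symbols:

static ssize_t show_foo(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct kbase_device *kbdev = to_kbase_device(dev);

        if (!kbdev)
                return -ENODEV;

        /* scnprintf() never writes past the PAGE_SIZE sysfs buffer */
        return scnprintf(buf, PAGE_SIZE, "%d\n", 0 /* some state */);
}

static ssize_t set_foo(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        /* parse buf, update state, return count on success */
        return count;
}

static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, show_foo, set_foo);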
 
 /** Show callback for the @c core_availability_policy sysfs file.
  *
@@ -1577,7 +1793,7 @@ DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
  *
  * @return The number of bytes output to @c buf.
  */
-static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char *const buf)
+static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
 {
        struct kbase_device *kbdev;
        const struct kbase_pm_ca_policy *current_policy;
@@ -1665,7 +1881,7 @@ static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr,
  * determining which policy is currently active, and changing the active
  * policy.
  */
-DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
+static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
 
 /** Show callback for the @c core_mask sysfs file.
  *
@@ -1678,7 +1894,7 @@ DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_
  *
  * @return The number of bytes output to @c buf.
  */
-static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char *const buf)
+static ssize_t show_core_mask(struct device *dev, struct device_attribute *attr, char * const buf)
 {
        struct kbase_device *kbdev;
        ssize_t ret = 0;
@@ -1709,13 +1925,16 @@ static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr,
 {
        struct kbase_device *kbdev;
        u64 new_core_mask;
+       int rc;
 
        kbdev = to_kbase_device(dev);
 
        if (!kbdev)
                return -ENODEV;
 
-       new_core_mask = simple_strtoull(buf, NULL, 16);
+       rc = kstrtoull(buf, 16, &new_core_mask);
+       if (rc)
+               return rc;
 
        if ((new_core_mask & kbdev->shader_present_bitmap) != new_core_mask ||
            !(new_core_mask & kbdev->gpu_props.props.coherency_info.group[0].core_mask)) {
@@ -1743,8 +1962,8 @@ static ssize_t set_core_mask(struct device *dev, struct device_attribute *attr,
  * Reading it will show the current core mask and the mask of cores available.
  * Writing to it will set the current core mask.
  */
-DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
-
+static DEVICE_ATTR(core_mask, S_IRUGO | S_IWUSR, show_core_mask, set_core_mask);
+#endif /* KBASE_PM_EN */
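The set_core_mask() hunk above swaps simple_strtoull() for kstrtoull(), which reports malformed input instead of silently yielding 0, and then checks the parsed mask against the hardware. The validation amounts to the following (sketch mirroring the checks in the hunk):

        u64 new_core_mask;
        int rc = kstrtoull(buf, 16, &new_core_mask);

        if (rc)
                return rc;      /* e.g. -EINVAL for a non-hex string */

        /* every requested core must physically exist ... */
        if ((new_core_mask & kbdev->shader_present_bitmap) != new_core_mask)
                return -EINVAL;

        /* ... and at least one core of coherency group 0 must stay enabled */
        if (!(new_core_mask &
                        kbdev->gpu_props.props.coherency_info.group[0].core_mask))
                return -EINVAL;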
 
 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
 /* Import the external affinity mask variables */
@@ -1755,19 +1974,18 @@ extern u64 mali_js2_affinity_mask;
 /**
  * Structure containing a single shader affinity split configuration.
  */
-typedef struct {
-       char const * tag;
-       char const * human_readable;
+struct sc_split_config {
+       char const *tag;
+       char const *human_readable;
        u64          js0_mask;
        u64          js1_mask;
        u64          js2_mask;
-} sc_split_config;
+};
 
 /**
  * Array of available shader affinity split configurations.
  */
-static sc_split_config const sc_split_configs[] =
-{
+static struct sc_split_config const sc_split_configs[] = {
        /* All must be the first config (default). */
        {
                "all", "All cores",
@@ -1801,7 +2019,7 @@ static sc_split_config const sc_split_configs[] =
 };
 
 /* Pointer to the currently active shader split configuration. */
-static sc_split_config const * current_sc_split_config = &sc_split_configs[0];
+static struct sc_split_config const *current_sc_split_config = &sc_split_configs[0];
 
 /** Show callback for the @c sc_split sysfs file
  *
@@ -1812,7 +2030,7 @@ static ssize_t show_split(struct device *dev, struct device_attribute *attr, cha
        ssize_t ret;
        /* We know we are given a buffer which is PAGE_SIZE long. Our strings are all guaranteed
         * to be shorter than that at this time so no length check needed. */
-       ret = scnprintf(buf, PAGE_SIZE, "Current sc_split: '%s'\n", current_sc_split_config->tag );
+       ret = scnprintf(buf, PAGE_SIZE, "Current sc_split: '%s'\n", current_sc_split_config->tag);
        return ret;
 }
 
@@ -1831,13 +2049,11 @@ static ssize_t show_split(struct device *dev, struct device_attribute *attr, cha
  */
 static ssize_t set_split(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
-       sc_split_config const * config = &sc_split_configs[0];
+       struct sc_split_config const *config = &sc_split_configs[0];
 
        /* Try to match: loop until we hit the last "NULL" entry */
-       while( config->tag )
-       {
-               if (sysfs_streq(config->tag, buf))
-               {
+       while (config->tag) {
+               if (sysfs_streq(config->tag, buf)) {
                        current_sc_split_config = config;
                        mali_js0_affinity_mask  = config->js0_mask;
                        mali_js1_affinity_mask  = config->js1_mask;
@@ -1859,11 +2075,11 @@ static ssize_t set_split(struct device *dev, struct device_attribute *attr, cons
  * This is used for configuring/querying the current shader core work affinity
  * configuration.
  */
-DEVICE_ATTR(sc_split, S_IRUGO|S_IWUSR, show_split, set_split);
+static DEVICE_ATTR(sc_split, S_IRUGO|S_IWUSR, show_split, set_split);
 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
 
 
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
 /** Store callback for the @c js_timeouts sysfs file.
  *
  * This function is called to get the contents of the @c js_timeouts sysfs
@@ -1902,7 +2118,12 @@ static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr
        if (!kbdev)
                return -ENODEV;
 
-       items = sscanf(buf, "%lu %lu %lu %lu %lu %lu %lu %lu", &js_soft_stop_ms, &js_soft_stop_ms_cl, &js_hard_stop_ms_ss, &js_hard_stop_ms_cl, &js_hard_stop_ms_nss, &js_reset_ms_ss, &js_reset_ms_cl, &js_reset_ms_nss);
+       items = sscanf(buf, "%lu %lu %lu %lu %lu %lu %lu %lu",
+                       &js_soft_stop_ms, &js_soft_stop_ms_cl,
+                       &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
+                       &js_hard_stop_ms_nss, &js_reset_ms_ss,
+                       &js_reset_ms_cl, &js_reset_ms_nss);
+
        if (items == 8) {
                u64 ticks;
 
@@ -1938,7 +2159,7 @@ static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr
                do_div(ticks, kbdev->js_data.scheduling_tick_ns);
                kbdev->js_reset_ticks_nss = ticks;
 
-               dev_dbg( kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks, js_soft_stop_ms);
+               dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks, js_soft_stop_ms);
                dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks_cl, js_soft_stop_ms_cl);
                dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_ss, js_hard_stop_ms_ss);
                dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_cl, js_hard_stop_ms_cl);
@@ -2016,7 +2237,11 @@ static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *att
        do_div(ms, 1000000UL);
        js_reset_ms_nss = (unsigned long)ms;
 
-       ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n", js_soft_stop_ms, js_soft_stop_ms_cl, js_hard_stop_ms_ss, js_hard_stop_ms_cl, js_hard_stop_ms_nss, js_reset_ms_ss, js_reset_ms_cl, js_reset_ms_nss);
+       ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
+                       js_soft_stop_ms, js_soft_stop_ms_cl,
+                       js_hard_stop_ms_ss, js_hard_stop_ms_cl,
+                       js_hard_stop_ms_nss, js_reset_ms_ss,
+                       js_reset_ms_cl, js_reset_ms_nss);
 
        if (ret >= PAGE_SIZE) {
                buf[PAGE_SIZE - 2] = '\n';
@@ -2039,7 +2264,7 @@ static ssize_t show_js_timeouts(struct device *dev, struct device_attribute *att
  * KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL
  * KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS.
  */
-DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
+static DEVICE_ATTR(js_timeouts, S_IRUGO | S_IWUSR, show_js_timeouts, set_js_timeouts);
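Every field written to js_timeouts is converted from milliseconds to scheduler ticks before it overrides the compiled-in defaults; the conversion repeated throughout set_js_timeouts() is ticks = ms * 1000000 / scheduling_tick_ns, with do_div() handling the 64-bit division. For example (sketch, assuming the default 100 ms scheduling tick):

        u64 ticks = js_soft_stop_ms * 1000000ULL;         /* ms -> ns    */

        do_div(ticks, kbdev->js_data.scheduling_tick_ns); /* ns -> ticks */
        kbdev->js_soft_stop_ticks = ticks;   /* e.g. 500 ms -> 5 ticks
                                              * with a 100 ms tick */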
 
 
 
@@ -2139,8 +2364,8 @@ static ssize_t show_force_replay(struct device *dev, struct device_attribute *at
 /** The sysfs file @c force_replay.
  *
  */
-DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay, set_force_replay);
-#endif /* MALI_CUSTOMER_RELEASE == 0 */
+static DEVICE_ATTR(force_replay, S_IRUGO | S_IWUSR, show_force_replay, set_force_replay);
+#endif /* !MALI_CUSTOMER_RELEASE */
 
 #ifdef CONFIG_MALI_DEBUG
 static ssize_t set_js_softstop_always(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
@@ -2189,26 +2414,26 @@ static ssize_t show_js_softstop_always(struct device *dev, struct device_attribu
  * enable soft-stop when only a single context is present can be used for debug and unit-testing purposes.
  * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
  */
-DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
+static DEVICE_ATTR(js_softstop_always, S_IRUGO | S_IWUSR, show_js_softstop_always, set_js_softstop_always);
 #endif /* CONFIG_MALI_DEBUG */
 
 #ifdef CONFIG_MALI_DEBUG
-typedef void (kbasep_debug_command_func) (kbase_device *);
+typedef void (kbasep_debug_command_func) (struct kbase_device *);
 
-typedef enum {
+enum kbasep_debug_command_code {
        KBASEP_DEBUG_COMMAND_DUMPTRACE,
 
        /* This must be the last enum */
        KBASEP_DEBUG_COMMAND_COUNT
-} kbasep_debug_command_code;
+};
 
-typedef struct kbasep_debug_command {
+struct kbasep_debug_command {
        char *str;
        kbasep_debug_command_func *func;
-} kbasep_debug_command;
+};
 
 /** Debug commands supported by the driver */
-static const kbasep_debug_command debug_commands[] = {
+static const struct kbasep_debug_command debug_commands[] = {
        {
         .str = "dumptrace",
         .func = &kbasep_trace_dump,
@@ -2226,7 +2451,7 @@ static const kbasep_debug_command debug_commands[] = {
  *
  * @return The number of bytes output to @c buf.
  */
-static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char *const buf)
+static ssize_t show_debug(struct device *dev, struct device_attribute *attr, char * const buf)
 {
        struct kbase_device *kbdev;
        int i;
@@ -2291,20 +2516,20 @@ static ssize_t issue_debug(struct device *dev, struct device_attribute *attr, co
  * Reading it will produce a list of debug commands, separated by newlines.
  * Writing to it with one of those commands will issue said command.
  */
-DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
+static DEVICE_ATTR(debug_command, S_IRUGO | S_IWUSR, show_debug, issue_debug);
 #endif /* CONFIG_MALI_DEBUG */
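debug_command simply exposes the debug_commands[] table: reading the file lists every .str, and writing a matching string invokes the corresponding .func. The store path (issue_debug(), mostly outside this hunk) reduces to a table walk along these lines (sketch):

        int i;

        for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
                if (sysfs_streq(debug_commands[i].str, buf)) {
                        debug_commands[i].func(kbdev);
                        return count;   /* command consumed */
                }
        }

        dev_err(dev, "debug_command: command not known\n");
        return -EINVAL;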
 
+
 #ifdef CONFIG_MALI_NO_MALI
-static int kbase_common_reg_map(kbase_device *kbdev)
+static int kbase_common_reg_map(struct kbase_device *kbdev)
 {
        return 0;
 }
-static void kbase_common_reg_unmap(kbase_device * const kbdev)
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
 {
-       return;
 }
 #else /* CONFIG_MALI_NO_MALI */
-static int kbase_common_reg_map(kbase_device *kbdev)
+static int kbase_common_reg_map(struct kbase_device *kbdev)
 {
        int err = -ENOMEM;
 
@@ -2331,7 +2556,7 @@ static int kbase_common_reg_map(kbase_device *kbdev)
        return err;
 }
 
-static void kbase_common_reg_unmap(kbase_device * const kbdev)
+static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
 {
        iounmap(kbdev->reg);
        release_resource(kbdev->reg_res);
@@ -2339,8 +2564,7 @@ static void kbase_common_reg_unmap(kbase_device * const kbdev)
 }
 #endif /* CONFIG_MALI_NO_MALI */
 
-
-static int kbase_common_device_init(kbase_device *kbdev)
+static int kbase_common_device_init(struct kbase_device *kbdev)
 {
        int err = -ENOMEM;
        mali_error mali_err;
@@ -2352,10 +2576,10 @@ static int kbase_common_device_init(kbase_device *kbdev)
                inited_irqs = (1u << 4),
                inited_debug = (1u << 5),
                inited_js_softstop = (1u << 6),
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
                inited_js_timeouts = (1u << 7),
                inited_force_replay = (1u << 13),
-#endif /* MALI_CUSTOMER_RELEASE == 0 */
+#endif /* !MALI_CUSTOMER_RELEASE */
                inited_pm_runtime_init = (1u << 8),
 #ifdef CONFIG_DEBUG_FS
                inited_gpu_memory = (1u << 9),
@@ -2378,6 +2602,8 @@ static int kbase_common_device_init(kbase_device *kbdev)
        kbdev->mdev.fops = &kbase_fops;
        kbdev->mdev.parent = get_device(kbdev->dev);
 
+       kbase_disjoint_init(kbdev);
+
        scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name, kbase_dev_nr++);
 
        if (misc_register(&kbdev->mdev)) {
@@ -2385,7 +2611,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
                err = -EINVAL;
                goto out_misc;
        }
-
+#if KBASE_PM_EN
        if (device_create_file(kbdev->dev, &dev_attr_power_policy)) {
                dev_err(kbdev->dev, "Couldn't create power_policy sysfs file\n");
                goto out_file;
@@ -2400,7 +2626,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
                dev_err(kbdev->dev, "Couldn't create core_mask sysfs file\n");
                goto out_file_core_mask;
        }
-
+#endif /* KBASE_PM_EN */
        down(&kbase_dev_list_lock);
        list_add(&kbdev->entry, &kbase_dev_list);
        up(&kbase_dev_list_lock);
@@ -2445,8 +2671,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
        inited |= inited_irqs;
 
 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
-       if (device_create_file(kbdev->dev, &dev_attr_sc_split))
-       {
+       if (device_create_file(kbdev->dev, &dev_attr_sc_split)) {
                dev_err(kbdev->dev, "Couldn't create sc_split sysfs file\n");
                goto out_partial;
        }
@@ -2477,7 +2702,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
        inited |= inited_js_softstop;
 #endif /* CONFIG_MALI_DEBUG */
 
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
        if (device_create_file(kbdev->dev, &dev_attr_js_timeouts)) {
                dev_err(kbdev->dev, "Couldn't create js_timeouts sysfs file\n");
                goto out_partial;
@@ -2489,7 +2714,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
                goto out_partial;
        }
        inited |= inited_force_replay;
-#endif /* MALI_CUSTOMER_RELEASE */
+#endif /* !MALI_CUSTOMER_RELEASE */
 
 #ifdef CONFIG_MALI_TRACE_TIMELINE
        if (kbasep_trace_timeline_debugfs_init(kbdev)) {
@@ -2499,11 +2724,15 @@ static int kbase_common_device_init(kbase_device *kbdev)
        inited |= inited_timeline;
 #endif /* CONFIG_MALI_TRACE_TIMELINE */
 
+#ifdef CONFIG_MALI_DEVFREQ
+       kbase_devfreq_init(kbdev);
+#endif
+
        mali_err = kbase_pm_powerup(kbdev);
        if (MALI_ERROR_NONE == mali_err) {
                inited |= inited_pm_powerup;
 #ifdef CONFIG_MALI_DEBUG
-#ifndef CONFIG_MALI_NO_MALI
+#if !defined(CONFIG_MALI_NO_MALI)
                if (MALI_ERROR_NONE != kbasep_common_test_interrupt_handlers(kbdev)) {
                        dev_err(kbdev->dev, "Interrupt assignment check failed.\n");
                        err = -EINVAL;
@@ -2511,7 +2740,6 @@ static int kbase_common_device_init(kbase_device *kbdev)
                }
 #endif /* CONFIG_MALI_NO_MALI */
 #endif /* CONFIG_MALI_DEBUG */
-
                /* initialise the kctx list */
                mutex_init(&kbdev->kctx_list_lock);
                INIT_LIST_HEAD(&kbdev->kctx_list);
@@ -2527,12 +2755,12 @@ static int kbase_common_device_init(kbase_device *kbdev)
        if (inited & inited_timeline)
                kbasep_trace_timeline_debugfs_term(kbdev);
 #endif /* CONFIG_MALI_TRACE_TIMELINE */
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
        if (inited & inited_force_replay)
                device_remove_file(kbdev->dev, &dev_attr_force_replay);
        if (inited & inited_js_timeouts)
                device_remove_file(kbdev->dev, &dev_attr_js_timeouts);
-#endif /* MALI_CUSTOMER_RELEASE */
+#endif /* !MALI_CUSTOMER_RELEASE */
 #ifdef CONFIG_MALI_DEBUG
        if (inited & inited_js_softstop)
                device_remove_file(kbdev->dev, &dev_attr_js_softstop_always);
@@ -2549,9 +2777,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
 
 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
        if (inited & inited_sc_split)
-       {
                device_remove_file(kbdev->dev, &dev_attr_sc_split);
-       }
 #endif /* CONFIG_MALI_DEBUG_SHADER_SPLIT_FS */
 
        if (inited & inited_js)
@@ -2589,24 +2815,26 @@ static int kbase_common_device_init(kbase_device *kbdev)
        down(&kbase_dev_list_lock);
        list_del(&kbdev->entry);
        up(&kbase_dev_list_lock);
-
+#if KBASE_PM_EN
        device_remove_file(kbdev->dev, &dev_attr_core_mask);
  out_file_core_mask:
        device_remove_file(kbdev->dev, &dev_attr_core_availability_policy);
  out_file_core_availability_policy:
        device_remove_file(kbdev->dev, &dev_attr_power_policy);
  out_file:
+#endif /*KBASE_PM_EN*/
        misc_deregister(&kbdev->mdev);
  out_misc:
        put_device(kbdev->dev);
        return err;
 }
 
+
 static int kbase_platform_device_probe(struct platform_device *pdev)
 {
        struct kbase_device *kbdev;
        struct resource *reg_res;
-       kbase_attribute *platform_data;
+       struct kbase_attribute *platform_data;
        int err;
        int i;
        struct mali_base_gpu_core_props *core_props;
@@ -2614,12 +2842,9 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
        mali_error mali_err;
 #endif /* CONFIG_MALI_NO_MALI */
 #ifdef CONFIG_OF
-       kbase_platform_config *config;
+       struct kbase_platform_config *config;
        int attribute_count;
 
-/*#ifdef CONFIG_MALI_PLATFORM_FAKE*/
-#if 1
-/*defined(CONFIG_MALI_PLATFORM_FAKE) || defined(CONFIG_MALI_PLATFORM_FAKE_MODULE)*/
        config = kbase_get_platform_config();
        attribute_count = kbasep_get_config_attribute_count(config->attributes);
 
@@ -2627,7 +2852,6 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
                        attribute_count * sizeof(config->attributes[0]));
        if (err)
                return err;
-#endif /* CONFIG_MALI_PLATFORM_FAKE */
 #endif /* CONFIG_OF */
 
        kbdev = kbase_device_alloc();
@@ -2646,7 +2870,7 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
 #endif /* CONFIG_MALI_NO_MALI */
 
        kbdev->dev = &pdev->dev;
-       platform_data = (kbase_attribute *) kbdev->dev->platform_data;
+       platform_data = (struct kbase_attribute *)kbdev->dev->platform_data;
 
        if (NULL == platform_data) {
                dev_err(kbdev->dev, "Platform data not specified\n");
@@ -2674,13 +2898,13 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
                }
 
 #ifdef CONFIG_OF
-               if (!strcmp(irq_res->name, "JOB"))
+               if (!strcmp(irq_res->name, "JOB")) {
                        irqtag = JOB_IRQ_TAG;
-               else if (!strcmp(irq_res->name, "MMU"))
+               } else if (!strcmp(irq_res->name, "MMU")) {
                        irqtag = MMU_IRQ_TAG;
-               else if (!strcmp(irq_res->name, "GPU"))
+               } else if (!strcmp(irq_res->name, "GPU")) {
                        irqtag = GPU_IRQ_TAG;
-               else {
+               } else {
                        dev_err(&pdev->dev, "Invalid irq res name: '%s'\n",
                                irq_res->name);
                        err = -EINVAL;
@@ -2708,25 +2932,51 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
        if (err)
                goto out_free_dev;
 
+       kbdev->clock = clk_get(kbdev->dev, "clk_mali");
+       if (IS_ERR_OR_NULL(kbdev->clock)) {
+               dev_info(kbdev->dev, "Continuing without Mali clock control\n");
+               kbdev->clock = NULL;
+               /* Allow probe to continue without clock. */
+       } else {
+               err = clk_prepare_enable(kbdev->clock);
+               if (err) {
+                       dev_err(kbdev->dev,
+                               "Failed to prepare and enable clock (%d)\n", err);
+                       goto out_clock_get;
+               }
+       }
+
 #ifdef CONFIG_DEBUG_FS
        kbdev->mali_debugfs_directory = debugfs_create_dir("mali", NULL);
        if (NULL == kbdev->mali_debugfs_directory) {
                dev_err(kbdev->dev, "Couldn't create mali debugfs directory\n");
-               goto out_reg_unmap;
+               goto out_clock_enable;
+       }
+       kbdev->memory_profile_directory = debugfs_create_dir("mem",
+                       kbdev->mali_debugfs_directory);
+       if (NULL == kbdev->memory_profile_directory) {
+               dev_err(kbdev->dev, "Couldn't create mali mem debugfs directory\n");
+               goto out_mali_debugfs_remove;
+       }
+       if (kbasep_jd_debugfs_init(kbdev)) {
+               dev_err(kbdev->dev, "Couldn't create mali jd debugfs entries\n");
+               goto out_mem_profile_remove;
        }
 #endif /* CONFIG_DEBUG_FS */
 
+
        if (MALI_ERROR_NONE != kbase_device_init(kbdev)) {
                dev_err(kbdev->dev, "Can't initialize device\n");
+
                err = -ENOMEM;
                goto out_debugfs_remove;
        }
 
        /* obtain min/max configured gpu frequencies */
        core_props = &(kbdev->gpu_props.props.core_props);
-       core_props->gpu_freq_khz_min = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN);
-       core_props->gpu_freq_khz_max = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX);
-       kbdev->gpu_props.irq_throttle_time_us = kbasep_get_config_value(kbdev, platform_data, KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US);
+       core_props->gpu_freq_khz_min = GPU_FREQ_KHZ_MIN;
+       core_props->gpu_freq_khz_max = GPU_FREQ_KHZ_MAX;
+       kbdev->gpu_props.irq_throttle_time_us = DEFAULT_IRQ_THROTTLE_TIME_US;
 
        err = kbase_common_device_init(kbdev);
        if (err) {
@@ -2739,9 +2989,16 @@ out_term_dev:
        kbase_device_term(kbdev);
 out_debugfs_remove:
 #ifdef CONFIG_DEBUG_FS
+       kbasep_jd_debugfs_term(kbdev);
+out_mem_profile_remove:
+       debugfs_remove(kbdev->memory_profile_directory);
+out_mali_debugfs_remove:
        debugfs_remove(kbdev->mali_debugfs_directory);
-out_reg_unmap:
+out_clock_enable:
 #endif /* CONFIG_DEBUG_FS */
+       clk_disable_unprepare(kbdev->clock);
+out_clock_get:
+       clk_put(kbdev->clock);
        kbase_common_reg_unmap(kbdev);
 out_free_dev:
 #ifdef CONFIG_MALI_NO_MALI
@@ -2755,14 +3012,18 @@ out:
 
 static int kbase_common_device_remove(struct kbase_device *kbdev)
 {
+#ifdef CONFIG_MALI_DEVFREQ
+       kbase_devfreq_term(kbdev);
+#endif
+
        if (kbdev->pm.callback_power_runtime_term)
                kbdev->pm.callback_power_runtime_term(kbdev);
-
+#if KBASE_PM_EN
        /* Remove the sys power policy file */
        device_remove_file(kbdev->dev, &dev_attr_power_policy);
        device_remove_file(kbdev->dev, &dev_attr_core_availability_policy);
        device_remove_file(kbdev->dev, &dev_attr_core_mask);
-
+#endif
 #ifdef CONFIG_MALI_TRACE_TIMELINE
        kbasep_trace_timeline_debugfs_term(kbdev);
 #endif /* CONFIG_MALI_TRACE_TIMELINE */
@@ -2771,10 +3032,10 @@ static int kbase_common_device_remove(struct kbase_device *kbdev)
        device_remove_file(kbdev->dev, &dev_attr_js_softstop_always);
        device_remove_file(kbdev->dev, &dev_attr_debug_command);
 #endif /* CONFIG_MALI_DEBUG */
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
        device_remove_file(kbdev->dev, &dev_attr_js_timeouts);
        device_remove_file(kbdev->dev, &dev_attr_force_replay);
-#endif /* MALI_CUSTOMER_RELEASE */
+#endif /* !MALI_CUSTOMER_RELEASE */
 #ifdef CONFIG_DEBUG_FS
        kbasep_gpu_memory_debugfs_term(kbdev);
 #endif
@@ -2804,8 +3065,15 @@ static int kbase_common_device_remove(struct kbase_device *kbdev)
        kbase_common_reg_unmap(kbdev);
        kbase_device_term(kbdev);
 #ifdef CONFIG_DEBUG_FS
+       kbasep_jd_debugfs_term(kbdev);
+       debugfs_remove(kbdev->memory_profile_directory);
        debugfs_remove(kbdev->mali_debugfs_directory);
 #endif /* CONFIG_DEBUG_FS */
+       if (kbdev->clock) {
+               clk_disable_unprepare(kbdev->clock);
+               clk_put(kbdev->clock);
+               kbdev->clock = NULL;
+       }
 #ifdef CONFIG_MALI_NO_MALI
        midg_device_destroy(kbdev);
 #endif /* CONFIG_MALI_NO_MALI */
@@ -2839,6 +3107,11 @@ static int kbase_device_suspend(struct device *dev)
        if (!kbdev)
                return -ENODEV;
 
+#if defined(CONFIG_PM_DEVFREQ) && \
+               (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       devfreq_suspend_device(kbdev->devfreq);
+#endif
+
        kbase_pm_suspend(kbdev);
        return 0;
 }
@@ -2859,6 +3132,11 @@ static int kbase_device_resume(struct device *dev)
                return -ENODEV;
 
        kbase_pm_resume(kbdev);
+
+#if defined(CONFIG_PM_DEVFREQ) && \
+               (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       devfreq_resume_device(kbdev->devfreq);
+#endif
        return 0;
 }
 
@@ -2879,6 +3157,11 @@ static int kbase_device_runtime_suspend(struct device *dev)
        if (!kbdev)
                return -ENODEV;
 
+#if defined(CONFIG_PM_DEVFREQ) && \
+               (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       devfreq_suspend_device(kbdev->devfreq);
+#endif
+
        if (kbdev->pm.callback_power_runtime_off) {
                kbdev->pm.callback_power_runtime_off(kbdev);
                dev_dbg(dev, "runtime suspend\n");
@@ -2909,6 +3192,12 @@ int kbase_device_runtime_resume(struct device *dev)
                ret = kbdev->pm.callback_power_runtime_on(kbdev);
                dev_dbg(dev, "runtime resume\n");
        }
+
+#if defined(CONFIG_PM_DEVFREQ) && \
+               (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+       devfreq_resume_device(kbdev->devfreq);
+#endif
+
        return ret;
 }
 #endif /* CONFIG_PM_RUNTIME */
@@ -3022,9 +3311,12 @@ module_exit(kbase_driver_exit);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MALI_RELEASE_NAME);
 
+#if defined(CONFIG_MALI_GATOR_SUPPORT) || defined(CONFIG_MALI_SYSTEM_TRACE)
+#define CREATE_TRACE_POINTS
+#endif
+
 #ifdef CONFIG_MALI_GATOR_SUPPORT
 /* Create the trace points (otherwise we just get code to call a tracepoint) */
-#define CREATE_TRACE_POINTS
 #include "mali_linux_trace.h"
 
 void kbase_trace_mali_pm_status(u32 event, u64 value)
@@ -3042,7 +3334,7 @@ void kbase_trace_mali_pm_power_on(u32 event, u64 value)
        trace_mali_pm_power_on(event, value);
 }
 
-void kbase_trace_mali_job_slots_event(u32 event, const kbase_context *kctx, u8 atom_id)
+void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id)
 {
        trace_mali_job_slots_event(event, (kctx != NULL ? kctx->tgid : 0), (kctx != NULL ? kctx->pid : 0), atom_id);
 }
@@ -3067,3 +3359,6 @@ void kbase_trace_mali_total_alloc_pages_change(long long int event)
        trace_mali_total_alloc_pages_change(event);
 }
 #endif /* CONFIG_MALI_GATOR_SUPPORT */
+#ifdef CONFIG_MALI_SYSTEM_TRACE
+#include "mali_linux_kbase_trace.h"
+#endif
index b37d22ae72b465829a46cd893966b95cfce44ad5..79e73b0d4aa57fa16940ff9cec0444c70c06ca44 100755 (executable)
  * @brief Macros used to extract cpu id info
  * @see Doc's for Main ID register
  */
-#define KBASE_CPUPROPS_ID_GET_REV(cpuid)    (  (cpuid) & 0x0F         )  /* [3:0]   Revision                            */
-#define KBASE_CPUPROPS_ID_GET_PART_NR(cpuid)( ((cpuid) >>  4) & 0xFFF )  /* [15:4]  Part number                         */
-#define KBASE_CPUPROPS_ID_GET_ARCH(cpuid)   ( ((cpuid) >> 16) & 0x0F  )  /* [19:16] Architecture                        */
-#define KBASE_CPUPROPS_ID_GET_VARIANT(cpuid)( ((cpuid) >> 20) & 0x0F  )  /* [23:20] Variant                             */
-#define KBASE_CPUPROPS_ID_GET_CODE(cpuid)   ( ((cpuid) >> 24) & 0xFF  )  /* [31:23] ASCII code of implementer trademark */
+#define KBASE_CPUPROPS_ID_GET_REV(cpuid)    ((cpuid) & 0x0F)          /* [3:0]   Revision                            */
+#define KBASE_CPUPROPS_ID_GET_PART_NR(cpuid)(((cpuid) >>  4) & 0xFFF) /* [15:4]  Part number                         */
+#define KBASE_CPUPROPS_ID_GET_ARCH(cpuid)   (((cpuid) >> 16) & 0x0F)  /* [19:16] Architecture                        */
+#define KBASE_CPUPROPS_ID_GET_VARIANT(cpuid)(((cpuid) >> 20) & 0x0F)  /* [23:20] Variant                             */
+#define KBASE_CPUPROPS_ID_GET_CODE(cpuid)   (((cpuid) >> 24) & 0xFF)  /* [31:24] ASCII code of implementer trademark */
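As a quick sanity check of the field boundaries, feeding a hypothetical Main ID register value of 0x410FC0F0 (a Cortex-A15 r0p0) through these macros gives:

/* KBASE_CPUPROPS_ID_GET_CODE(0x410FC0F0)    == 0x41  ('A', ARM Ltd.)  */
/* KBASE_CPUPROPS_ID_GET_VARIANT(0x410FC0F0) == 0x0                    */
/* KBASE_CPUPROPS_ID_GET_ARCH(0x410FC0F0)    == 0xF                    */
/* KBASE_CPUPROPS_ID_GET_PART_NR(0x410FC0F0) == 0xC0F (Cortex-A15)     */
/* KBASE_CPUPROPS_ID_GET_REV(0x410FC0F0)     == 0x0                    */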
 
 /*Below value sourced from OSK*/
 #define L1_DCACHE_SIZE ((u32)0x00008000)
@@ -55,7 +55,7 @@
  *
  */
 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 
-static void kbasep_cpuprops_uk_get_cpu_id_info(kbase_uk_cpuprops * const kbase_props)
+static void kbasep_cpuprops_uk_get_cpu_id_info(struct kbase_uk_cpuprops * const kbase_props)
 {
        kbase_props->props.cpu_id.id           = read_cpuid_id();
 
@@ -67,7 +67,7 @@ static void kbasep_cpuprops_uk_get_cpu_id_info(kbase_uk_cpuprops * const kbase_p
        kbase_props->props.cpu_id.implementer  = KBASE_CPUPROPS_ID_GET_CODE(kbase_props->props.cpu_id.id);
 }
 #else
-static void kbasep_cpuprops_uk_get_cpu_id_info(kbase_uk_cpuprops * const kbase_props)
+static void kbasep_cpuprops_uk_get_cpu_id_info(struct kbase_uk_cpuprops * const kbase_props)
 {
        kbase_props->props.cpu_id.id           = 0;
        kbase_props->props.cpu_id.valid        = 0;
@@ -87,7 +87,7 @@ int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed)
        return 0;
 }
 
-mali_error kbase_cpuprops_uk_get_props(kbase_context *kctx, kbase_uk_cpuprops * const kbase_props)
+mali_error kbase_cpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_cpuprops * const kbase_props)
 {
        unsigned int max_cpu_freq;
 
@@ -102,11 +102,11 @@ mali_error kbase_cpuprops_uk_get_props(kbase_context *kctx, kbase_uk_cpuprops *
        kbasep_cpuprops_uk_get_cpu_id_info(kbase_props);
 
        /* check if kernel supports dynamic frequency scaling */
-       max_cpu_freq = cpufreq_quick_get_max( KBASE_DEFAULT_CPU_NUM );
-       if ( max_cpu_freq != 0 )
+       max_cpu_freq = cpufreq_quick_get_max(KBASE_DEFAULT_CPU_NUM);
+       if (max_cpu_freq != 0)
        {
                /* convert from kHz to MHz */
-               kbase_props->props.max_cpu_clock_speed_mhz = max_cpu_freq / 1000 ;
+               kbase_props->props.max_cpu_clock_speed_mhz = max_cpu_freq / 1000;
        }
        else 
        {
index 0f669b706dd73dc85c2f0887b74d419dff4e1100..4f88550921a122d5b2d3e1ad8962d843d225bd57 100755 (executable)
@@ -44,13 +44,13 @@ int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed);
 /**
  * @brief Provides CPU properties data.
  *
- * Fill the kbase_uk_cpuprops with values from CPU configuration.
+ * Fill the struct kbase_uk_cpuprops with values from CPU configuration.
  *
  * @param kctx         The kbase context
- * @param kbase_props  A copy of the kbase_uk_cpuprops structure from userspace
+ * @param kbase_props  A copy of the struct kbase_uk_cpuprops structure from userspace
  *
  * @return MALI_ERROR_NONE on success. Any other value indicates failure.
  */
-mali_error kbase_cpuprops_uk_get_props(kbase_context *kctx, struct kbase_uk_cpuprops * const kbase_props);
+mali_error kbase_cpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_cpuprops * const kbase_props);
 
 #endif /*_KBASE_CPUPROPS_H_*/
index 247ca40bb6cb6b28180478dfd601720e6b650d1c..ce571c9970be08f235122a714eae82379785fa63 100755 (executable)
@@ -19,7 +19,7 @@
 
 #include <mali_kbase.h>
 
-kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = {
+static struct kbasep_debug_assert_cb kbasep_debug_assert_registered_cb = {
        NULL,
        NULL
 };
index 8e388ec2c162c0102fccbd4f33744d282fa74d32..e4ced8c76145507fdae6ec633a136cd9e02e79c9 100755 (executable)
@@ -50,10 +50,10 @@ typedef struct kbasep_debug_assert_cb {
  * @brief Private macro containing the format of the trace to display before every message
  * @sa KBASE_DEBUG_SKIP_TRACE, KBASE_DEBUG_SKIP_FUNCTION_NAME
  */
-#if KBASE_DEBUG_SKIP_TRACE == 0
+#if !KBASE_DEBUG_SKIP_TRACE
 #define KBASEP_DEBUG_PRINT_TRACE \
                "In file: " __FILE__ " line: " CSTD_STR2(__LINE__)
-#if KBASE_DEBUG_SKIP_FUNCTION_NAME == 0
+#if !KBASE_DEBUG_SKIP_FUNCTION_NAME
 #define KBASEP_DEBUG_PRINT_FUNCTION CSTD_FUNC
 #else
 #define KBASEP_DEBUG_PRINT_FUNCTION ""
@@ -113,7 +113,7 @@ typedef struct kbasep_debug_assert_cb {
         */
 #define KBASE_DEBUG_ASSERT_MSG(expr, ...) \
                do { \
-                       if (MALI_FALSE == (expr)) { \
+                       if (!(expr)) { \
                                KBASEP_DEBUG_ASSERT_OUT(KBASEP_DEBUG_PRINT_TRACE, KBASEP_DEBUG_PRINT_FUNCTION, __VA_ARGS__);\
                                KBASE_CALL_ASSERT_HOOK();\
                                BUG();\
index e01d01aa9d9e5fa745eef62ca1bb886d4fd0a460..cb17474b828560663ec6ccdfdf19aa4647501514 100755 (executable)
@@ -31,6 +31,7 @@
 #include <mali_base_hwconfig.h>
 #include <mali_kbase_mem_lowlevel.h>
 #include <mali_kbase_mem_alloc.h>
+#include <mali_kbase_mmu_hw.h>
 
 
 #include <linux/atomic.h>
 #include "sync.h"
 #endif                         /* CONFIG_SYNC */
 
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif                         /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_PM_DEVFREQ
+#include <linux/devfreq.h>
+#endif /* CONFIG_PM_DEVFREQ */
+
 /** Enable SW tracing when set */
 #ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE
 #define KBASE_TRACE_ENABLE 1
@@ -84,7 +93,6 @@
  * @note if not in use, define this value to 0 instead of \#undef'ing it
  */
 #define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
-
 /**
  * Prevent hard-stops from occurring in scheduling situations
  *
  */
 #define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
 
-/* Forward declarations+defintions */
-typedef struct kbase_context kbase_context;
-typedef struct kbase_jd_atom kbasep_jd_atom;
-typedef struct kbase_device kbase_device;
-
 /**
  * The maximum number of Job Slots to support in the Hardware.
  *
@@ -159,63 +162,34 @@ typedef struct kbase_device kbase_device;
 /* Maximum force replay limit when randomization is enabled */
 #define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16
 
-/**
- * @brief States to model state machine processed by kbasep_js_job_check_ref_cores(), which
- * handles retaining cores for power management and affinity management.
- *
- * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
- * where lots of atoms could be submitted before powerup, and each has an
- * affinity chosen that causes other atoms to have an affinity
- * violation. Whilst the affinity was not causing violations at the time it
- * was chosen, it could cause violations thereafter. For example, 1000 jobs
- * could have had their affinity chosen during the powerup time, so any of
- * those 1000 jobs could cause an affinity violation later on.
- *
- * The attack would otherwise occur because other atoms/contexts have to wait for:
- * -# the currently running atoms (which are causing the violation) to
- * finish
- * -# and, the atoms that had their affinity chosen during powerup to
- * finish. These are run preferrentially because they don't cause a
- * violation, but instead continue to cause the violation in others.
- * -# or, the attacker is scheduled out (which might not happen for just 2
- * contexts)
- *
- * By re-choosing the affinity (which is designed to avoid violations at the
- * time it's chosen), we break condition (2) of the wait, which minimizes the
- * problem to just waiting for current jobs to finish (which can be bounded if
- * the Job Scheduling Policy has a timer).
- */
-typedef enum {
-       /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
-       KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
-       /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
-       KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
-       /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
-       KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
-       /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
-       KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
-       /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
-       KBASE_ATOM_COREREF_STATE_READY
-} kbase_atom_coreref_state;
-
-typedef enum {
-       /** Atom is not used */
-       KBASE_JD_ATOM_STATE_UNUSED,
-       /** Atom is queued in JD */
-       KBASE_JD_ATOM_STATE_QUEUED,
-       /** Atom has been given to JS (is runnable/running) */
-       KBASE_JD_ATOM_STATE_IN_JS,
-       /** Atom has been completed, but not yet handed back to userspace */
-       KBASE_JD_ATOM_STATE_COMPLETED
-} kbase_jd_atom_state;
-
 /** Atom has been previously soft-stopped */
 #define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
 /** Atom has been previously retried to execute */
 #define KBASE_KATOM_FLAGS_RERUN (1<<2)
 #define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
+/** Atom has been previously hard-stopped. */
+#define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4)
+/** Atom has caused us to enter disjoint state */
+#define KBASE_KATOM_FLAG_IN_DISJOINT (1<<5)
+
+/* SW related flags about types of JS_COMMAND action
+ * NOTE: These must be masked off by JS_COMMAND_MASK */
+
+/** This command causes a disjoint event */
+#define JS_COMMAND_SW_CAUSES_DISJOINT 0x100
+
+/** Bitmask of all SW related flags */
+#define JS_COMMAND_SW_BITS  (JS_COMMAND_SW_CAUSES_DISJOINT)
+
+#if (JS_COMMAND_SW_BITS & JS_COMMAND_MASK)
+#error JS_COMMAND_SW_BITS not masked off by JS_COMMAND_MASK. Must update JS_COMMAND_SW_<..> bitmasks
+#endif
+
+/** Soft-stop command that causes a Disjoint event. This of course isn't
+ *  entirely masked off by JS_COMMAND_MASK */
+#define JS_COMMAND_SOFT_STOP_WITH_SW_DISJOINT \
+               (JS_COMMAND_SW_CAUSES_DISJOINT | JS_COMMAND_SOFT_STOP)
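Because the software-only bits sit above JS_COMMAND_MASK, the job-slot code can split a requested action into what the hardware register sees and what only the driver acts on, roughly like this (illustrative split, not a quote from the job-slot code):

        u32 hw_action = action & JS_COMMAND_MASK;   /* written to the JS command register */

        if (action & JS_COMMAND_SW_CAUSES_DISJOINT) {
                /* software-only side effect, e.g. marking the GPU disjoint,
                 * before hw_action (a plain JS_COMMAND_SOFT_STOP) is issued */
        }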
 
-typedef struct kbase_jd_atom kbase_jd_atom;
 
 struct kbase_jd_atom_dependency
 {
@@ -302,9 +276,10 @@ struct kbase_ext_res
 struct kbase_jd_atom {
        struct work_struct work;
        ktime_t start_timestamp;
+       u64 time_spent_us; /**< Total time spent on the GPU in microseconds */
 
-       base_jd_udata udata;
-       kbase_context *kctx;
+       struct base_jd_udata udata;
+       struct kbase_context *kctx;
 
        struct list_head dep_head[2];
        struct list_head dep_item[2];
@@ -316,7 +291,7 @@ struct kbase_jd_atom {
        u32 device_nr;
        u64 affinity;
        u64 jc;
-       kbase_atom_coreref_state coreref_state;
+       enum kbase_atom_coreref_state coreref_state;
 #ifdef CONFIG_KDS
        struct list_head node;
        struct kds_resource_set *kds_rset;
@@ -328,21 +303,21 @@ struct kbase_jd_atom {
 #endif                         /* CONFIG_SYNC */
 
        /* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
-       base_jd_event_code event_code;
+       enum base_jd_event_code event_code;
        base_jd_core_req core_req;          /**< core requirements */
        /** Job Slot to retry submitting to if submission from IRQ handler failed
         *
         * NOTE: see if this can be unified into the another member e.g. the event */
        int retry_submit_on_slot;
 
-       kbasep_js_policy_job_info sched_info;
+       union kbasep_js_policy_job_info sched_info;
        /* atom priority scaled to nice range with +20 offset 0..39 */
        int nice_prio;
 
        int poking;             /* BASE_HW_ISSUE_8316 */
 
        wait_queue_head_t completed;
-       kbase_jd_atom_state status;
+       enum kbase_jd_atom_state status;
 #ifdef CONFIG_GPU_TRACEPOINTS
        int work_id;
 #endif
@@ -366,10 +341,10 @@ struct kbase_jd_atom {
 
 #define KBASE_JD_DEP_QUEUE_SIZE 256
 
-typedef struct kbase_jd_context {
+struct kbase_jd_context {
        struct mutex lock;
-       kbasep_js_kctx_info sched_info;
-       kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
+       struct kbasep_js_kctx_info sched_info;
+       struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
 
        /** Tracks all job-dispatch jobs.  This includes those not tracked by
         * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
@@ -411,34 +386,26 @@ typedef struct kbase_jd_context {
 #ifdef CONFIG_GPU_TRACEPOINTS
        atomic_t work_id;
 #endif
-} kbase_jd_context;
+};
 
-typedef struct kbase_jm_slot {
+struct kbase_jm_slot {
        /* The number of slots must be a power of two */
 #define BASE_JM_SUBMIT_SLOTS        16
 #define BASE_JM_SUBMIT_SLOTS_MASK   (BASE_JM_SUBMIT_SLOTS - 1)
 
        struct kbase_jd_atom *submitted[BASE_JM_SUBMIT_SLOTS];
 
-       kbase_context *last_context;
+       struct kbase_context *last_context;
 
        u8 submitted_head;
        u8 submitted_nr;
        u8 job_chain_flag;
 
-} kbase_jm_slot;
-
-typedef enum kbase_midgard_type {
-       KBASE_MALI_T601,
-       KBASE_MALI_T604,
-       KBASE_MALI_T608,
-       KBASE_MALI_COUNT
-} kbase_midgard_type;
+};
 
-typedef struct kbase_device_info {
-       kbase_midgard_type dev_type;
+struct kbase_device_info {
        u32 features;
-} kbase_device_info;
+};
 
 /** Poking state for BASE_HW_ISSUE_8316  */
 enum {
@@ -449,23 +416,31 @@ enum {
 /** Poking state for BASE_HW_ISSUE_8316  */
 typedef u32 kbase_as_poke_state;
 
+struct kbase_mmu_setup {
+       u64     transtab;
+       u64     memattr;
+};
+
 /**
- * Important: Our code makes assumptions that a kbase_as structure is always at
+ * Important: Our code makes assumptions that a struct kbase_as structure is always at
  * kbase_device->as[number]. This is used to recover the containing
- * kbase_device from a kbase_as structure.
+ * struct kbase_device from a struct kbase_as structure.
  *
- * Therefore, kbase_as structures must not be allocated anywhere else.
+ * Therefore, struct kbase_as structures must not be allocated anywhere else.
  */
-typedef struct kbase_as {
+struct kbase_as {
        int number;
 
        struct workqueue_struct *pf_wq;
        struct work_struct work_pagefault;
        struct work_struct work_busfault;
-       mali_addr64 fault_addr;
+       enum kbase_mmu_fault_type fault_type;
        u32 fault_status;
+       mali_addr64 fault_addr;
        struct mutex transaction_mutex;
 
+       struct kbase_mmu_setup current_setup;
+
        /* BASE_HW_ISSUE_8316  */
        struct workqueue_struct *poke_wq;
        struct work_struct poke_work;
@@ -474,12 +449,22 @@ typedef struct kbase_as {
        /** Protected by kbasep_js_device_data::runpool_irq::lock */
        kbase_as_poke_state poke_state;
        struct hrtimer poke_timer;
-} kbase_as;
+};
+
+static inline int kbase_as_has_bus_fault(struct kbase_as *as)
+{
+       return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS;
+}
+
+static inline int kbase_as_has_page_fault(struct kbase_as *as)
+{
+       return as->fault_type == KBASE_MMU_FAULT_TYPE_PAGE;
+}
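The invariant spelled out above, that every struct kbase_as lives inside kbase_device::as[], is what lets fault handling walk back from an address space to its owning device. The recovery is a single container_of() (the helper name below is illustrative; the driver open-codes the expression where it needs it):

static inline struct kbase_device *kbase_as_to_kbdev(struct kbase_as *as)
{
        /* Only valid because kbase_as structures are never allocated
         * anywhere other than kbase_device::as[], as noted above. */
        return container_of(as, struct kbase_device, as[as->number]);
}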
 
 /**
  * Instrumentation State Machine States
  */
-typedef enum {
+enum kbase_instr_state {
        /** State where instrumentation is not active */
        KBASE_INSTR_STATE_DISABLED = 0,
        /** State machine is active and ready for a command. */
@@ -499,19 +484,22 @@ typedef enum {
        KBASE_INSTR_STATE_RESETTING,
        /** An error has occurred during DUMPING (page fault). */
        KBASE_INSTR_STATE_FAULT
-} kbase_instr_state;
+};
+
+void kbasep_reset_timeout_worker(struct work_struct *data);
+enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *data);
 
-typedef struct kbasep_mem_device {
+struct kbasep_mem_device {
        atomic_t used_pages;   /* Tracks usage of OS shared memory. Updated
                                   when OS memory is allocated/freed. */
 
-} kbasep_mem_device;
+};
 
 
 
 #define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
 
-typedef enum {
+enum kbase_trace_code {
        /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
         * THIS MUST BE USED AT THE START OF THE ENUM */
 #define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
@@ -521,12 +509,12 @@ typedef enum {
        ,
        /* Must be the last in the enum */
        KBASE_TRACE_CODE_COUNT
-} kbase_trace_code;
+};
 
 #define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
 #define KBASE_TRACE_FLAG_JOBSLOT  (((u8)1) << 1)
 
-typedef struct kbase_trace {
+struct kbase_trace {
        struct timespec timestamp;
        u32 thread_id;
        u32 cpu;
@@ -540,7 +528,7 @@ typedef struct kbase_trace {
        u8 jobslot;
        u8 refcount;
        u8 flags;
-} kbase_trace;
+};
 
 /** Event IDs for the power management framework.
  *
@@ -548,7 +536,7 @@ typedef struct kbase_trace {
  * find the precise state of the GPU at a particular time in the
  * trace. Overall, we should get a high percentage of these events for
  * statisical purposes, and so a few missing should not be a problem */
-typedef enum kbase_timeline_pm_event {
+enum kbase_timeline_pm_event {
        /* helper for tests */
        KBASEP_TIMELINE_PM_EVENT_FIRST,
 
@@ -594,15 +582,15 @@ typedef enum kbase_timeline_pm_event {
        KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
 
        KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
-} kbase_timeline_pm_event;
+};
 
 #ifdef CONFIG_MALI_TRACE_TIMELINE
-typedef struct kbase_trace_kctx_timeline {
+struct kbase_trace_kctx_timeline {
        atomic_t jd_atoms_in_flight;
        u32 owner_tgid;
-} kbase_trace_kctx_timeline;
+};
 
-typedef struct kbase_trace_kbdev_timeline {
+struct kbase_trace_kbdev_timeline {
        /** DebugFS entry */
        struct dentry *dentry;
 
@@ -624,20 +612,20 @@ typedef struct kbase_trace_kbdev_timeline {
         * L2 transition state - MALI_TRUE indicates that the transition is ongoing
         * Expected to be protected by pm.power_change_lock */
        mali_bool l2_transitioning;
-} kbase_trace_kbdev_timeline;
+};
 #endif /* CONFIG_MALI_TRACE_TIMELINE */
 
 
-typedef struct kbasep_kctx_list_element {
+struct kbasep_kctx_list_element {
        struct list_head link;
-       kbase_context    *kctx;
-} kbasep_kctx_list_element;
+       struct kbase_context    *kctx;
+};
 
 #define DEVNAME_SIZE   16
 
 struct kbase_device {
        /** jm_slots is protected by kbasep_js_device_data::runpool_irq::lock */
-       kbase_jm_slot jm_slots[BASE_JM_MAX_NR_SLOTS];
+       struct kbase_jm_slot jm_slots[BASE_JM_MAX_NR_SLOTS];
        s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS];
 
        struct list_head entry;
@@ -652,6 +640,9 @@ struct kbase_device {
                int irq;
                int flags;
        } irqs[3];
+#ifdef CONFIG_HAVE_CLK
+       struct clk *clock;
+#endif
        char devname[DEVNAME_SIZE];
 
 #ifdef CONFIG_MALI_NO_MALI
@@ -664,11 +655,10 @@ struct kbase_device {
        spinlock_t reg_op_lock;
 #endif                         /* CONFIG_MALI_NO_MALI */
 
-       kbase_pm_device_data pm;
-       kbasep_js_device_data js_data;
-       kbasep_mem_device memdev;
-
-       kbase_as as[BASE_MAX_NR_AS];
+       struct kbase_pm_device_data pm;
+       struct kbasep_js_device_data js_data;
+       struct kbasep_mem_device memdev;
+       struct kbase_as as[BASE_MAX_NR_AS];
 
        spinlock_t              mmu_mask_change;
 
@@ -708,6 +698,16 @@ struct kbase_device {
 
        u32 tiler_needed_cnt;
 
+       /* struct for keeping track of the disjoint information
+        *
+        * The state is > 0 if the GPU is in a disjoint state, otherwise 0.
+        * The count is the number of disjoint events that have occurred on the GPU
+        */
+       struct {
+               atomic_t count;
+               atomic_t state;
+       } disjoint_event;
+
        /* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */
        u32 l2_users_count;
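disjoint_event keeps two counters: state is non-zero while any caller holds the GPU in a disjoint state, and count accumulates every disjoint event for reporting. The helpers in the new mali_kbase_disjoint_events.c reduce to atomic operations along these lines (a sketch; the flow is paraphrased, not quoted from that file):

        /* entering a disjoint section: raise the live state, log an event */
        atomic_inc(&kbdev->disjoint_event.state);
        atomic_inc(&kbdev->disjoint_event.count);

        /* ... work that makes GPU job timing non-representative ... */

        /* leaving the disjoint section */
        atomic_dec(&kbdev->disjoint_event.state);

        /* consumers read the cumulative event count */
        int nr = atomic_read(&kbdev->disjoint_event.count);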
 
@@ -732,17 +732,17 @@ struct kbase_device {
                /* The lock should be used when accessing any of the following members */
                spinlock_t lock;
 
-               kbase_context *kctx;
+               struct kbase_context *kctx;
                u64 addr;
                wait_queue_head_t wait;
                int triggered;
-               kbase_instr_state state;
+               enum kbase_instr_state state;
                wait_queue_head_t   cache_clean_wait;
                struct workqueue_struct *cache_clean_wq;
                struct work_struct  cache_clean_work;
 
-               kbase_context *suspended_kctx;
-               kbase_uk_hwcnt_setup suspended_state;
+               struct kbase_context *suspended_kctx;
+               struct kbase_uk_hwcnt_setup suspended_state;
        } hwcnt;
 
        /* Set when we're about to reset the GPU */
@@ -763,16 +763,16 @@ struct kbase_device {
        /*value to be written to the irq_throttle register each time an irq is served */
        atomic_t irq_throttle_cycles;
 
-       const kbase_attribute *config_attributes;
+       const struct kbase_attribute *config_attributes;
 
-#if KBASE_TRACE_ENABLE != 0
+#if KBASE_TRACE_ENABLE
        spinlock_t              trace_lock;
        u16                     trace_first_out;
        u16                     trace_next_in;
-       kbase_trace            *trace_rbuf;
+       struct kbase_trace            *trace_rbuf;
 #endif
 
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
        /* This is used to override the current job scheduler values for
         * KBASE_CONFIG_ATTR_JS_STOP_STOP_TICKS_SS
         * KBASE_CONFIG_ATTR_JS_STOP_STOP_TICKS_CL
@@ -811,8 +811,20 @@ struct kbase_device {
        struct delayed_work runtime_pm_workqueue;
 #endif
 
+#ifdef CONFIG_PM_DEVFREQ
+       struct devfreq_dev_profile devfreq_profile;
+       struct devfreq *devfreq;
+       bool reset_utilization;
+#ifdef CONFIG_DEVFREQ_THERMAL
+       struct devfreq_cooling_device *devfreq_cooling;
+#ifdef CONFIG_MALI_POWER_ACTOR
+       struct power_actor *power_actor;
+#endif
+#endif
+#endif
+
 #ifdef CONFIG_MALI_TRACE_TIMELINE
-       kbase_trace_kbdev_timeline timeline;
+       struct kbase_trace_kbdev_timeline timeline;
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -822,6 +834,10 @@ struct kbase_device {
        struct dentry *gpu_memory_dentry;
        /* debugfs entry for trace */
        struct dentry *trace_dentry;
+       /* directory for per-ctx memory profiling data */
+       struct dentry *memory_profile_directory;
+       /* Root directory for job dispatcher data */
+       struct dentry *jd_directory;
 #endif /* CONFIG_DEBUG_FS */
 
        /* fbdump profiling controls set by gator */
@@ -844,10 +860,14 @@ struct kbase_device {
         */
        mali_bool force_replay_random;
 #endif
+
+       /* Total number of created contexts */
+       atomic_t ctx_num;
 };
 
 struct kbase_context {
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
+       int id; /* System wide unique id */
        phys_addr_t pgd;
        struct list_head event_list;
        struct mutex event_mutex;
@@ -875,12 +895,12 @@ struct kbase_context {
        pid_t tgid;
        pid_t pid;
 
-       kbase_jd_context jctx;
+       struct kbase_jd_context jctx;
        atomic_t used_pages;
        atomic_t         nonmapped_pages;
 
-       kbase_mem_allocator osalloc;
-       kbase_mem_allocator * pgd_allocator;
+       struct kbase_mem_allocator osalloc;
+       struct kbase_mem_allocator * pgd_allocator;
 
        struct list_head waiting_soft_jobs;
 #ifdef CONFIG_KDS
@@ -909,20 +929,32 @@ struct kbase_context {
        struct mm_struct * process_mm;
 
 #ifdef CONFIG_MALI_TRACE_TIMELINE
-       kbase_trace_kctx_timeline timeline;
+       struct kbase_trace_kctx_timeline timeline;
 #endif
+#ifdef CONFIG_DEBUG_FS
+       /* debugfs entry for memory profile */
+       struct dentry *mem_dentry;
+       /* Content of mem_profile file */
+       char *mem_profile_data;
+       /* Size of @c mem_profile_data */
+       size_t mem_profile_size;
+       /* Spinlock guarding data */
+       spinlock_t mem_profile_lock;
+       /* Per-context directory for JD data */
+       struct dentry *jd_ctx_dir;
+#endif /* CONFIG_DEBUG_FS */
 };
 
-typedef enum kbase_reg_access_type {
+enum kbase_reg_access_type {
        REG_READ,
        REG_WRITE
-} kbase_reg_access_type;
+};
 
-typedef enum kbase_share_attr_bits {
+enum kbase_share_attr_bits {
        /* (1ULL << 8) bit is reserved */
        SHARE_BOTH_BITS = (2ULL << 8),  /* inner and outer shareable coherency */
        SHARE_INNER_BITS = (3ULL << 8)  /* inner shareable coherency */
-} kbase_share_attr_bits;
+};
 
 /* Conversion helpers for setting up high resolution timers */
 #define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime((x)*1000000U))
@@ -930,19 +962,8 @@ typedef enum kbase_share_attr_bits {
 
 /* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
 #define KBASE_CLEAN_CACHE_MAX_LOOPS     100000
-/* Maximum number of loops polling the GPU for an AS flush to complete before we assume the GPU has hung */
-#define KBASE_AS_FLUSH_MAX_LOOPS        100000
-
-/* Return values from kbase_replay_process */
-
-/* Replay job has completed */
-#define MALI_REPLAY_STATUS_COMPLETE  0
-/* Replay job is replaying and will continue once replayed jobs have completed.
- */
-#define MALI_REPLAY_STATUS_REPLAYING 1
-#define MALI_REPLAY_STATUS_MASK      0xff
-/* Caller must call kbasep_js_try_schedule_head_ctx */
-#define MALI_REPLAY_FLAG_JS_RESCHED  0x100
+/* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
+#define KBASE_AS_INACTIVE_MAX_LOOPS     100000
 
 /* Maximum number of times a job can be replayed */
 #define BASEP_JD_REPLAY_LIMIT 15
diff --git a/drivers/gpu/arm/midgard/mali_kbase_devfreq.c b/drivers/gpu/arm/midgard/mali_kbase_devfreq.c
new file mode 100755 (executable)
index 0000000..e28657c
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+
+#include <linux/devfreq.h>
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+
+#include "mali_kbase_power_actor.h"
+
+static int
+kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+{
+       struct kbase_device *kbdev = dev_get_drvdata(dev);
+       struct dev_pm_opp *opp;
+       unsigned long freq = 0;
+       int err;
+
+
+       kbdev->reset_utilization = true;
+
+       freq = *target_freq;
+
+       rcu_read_lock();
+       opp = devfreq_recommended_opp(dev, &freq, flags);
+       rcu_read_unlock();
+       if (IS_ERR_OR_NULL(opp)) {
+               dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
+               return PTR_ERR(opp);
+       }
+
+       err = clk_set_rate(kbdev->clock, freq);
+       if (err) {
+               dev_err(dev, "Failed to set clock %lu (target %lu)\n",
+                               freq, *target_freq);
+               return err;
+       }
+
+       *target_freq = freq;
+
+       return 0;
+}
+
+static int
+kbase_devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+       struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+       *freq = clk_get_rate(kbdev->clock);
+
+       return 0;
+}
+
+static int
+kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+       struct kbase_device *kbdev = dev_get_drvdata(dev);
+       int err;
+
+       err = kbase_devfreq_cur_freq(dev, &stat->current_frequency);
+       if (err)
+               return err;
+
+       kbase_pm_get_dvfs_utilisation(kbdev,
+                       &stat->total_time, &stat->busy_time,
+                       kbdev->reset_utilization);
+
+       /* TODO vsync info for governor? */
+       stat->private_data = NULL;
+
+       return 0;
+}
+
+static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
+               struct devfreq_dev_profile *dp)
+{
+       int count;
+       int i = 0;
+       unsigned long freq = 0;
+       struct dev_pm_opp *opp;
+
+       rcu_read_lock();
+       count = dev_pm_opp_get_opp_count(kbdev->dev);
+       if (count < 0) {
+               rcu_read_unlock();
+               return count;
+       }
+       rcu_read_unlock();
+
+       dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
+                               GFP_KERNEL);
+       if (!dp->freq_table)
+               return -ENOMEM;
+
+       rcu_read_lock();
+       for (i = 0; i < count; i++, freq++) {
+               opp = dev_pm_opp_find_freq_ceil(kbdev->dev, &freq);
+               if (IS_ERR(opp))
+                       break;
+
+               dp->freq_table[i] = freq;
+       }
+       rcu_read_unlock();
+
+       if (count != i)
+               dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d != %d)\n",
+                               count, i);
+
+       dp->max_state = i;
+
+       return 0;
+}
+
+static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev)
+{
+       struct devfreq_dev_profile *dp = kbdev->devfreq->profile;
+
+       kfree(dp->freq_table);
+}
+
+static void kbase_devfreq_exit(struct device *dev)
+{
+       struct kbase_device *kbdev = dev_get_drvdata(dev);
+
+       kbase_devfreq_term_freq_table(kbdev);
+}
+
+int kbase_devfreq_init(struct kbase_device *kbdev)
+{
+       struct devfreq_dev_profile *dp;
+       int err;
+
+       dev_dbg(kbdev->dev, "Init Mali devfreq\n");
+
+       if (!kbdev->clock)
+               return -ENODEV;
+
+       dp = &kbdev->devfreq_profile;
+
+       dp->initial_freq = clk_get_rate(kbdev->clock);
+       dp->polling_ms = 1000;
+       dp->target = kbase_devfreq_target;
+       dp->get_dev_status = kbase_devfreq_status;
+       dp->get_cur_freq = kbase_devfreq_cur_freq;
+       dp->exit = kbase_devfreq_exit;
+
+       if (kbase_devfreq_init_freq_table(kbdev, dp))
+               return -EFAULT;
+
+       kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
+                               "simple_ondemand", NULL);
+       if (IS_ERR_OR_NULL(kbdev->devfreq)) {
+               kbase_devfreq_term_freq_table(kbdev);
+               return PTR_ERR(kbdev->devfreq);
+       }
+
+       err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq);
+       if (err) {
+               dev_err(kbdev->dev,
+                       "Failed to register OPP notifier (%d)\n", err);
+               goto opp_notifier_failed;
+       }
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+       kbdev->devfreq_cooling = of_devfreq_cooling_register(
+                                               kbdev->dev->of_node,
+                                               kbdev->devfreq);
+       if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) {
+               err = PTR_ERR(kbdev->devfreq_cooling);
+               dev_err(kbdev->dev,
+                       "Failed to register cooling device (%d)\n", err);
+               goto cooling_failed;
+       }
+
+#ifdef CONFIG_MALI_POWER_ACTOR
+       err = mali_pa_init(kbdev);
+       if (err) {
+               dev_err(kbdev->dev, "Failed to init power actor\n");
+               goto pa_failed;
+       }
+#endif
+#endif
+
+       return 0;
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+#ifdef CONFIG_MALI_POWER_ACTOR
+pa_failed:
+       devfreq_cooling_unregister(kbdev->devfreq_cooling);
+#endif /* CONFIG_MALI_POWER_ACTOR */
+cooling_failed:
+       devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+#endif /* CONFIG_DEVFREQ_THERMAL */
+opp_notifier_failed:
+       err = devfreq_remove_device(kbdev->devfreq);
+       if (err)
+               dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+       else
+               kbdev->devfreq = NULL;
+
+       return err;
+}
+
+void kbase_devfreq_term(struct kbase_device *kbdev)
+{
+       int err;
+
+       dev_dbg(kbdev->dev, "Term Mali devfreq\n");
+
+#ifdef CONFIG_DEVFREQ_THERMAL
+#ifdef CONFIG_MALI_POWER_ACTOR
+       mali_pa_term(kbdev);
+#endif
+
+       devfreq_cooling_unregister(kbdev->devfreq_cooling);
+#endif
+
+       devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+
+       err = devfreq_remove_device(kbdev->devfreq);
+       if (err)
+               dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+       else
+               kbdev->devfreq = NULL;
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_devfreq.h b/drivers/gpu/arm/midgard/mali_kbase_devfreq.h
new file mode 100755 (executable)
index 0000000..e3b333a
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _BASE_DEVFREQ_H_
+#define _BASE_DEVFREQ_H_
+
+int kbase_devfreq_init(struct kbase_device *kbdev);
+void kbase_devfreq_term(struct kbase_device *kbdev);
+
+#endif /* _BASE_DEVFREQ_H_ */
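
For reference, a minimal caller-side sketch (not part of this patch) of how these two entry points are expected to be used by device bring-up code. The wrapper names kbase_platform_late_init()/kbase_platform_late_term() are illustrative placeholders; the only behaviour assumed from the code above is that kbase_devfreq_init() requires kbdev->clock, returns a negative errno on failure, and that such a failure can be treated as non-fatal by the rest of the driver.

    static int kbase_platform_late_init(struct kbase_device *kbdev)
    {
            int err;

            /* kbase_devfreq_init() returns -ENODEV when no clock handle is set */
            if (!kbdev->clock)
                    return -ENODEV;

            err = kbase_devfreq_init(kbdev);
            if (err)
                    /* treat DVFS as optional: keep running at the boot frequency */
                    dev_warn(kbdev->dev, "devfreq init failed (%d)\n", err);

            return 0;
    }

    static void kbase_platform_late_term(struct kbase_device *kbdev)
    {
            if (!IS_ERR_OR_NULL(kbdev->devfreq))
                    kbase_devfreq_term(kbdev);
    }
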
index 1d69de4f77061c560fd8875430966426f5077e90..bb276de9cd4eb8a3651fa34e7d35473bde20a1df 100755 (executable)
@@ -23,6 +23,7 @@
  */
 
 #include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
 #include <linux/seq_file.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
 #if defined(CONFIG_MALI_PLATFORM_VEXPRESS) || defined(CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ)
 #ifdef CONFIG_MALI_PLATFORM_FAKE
-extern kbase_attribute config_attributes_hw_issue_8408[];
+extern struct kbase_attribute config_attributes_hw_issue_8408[];
 #endif                         /* CONFIG_MALI_PLATFORM_FAKE */
 #endif                         /* CONFIG_MALI_PLATFORM_VEXPRESS || CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ */
 
-#if KBASE_TRACE_ENABLE != 0
+#if KBASE_TRACE_ENABLE
 STATIC CONST char *kbasep_trace_code_string[] = {
        /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
         * THIS MUST BE USED AT THE START OF THE ARRAY */
@@ -57,23 +58,20 @@ STATIC CONST char *kbasep_trace_code_string[] = {
 
 #define DEBUG_MESSAGE_SIZE 256
 
-STATIC mali_error kbasep_trace_init(kbase_device *kbdev);
-STATIC void kbasep_trace_term(kbase_device *kbdev);
+STATIC mali_error kbasep_trace_init(struct kbase_device *kbdev);
+STATIC void kbasep_trace_term(struct kbase_device *kbdev);
 STATIC void kbasep_trace_hook_wrapper(void *param);
-#if KBASE_TRACE_ENABLE != 0
-STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev);
+#if KBASE_TRACE_ENABLE
+STATIC void kbasep_trace_debugfs_init(struct kbase_device *kbdev);
+STATIC void kbasep_trace_debugfs_term(struct kbase_device *kbdev);
 #endif
 
-void kbasep_as_do_poke(struct work_struct *work);
-enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *data);
-void kbasep_reset_timeout_worker(struct work_struct *data);
-
-kbase_device *kbase_device_alloc(void)
+struct kbase_device *kbase_device_alloc(void)
 {
-       return kzalloc(sizeof(kbase_device), GFP_KERNEL);
+       return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
 }
 
-mali_error kbase_device_init(kbase_device * const kbdev)
+mali_error kbase_device_init(struct kbase_device * const kbdev)
 {
        int i;                  /* i used after the for loop, don't reuse ! */
 
@@ -94,10 +92,30 @@ mali_error kbase_device_init(kbase_device * const kbdev)
                kbase_pm_register_access_disable(kbdev);
                goto free_platform;
        }
-
        /* Set the list of features available on the current HW (identified by the GPU_ID register) */
        kbase_hw_set_features_mask(kbdev);
 
+#if defined(CONFIG_ARM64)
+       set_dma_ops(kbdev->dev, &noncoherent_swiotlb_dma_ops);
+#endif
+
+       /* Work around a pre-3.13 Linux issue where dma_mask is NULL when our
+        * device structure was created by device-tree
+        */
+       if (!kbdev->dev->dma_mask)
+               kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;
+
+       if (dma_set_mask(kbdev->dev,
+                       DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits)))
+               goto dma_set_mask_failed;
+
+       if (dma_set_coherent_mask(kbdev->dev,
+                       DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits)))
+               goto dma_set_mask_failed;
+
+       if (kbase_mem_lowlevel_init(kbdev))
+               goto mem_lowlevel_init_failed;
+
        kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;
 
        /* We're done accessing the GPU registers for now. */
@@ -161,6 +179,7 @@ mali_error kbase_device_init(kbase_device * const kbdev)
        if (NULL == kbdev->hwcnt.cache_clean_wq)
                goto free_workqs;
 
+#if KBASE_GPU_RESET_EN
        kbdev->reset_workq = alloc_workqueue("Mali reset workqueue", 0, 1);
        if (NULL == kbdev->reset_workq)
                goto free_cache_clean_workq;
@@ -173,6 +192,10 @@ mali_error kbase_device_init(kbase_device * const kbdev)
 
        if (kbasep_trace_init(kbdev) != MALI_ERROR_NONE)
                goto free_reset_workq;
+#else
+       if (kbasep_trace_init(kbdev) != MALI_ERROR_NONE)
+               goto free_cache_clean_workq;
+#endif /* KBASE_GPU_RESET_EN */
 
        mutex_init(&kbdev->cacheclean_lock);
        atomic_set(&kbdev->keep_gpu_powered_count, 0);
@@ -200,11 +223,14 @@ mali_error kbase_device_init(kbase_device * const kbdev)
 #endif                         /* CONFIG_MALI_PLATFORM_FAKE */
 #endif                         /* CONFIG_MALI_PLATFORM_VEXPRESS || CONFIG_MALI_PLATFORM_VEXPRESS_VIRTEX7_40MHZ */
 
-       return MALI_ERROR_NONE;
+       atomic_set(&kbdev->ctx_num, 0);
 
- free_reset_workq:
+       return MALI_ERROR_NONE;
+#if KBASE_GPU_RESET_EN
+free_reset_workq:
        destroy_workqueue(kbdev->reset_workq);
- free_cache_clean_workq:
+#endif /* KBASE_GPU_RESET_EN */
+free_cache_clean_workq:
        destroy_workqueue(kbdev->hwcnt.cache_clean_wq);
  free_workqs:
        while (i > 0) {
@@ -213,25 +239,31 @@ mali_error kbase_device_init(kbase_device * const kbdev)
                if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316))
                        destroy_workqueue(kbdev->as[i].poke_wq);
        }
- free_platform:
+       kbase_mem_lowlevel_term(kbdev);
+mem_lowlevel_init_failed:
+dma_set_mask_failed:
+free_platform:
        kbasep_platform_device_term(kbdev);
- fail:
+fail:
        return MALI_ERROR_FUNCTION_FAILED;
 }
 
-void kbase_device_term(kbase_device *kbdev)
+void kbase_device_term(struct kbase_device *kbdev)
 {
        int i;
 
        KBASE_DEBUG_ASSERT(kbdev);
 
-#if KBASE_TRACE_ENABLE != 0
+#if KBASE_TRACE_ENABLE
        kbase_debug_assert_register_hook(NULL, NULL);
 #endif
 
        kbasep_trace_term(kbdev);
 
+#if KBASE_GPU_RESET_EN
        destroy_workqueue(kbdev->reset_workq);
+#endif
+
        destroy_workqueue(kbdev->hwcnt.cache_clean_wq);
 
        for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
@@ -240,15 +272,16 @@ void kbase_device_term(kbase_device *kbdev)
                        destroy_workqueue(kbdev->as[i].poke_wq);
        }
 
+       kbase_mem_lowlevel_term(kbdev);
        kbasep_platform_device_term(kbdev);
 }
 
-void kbase_device_free(kbase_device *kbdev)
+void kbase_device_free(struct kbase_device *kbdev)
 {
        kfree(kbdev);
 }
 
-void kbase_device_trace_buffer_install(kbase_context *kctx, u32 *tb, size_t size)
+void kbase_device_trace_buffer_install(struct kbase_context *kctx, u32 *tb, size_t size)
 {
        unsigned long flags;
        KBASE_DEBUG_ASSERT(kctx);
@@ -270,7 +303,7 @@ void kbase_device_trace_buffer_install(kbase_context *kctx, u32 *tb, size_t size
        spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
 }
 
-void kbase_device_trace_buffer_uninstall(kbase_context *kctx)
+void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx)
 {
        unsigned long flags;
        KBASE_DEBUG_ASSERT(kctx);
@@ -280,7 +313,7 @@ void kbase_device_trace_buffer_uninstall(kbase_context *kctx)
        spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
 }
 
-void kbase_device_trace_register_access(kbase_context *kctx, kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
+void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
 {
        unsigned long flags;
        spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
@@ -321,7 +354,7 @@ void kbase_device_trace_register_access(kbase_context *kctx, kbase_reg_access_ty
        spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
 }
 
-void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *kctx)
+void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value, struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
        KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
@@ -334,7 +367,7 @@ void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *
 
 KBASE_EXPORT_TEST_API(kbase_reg_write)
 
-u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx)
+u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset, struct kbase_context *kctx)
 {
        u32 val;
        KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
@@ -349,7 +382,8 @@ u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx)
 
 KBASE_EXPORT_TEST_API(kbase_reg_read)
 
-void kbase_report_gpu_fault(kbase_device *kbdev, int multiple)
+#if KBASE_PM_EN
+void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
 {
        u32 status;
        u64 address;
@@ -364,7 +398,7 @@ void kbase_report_gpu_fault(kbase_device *kbdev, int multiple)
                dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
 }
 
-void kbase_gpu_interrupt(kbase_device *kbdev, u32 val)
+void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
 {
        KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, NULL, 0u, val);
        if (val & GPU_FAULT)
@@ -427,17 +461,17 @@ void kbase_gpu_interrupt(kbase_device *kbdev, u32 val)
        }
        KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, NULL, 0u, val);
 }
-
+#endif  /* KBASE_PM_EN */
 /*
  * Device trace functions
  */
-#if KBASE_TRACE_ENABLE != 0
+#if KBASE_TRACE_ENABLE
 
-STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
+STATIC mali_error kbasep_trace_init(struct kbase_device *kbdev)
 {
        void *rbuf;
 
-       rbuf = kmalloc(sizeof(kbase_trace) * KBASE_TRACE_SIZE, GFP_KERNEL);
+       rbuf = kmalloc(sizeof(struct kbase_trace) * KBASE_TRACE_SIZE, GFP_KERNEL);
 
        if (!rbuf)
                return MALI_ERROR_FUNCTION_FAILED;
@@ -448,14 +482,13 @@ STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
        return MALI_ERROR_NONE;
 }
 
-STATIC void kbasep_trace_term(kbase_device *kbdev)
+STATIC void kbasep_trace_term(struct kbase_device *kbdev)
 {
-       debugfs_remove(kbdev->trace_dentry);
-       kbdev->trace_dentry= NULL;
+       kbasep_trace_debugfs_term(kbdev);
        kfree(kbdev->trace_rbuf);
 }
 
-void kbasep_trace_format_msg(kbase_trace *trace_msg, char *buffer, int len)
+static void kbasep_trace_format_msg(struct kbase_trace *trace_msg, char *buffer, int len)
 {
        s32 written = 0;
 
@@ -486,7 +519,7 @@ void kbasep_trace_format_msg(kbase_trace *trace_msg, char *buffer, int len)
 
 }
 
-void kbasep_trace_dump_msg(kbase_device *kbdev, kbase_trace *trace_msg)
+static void kbasep_trace_dump_msg(struct kbase_device *kbdev, struct kbase_trace *trace_msg)
 {
        char buffer[DEBUG_MESSAGE_SIZE];
 
@@ -494,10 +527,10 @@ void kbasep_trace_dump_msg(kbase_device *kbdev, kbase_trace *trace_msg)
        dev_dbg(kbdev->dev, "%s", buffer);
 }
 
-void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
 {
        unsigned long irqflags;
-       kbase_trace *trace_msg;
+       struct kbase_trace *trace_msg;
 
        spin_lock_irqsave(&kbdev->trace_lock, irqflags);
 
@@ -537,7 +570,7 @@ void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kba
        spin_unlock_irqrestore(&kbdev->trace_lock, irqflags);
 }
 
-void kbasep_trace_clear(kbase_device *kbdev)
+void kbasep_trace_clear(struct kbase_device *kbdev)
 {
        unsigned long flags;
        spin_lock_irqsave(&kbdev->trace_lock, flags);
@@ -545,7 +578,7 @@ void kbasep_trace_clear(kbase_device *kbdev)
        spin_unlock_irqrestore(&kbdev->trace_lock, flags);
 }
 
-void kbasep_trace_dump(kbase_device *kbdev)
+void kbasep_trace_dump(struct kbase_device *kbdev)
 {
        unsigned long flags;
        u32 start;
@@ -557,7 +590,7 @@ void kbasep_trace_dump(kbase_device *kbdev)
        end = kbdev->trace_next_in;
 
        while (start != end) {
-               kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
+               struct kbase_trace *trace_msg = &kbdev->trace_rbuf[start];
                kbasep_trace_dump_msg(kbdev, trace_msg);
 
                start = (start + 1) & KBASE_TRACE_MASK;
@@ -571,18 +604,18 @@ void kbasep_trace_dump(kbase_device *kbdev)
 
 STATIC void kbasep_trace_hook_wrapper(void *param)
 {
-       kbase_device *kbdev = (kbase_device *) param;
+       struct kbase_device *kbdev = (struct kbase_device *)param;
        kbasep_trace_dump(kbdev);
 }
 
 #ifdef CONFIG_DEBUG_FS
 struct trace_seq_state {
-       kbase_trace trace_buf[KBASE_TRACE_SIZE];
+       struct kbase_trace trace_buf[KBASE_TRACE_SIZE];
        u32 start;
        u32 end;
 };
 
-void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
+static void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
 {
        struct trace_seq_state *state = s->private;
        int i;
@@ -599,11 +632,11 @@ void *kbasep_trace_seq_start(struct seq_file *s, loff_t *pos)
        return &state->trace_buf[i];
 }
 
-void kbasep_trace_seq_stop(struct seq_file *s, void *data)
+static void kbasep_trace_seq_stop(struct seq_file *s, void *data)
 {
 }
 
-void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
+static void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
 {
        struct trace_seq_state *state = s->private;
        int i;
@@ -617,9 +650,9 @@ void *kbasep_trace_seq_next(struct seq_file *s, void *data, loff_t *pos)
        return &state->trace_buf[i];
 }
 
-int kbasep_trace_seq_show(struct seq_file *s, void *data)
+static int kbasep_trace_seq_show(struct seq_file *s, void *data)
 {
-       kbase_trace *trace_msg = data;
+       struct kbase_trace *trace_msg = data;
        char buffer[DEBUG_MESSAGE_SIZE];
 
        kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
@@ -636,7 +669,7 @@ static const struct seq_operations kbasep_trace_seq_ops = {
 
 static int kbasep_trace_debugfs_open(struct inode *inode, struct file *file)
 {
-       kbase_device *kbdev = inode->i_private;
+       struct kbase_device *kbdev = inode->i_private;
        unsigned long flags;
 
        struct trace_seq_state *state;
@@ -661,27 +694,37 @@ static const struct file_operations kbasep_trace_debugfs_fops = {
        .release = seq_release_private,
 };
 
-STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
+STATIC void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
 {
        kbdev->trace_dentry = debugfs_create_file("mali_trace", S_IRUGO,
                        kbdev->mali_debugfs_directory, kbdev,
                        &kbasep_trace_debugfs_fops);
 }
+
+STATIC void kbasep_trace_debugfs_term(struct kbase_device *kbdev)
+{
+       debugfs_remove(kbdev->trace_dentry);
+       kbdev->trace_dentry = NULL;
+}
 #else
-STATIC void kbasep_trace_debugfs_init(kbase_device *kbdev)
+STATIC void kbasep_trace_debugfs_init(struct kbase_device *kbdev)
+{
+
+}
+STATIC void kbasep_trace_debugfs_term(struct kbase_device *kbdev)
 {
 
 }
 #endif                         /* CONFIG_DEBUG_FS */
 
-#else                          /* KBASE_TRACE_ENABLE != 0 */
-STATIC mali_error kbasep_trace_init(kbase_device *kbdev)
+#else                          /* KBASE_TRACE_ENABLE  */
+STATIC mali_error kbasep_trace_init(struct kbase_device *kbdev)
 {
        CSTD_UNUSED(kbdev);
        return MALI_ERROR_NONE;
 }
 
-STATIC void kbasep_trace_term(kbase_device *kbdev)
+STATIC void kbasep_trace_term(struct kbase_device *kbdev)
 {
        CSTD_UNUSED(kbdev);
 }
@@ -691,7 +734,7 @@ STATIC void kbasep_trace_hook_wrapper(void *param)
        CSTD_UNUSED(param);
 }
 
-void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
+void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
 {
        CSTD_UNUSED(kbdev);
        CSTD_UNUSED(code);
@@ -704,16 +747,16 @@ void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kba
        CSTD_UNUSED(info_val);
 }
 
-void kbasep_trace_clear(kbase_device *kbdev)
+void kbasep_trace_clear(struct kbase_device *kbdev)
 {
        CSTD_UNUSED(kbdev);
 }
 
-void kbasep_trace_dump(kbase_device *kbdev)
+void kbasep_trace_dump(struct kbase_device *kbdev)
 {
        CSTD_UNUSED(kbdev);
 }
-#endif                         /* KBASE_TRACE_ENABLE != 0 */
+#endif                         /* KBASE_TRACE_ENABLE  */
 
 void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value)
 {
diff --git a/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c b/drivers/gpu/arm/midgard/mali_kbase_disjoint_events.c
new file mode 100755 (executable)
index 0000000..22b6227
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_disjoint_events.c
+ * Base kernel disjoint events helper functions
+ */
+
+#include <mali_kbase.h>
+
+void kbase_disjoint_init(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+       atomic_set(&kbdev->disjoint_event.count, 0);
+       atomic_set(&kbdev->disjoint_event.state, 0);
+}
+
+/* increment the disjoint event count */
+void kbase_disjoint_event(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+       atomic_inc(&kbdev->disjoint_event.count);
+}
+
+/* increment the state and the event counter */
+void kbase_disjoint_state_up(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+       atomic_inc(&kbdev->disjoint_event.state);
+
+       kbase_disjoint_event(kbdev);
+}
+
+/* decrement the state */
+void kbase_disjoint_state_down(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+       KBASE_DEBUG_ASSERT(atomic_read(&kbdev->disjoint_event.state) > 0);
+
+       kbase_disjoint_event(kbdev);
+
+       atomic_dec(&kbdev->disjoint_event.state);
+}
+
+/* increments the count only if the state is > 0 */
+void kbase_disjoint_event_potential(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+       if (atomic_read(&kbdev->disjoint_event.state))
+               kbase_disjoint_event(kbdev);
+}
+
+u32 kbase_disjoint_event_get(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+       return atomic_read(&kbdev->disjoint_event.count);
+}
+KBASE_EXPORT_TEST_API(kbase_disjoint_event_get)
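
A hypothetical usage sketch (not part of this patch) of the helpers above: a code path that disturbs GPU timing raises the disjoint state around its work, and atoms completing while the state is raised report themselves through the potential-event helper. do_disruptive_work() is a placeholder name, not a real kbase function.

    static void example_disjoint_section(struct kbase_device *kbdev)
    {
            /* count++ and state++: work from here on may produce disjoint timing */
            kbase_disjoint_state_up(kbdev);

            do_disruptive_work(kbdev);      /* placeholder, e.g. a soft-stop/replay path */

            /* while state > 0 this increments the event count, otherwise it is a no-op */
            kbase_disjoint_event_potential(kbdev);

            /* count++ and state--: back to normal accounting */
            kbase_disjoint_state_down(kbdev);
    }
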
index 1e048696446a9b0ecc939b4c8e0383412e7fcf2e..27312ca7a1252713723bf90af3af08018863099f 100755 (executable)
@@ -20,9 +20,9 @@
 #include <mali_kbase.h>
 #include <mali_kbase_debug.h>
 
-STATIC base_jd_udata kbase_event_process(kbase_context *kctx, kbase_jd_atom *katom)
+STATIC struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
-       base_jd_udata data;
+       struct base_jd_udata data;
 
        KBASE_DEBUG_ASSERT(kctx != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
@@ -41,7 +41,7 @@ STATIC base_jd_udata kbase_event_process(kbase_context *kctx, kbase_jd_atom *kat
        return data;
 }
 
-int kbase_event_pending(kbase_context *ctx)
+int kbase_event_pending(struct kbase_context *ctx)
 {
        int ret;
 
@@ -56,9 +56,9 @@ int kbase_event_pending(kbase_context *ctx)
 
 KBASE_EXPORT_TEST_API(kbase_event_pending)
 
-int kbase_event_dequeue(kbase_context *ctx, base_jd_event_v2 *uevent)
+int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
 {
-       kbase_jd_atom *atom;
+       struct kbase_jd_atom *atom;
 
        KBASE_DEBUG_ASSERT(ctx);
 
@@ -81,7 +81,7 @@ int kbase_event_dequeue(kbase_context *ctx, base_jd_event_v2 *uevent)
        }
 
        /* normal event processing */
-       atom = list_entry(ctx->event_list.next, kbase_jd_atom, dep_item[0]);
+       atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
        list_del(ctx->event_list.next);
 
        mutex_unlock(&ctx->event_mutex);
@@ -98,8 +98,8 @@ KBASE_EXPORT_TEST_API(kbase_event_dequeue)
 
 static void kbase_event_post_worker(struct work_struct *data)
 {
-       kbase_jd_atom *atom = CONTAINER_OF(data, kbase_jd_atom, work);
-       kbase_context *ctx = atom->kctx;
+       struct kbase_jd_atom *atom = container_of(data, struct kbase_jd_atom, work);
+       struct kbase_context *ctx = atom->kctx;
 
        if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
                kbase_jd_free_external_resources(atom);
@@ -125,7 +125,7 @@ static void kbase_event_post_worker(struct work_struct *data)
        kbase_event_wakeup(ctx);
 }
 
-void kbase_event_post(kbase_context *ctx, kbase_jd_atom *atom)
+void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
 {
        KBASE_DEBUG_ASSERT(ctx);
        KBASE_DEBUG_ASSERT(ctx->event_workq);
@@ -137,7 +137,7 @@ void kbase_event_post(kbase_context *ctx, kbase_jd_atom *atom)
 
 KBASE_EXPORT_TEST_API(kbase_event_post)
 
-void kbase_event_close(kbase_context *kctx)
+void kbase_event_close(struct kbase_context *kctx)
 {
        mutex_lock(&kctx->event_mutex);
        kctx->event_closed = MALI_TRUE;
@@ -145,7 +145,7 @@ void kbase_event_close(kbase_context *kctx)
        kbase_event_wakeup(kctx);
 }
 
-mali_error kbase_event_init(kbase_context *kctx)
+mali_error kbase_event_init(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kctx);
 
@@ -162,7 +162,7 @@ mali_error kbase_event_init(kbase_context *kctx)
 
 KBASE_EXPORT_TEST_API(kbase_event_init)
 
-void kbase_event_cleanup(kbase_context *kctx)
+void kbase_event_cleanup(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kctx);
        KBASE_DEBUG_ASSERT(kctx->event_workq);
@@ -177,7 +177,8 @@ void kbase_event_cleanup(kbase_context *kctx)
         * thread using it (because we're about to terminate the lock)
         */
        while (!list_empty(&kctx->event_list)) {
-               base_jd_event_v2 event;
+               struct base_jd_event_v2 event;
+
                kbase_event_dequeue(kctx, &event);
        }
 }
index 3c4ffed687317b6aa125e3b7a08f803f23065a64..c931af6dfa08d968c3d1f9d4e669eca3b1b9f2a5 100755 (executable)
@@ -30,7 +30,7 @@
 #define GATOR_JOB_SLOT_STOP  2
 #define GATOR_JOB_SLOT_SOFT_STOPPED  3
 
-void kbase_trace_mali_job_slots_event(u32 event, const kbase_context *kctx, u8 atom_id);
+void kbase_trace_mali_job_slots_event(u32 event, const struct kbase_context *kctx, u8 atom_id);
 void kbase_trace_mali_pm_status(u32 event, u64 value);
 void kbase_trace_mali_pm_power_off(u32 event, u64 value);
 void kbase_trace_mali_pm_power_on(u32 event, u64 value);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.c b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c
new file mode 100755 (executable)
index 0000000..308bbf0
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#include "mali_kbase.h"
+#include "mali_kbase_mem_linux.h"
+#include "mali_kbase_gator_api.h"
+#include "mali_kbase_gator_hwcnt_names.h"
+
+#define MALI_MAX_CORES_PER_GROUP               4
+#define MALI_MAX_NUM_BLOCKS_PER_GROUP  8
+#define MALI_COUNTERS_PER_BLOCK                        64
+#define MALI_BYTES_PER_COUNTER                 4
+
+struct kbase_gator_hwcnt_handles {
+       struct kbase_device  *kbdev;
+       struct kbase_context *kctx;
+       struct kbase_hwc_dma_mapping kernel_dump_buffer_handle;
+};
+
+const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_number_of_counters)
+{
+       uint32_t gpu_id;
+       const char * const *hardware_counter_names;
+       struct kbase_device *kbdev;
+
+       if (!total_number_of_counters)
+               return NULL;
+
+       /* Get the first device - it doesn't matter in this case */
+       kbdev = kbase_find_device(-1);
+       if (!kbdev)
+               return NULL;
+
+       gpu_id = kbdev->gpu_props.props.core_props.product_id;
+
+       switch (gpu_id) {
+       /* If we are using a Mali-T60x device */
+       case GPU_ID_PI_T60X:
+                       hardware_counter_names = hardware_counter_names_mali_t60x;
+                       *total_number_of_counters = ARRAY_SIZE(hardware_counter_names_mali_t60x);
+                       break;
+       /* If we are using a Mali-T62x device */
+       case GPU_ID_PI_T62X:
+                       hardware_counter_names = hardware_counter_names_mali_t62x;
+                       *total_number_of_counters = ARRAY_SIZE(hardware_counter_names_mali_t62x);
+                       break;
+       /* If we are using a Mali-T72x device */
+       case GPU_ID_PI_T72X:
+                       hardware_counter_names = hardware_counter_names_mali_t72x;
+                       *total_number_of_counters = ARRAY_SIZE(hardware_counter_names_mali_t72x);
+                       break;
+       /* If we are using a Mali-T76x device */
+       case GPU_ID_PI_T76X:
+                       hardware_counter_names = hardware_counter_names_mali_t76x;
+                       *total_number_of_counters = ARRAY_SIZE(hardware_counter_names_mali_t76x);
+                       break;
+#ifdef MALI_INCLUDE_TFRX
+       /* If we are using a Mali-TFRX device - for now just mimic the T760 counters */
+       case GPU_ID_PI_TFRX:
+                       hardware_counter_names = hardware_counter_names_mali_t76x;
+                       *total_number_of_counters = ARRAY_SIZE(hardware_counter_names_mali_t76x);
+                       break;
+#endif /* MALI_INCLUDE_TFRX */
+#ifdef MALI_INCLUDE_TF2X
+       /* If we are using a Mali-TF2X device - for now just mimic the T760 counters */
+       case GPU_ID_PI_TF2X:
+                       hardware_counter_names = hardware_counter_names_mali_t76x;
+                       *total_number_of_counters = ARRAY_SIZE(hardware_counter_names_mali_t76x);
+                       break;
+#endif /* MALI_INCLUDE_TF2X */
+       default:
+                       hardware_counter_names = NULL;
+                       *total_number_of_counters = 0;
+                       dev_err(kbdev->dev, "Unrecognized gpu ID: %u\n", gpu_id);
+                       break;
+       }
+
+       /* Release the kbdev reference. */
+       kbase_release_device(kbdev);
+
+       /* If we return a string array take a reference on the module (or fail). */
+       if (hardware_counter_names && !try_module_get(THIS_MODULE))
+               return NULL;
+
+       return hardware_counter_names;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init_names)
+
+void kbase_gator_hwcnt_term_names(void)
+{
+       /* Release the module reference. */
+       module_put(THIS_MODULE);
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term_names)
+
+struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info)
+{
+       struct kbase_gator_hwcnt_handles *hand;
+       struct kbase_uk_hwcnt_setup setup;
+       mali_error err;
+       uint32_t dump_size = 0, i = 0;
+
+       if (!in_out_info)
+               return NULL;
+
+       hand = kzalloc(sizeof(*hand), GFP_KERNEL);
+       if (!hand)
+               return NULL;
+
+       /* Get the first device */
+       hand->kbdev = kbase_find_device(-1);
+       if (!hand->kbdev)
+               goto free_hand;
+
+       /* Create a kbase_context */
+       hand->kctx = kbase_create_context(hand->kbdev);
+       if (!hand->kctx)
+               goto release_device;
+
+       in_out_info->nr_cores = hand->kbdev->gpu_props.num_cores;
+       in_out_info->nr_core_groups = hand->kbdev->gpu_props.num_core_groups;
+       in_out_info->gpu_id = hand->kbdev->gpu_props.props.core_props.product_id;
+
+       /* If we are using a Mali-T6xx or Mali-T72x device */
+       if (in_out_info->gpu_id == GPU_ID_PI_T60X ||
+           in_out_info->gpu_id == GPU_ID_PI_T62X ||
+           in_out_info->gpu_id == GPU_ID_PI_T72X) {
+               uint32_t cg, j;
+               uint64_t core_mask;
+
+               /* There are 8 hardware counter blocks per core group */
+               in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) *
+                       MALI_MAX_NUM_BLOCKS_PER_GROUP *
+                       in_out_info->nr_core_groups, GFP_KERNEL);
+
+               if (!in_out_info->hwc_layout)
+                       goto destroy_context;
+
+               dump_size = in_out_info->nr_core_groups *
+                       MALI_MAX_NUM_BLOCKS_PER_GROUP *
+                       MALI_COUNTERS_PER_BLOCK *
+                       MALI_BYTES_PER_COUNTER;
+
+               for (cg = 0; cg < in_out_info->nr_core_groups; cg++) {
+                       core_mask = hand->kbdev->gpu_props.props.coherency_info.group[cg].core_mask;
+
+                       for (j = 0; j < MALI_MAX_CORES_PER_GROUP; j++) {
+                               if (core_mask & (1u << j))
+                                       in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+                               else
+                                       in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+                       }
+
+                       in_out_info->hwc_layout[i++] = TILER_BLOCK;
+                       in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+                       /* There are no implementations with L3 cache */
+                       in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+
+                       if (0 == cg)
+                               in_out_info->hwc_layout[i++] = JM_BLOCK;
+                       else
+                               in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+               }
+       /* If we are using a Mali-T76x device */
+       } else if (
+                       (in_out_info->gpu_id == GPU_ID_PI_T76X)
+#ifdef MALI_INCLUDE_TFRX
+                               || (in_out_info->gpu_id == GPU_ID_PI_TFRX)
+#endif /* MALI_INCLUDE_TFRX */
+#ifdef MALI_INCLUDE_TF2X
+                               || (in_out_info->gpu_id == GPU_ID_PI_TF2X)
+#endif /* MALI_INCLUDE_TF2X */
+                       ) {
+               uint32_t nr_l2, nr_sc, j;
+               uint64_t core_mask;
+
+               nr_l2 = hand->kbdev->gpu_props.props.l2_props.num_l2_slices;
+
+               core_mask = hand->kbdev->gpu_props.props.coherency_info.group[0].core_mask;
+
+               nr_sc = hand->kbdev->gpu_props.props.coherency_info.group[0].num_cores;
+
+               /* For Mali-T76x, the job manager and tiler sets of counters are always present */
+               in_out_info->hwc_layout = kmalloc(sizeof(enum hwc_type) * (2 + nr_sc + nr_l2), GFP_KERNEL);
+
+               if (!in_out_info->hwc_layout)
+                       goto destroy_context;
+
+               dump_size = (2 + nr_sc + nr_l2) * MALI_COUNTERS_PER_BLOCK * MALI_BYTES_PER_COUNTER;
+
+               in_out_info->hwc_layout[i++] = JM_BLOCK;
+               in_out_info->hwc_layout[i++] = TILER_BLOCK;
+
+               for (j = 0; j < nr_l2; j++)
+                       in_out_info->hwc_layout[i++] = MMU_L2_BLOCK;
+
+               while (core_mask != 0ull) {
+                       if ((core_mask & 1ull) != 0ull)
+                               in_out_info->hwc_layout[i++] = SHADER_BLOCK;
+                       else
+                               in_out_info->hwc_layout[i++] = RESERVED_BLOCK;
+                       core_mask >>= 1;
+               }
+       }
+
+       in_out_info->nr_hwc_blocks = i;
+
+       in_out_info->size = dump_size;
+
+       in_out_info->kernel_dump_buffer = kbase_va_alloc(hand->kctx, dump_size, &hand->kernel_dump_buffer_handle);
+       if (!in_out_info->kernel_dump_buffer)
+               goto free_layout;
+
+       setup.dump_buffer = (uintptr_t)in_out_info->kernel_dump_buffer;
+       setup.jm_bm = in_out_info->bitmask[0];
+       setup.tiler_bm = in_out_info->bitmask[1];
+       setup.shader_bm = in_out_info->bitmask[2];
+       setup.mmu_l2_bm = in_out_info->bitmask[3];
+
+       /* There are no implementations with L3 cache */
+       setup.l3_cache_bm = 0;
+
+       err = kbase_instr_hwcnt_enable(hand->kctx, &setup);
+       if (err != MALI_ERROR_NONE)
+               goto free_buffer;
+
+       kbase_instr_hwcnt_clear(hand->kctx);
+
+       return hand;
+
+free_buffer:
+       kbase_va_free(hand->kctx, &hand->kernel_dump_buffer_handle);
+
+free_layout:
+       kfree(in_out_info->hwc_layout);
+
+destroy_context:
+       kbase_destroy_context(hand->kctx);
+
+release_device:
+       kbase_release_device(hand->kbdev);
+
+free_hand:
+       kfree(hand);
+
+       return NULL;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_init)
+
+void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+       if (in_out_info)
+               kfree(in_out_info->hwc_layout);
+
+       if (opaque_handles) {
+               kbase_instr_hwcnt_disable(opaque_handles->kctx);
+               kbase_va_free(opaque_handles->kctx, &opaque_handles->kernel_dump_buffer_handle);
+               kbase_destroy_context(opaque_handles->kctx);
+               kbase_release_device(opaque_handles->kbdev);
+               kfree(opaque_handles);
+       }
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term)
+
+uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success)
+{
+       if (opaque_handles && success)
+               return (kbase_instr_hwcnt_dump_complete(opaque_handles->kctx, success) != 0);
+       return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_complete)
+
+uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles)
+{
+       if (opaque_handles)
+               return  (kbase_instr_hwcnt_dump_irq(opaque_handles->kctx) == MALI_ERROR_NONE);
+       return 0;
+}
+KBASE_EXPORT_SYMBOL(kbase_gator_instr_hwcnt_dump_irq)
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.h b/drivers/gpu/arm/midgard/mali_kbase_gator_api.h
new file mode 100755 (executable)
index 0000000..75251e7
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_GATOR_API_H_
+#define _KBASE_GATOR_API_H_
+
+/**
+ * @brief This file describes the API used by Gator to collect hardware counter data from a Mali device.
+ */
+
+/* This define is used by the gator kernel module compile to select which DDK
+ * API calling convention to use. If not defined (legacy DDK) gator assumes
+ * version 1. The version to DDK release mapping is:
+ *     Version 1 API: DDK versions r1px, r2px
+ *     Version 2 API: DDK versions r3px, r4px
+ *     Version 3 API: DDK version r5p0 and newer
+ *
+ * API Usage
+ * =========
+ *
+ * 1] Call kbase_gator_hwcnt_init_names() to return the list of short counter
+ * names for the GPU present in this device.
+ *
+ * 2] Create a kbase_gator_hwcnt_info structure and set the counter enables for
+ * the counters you want enabled. The enables can all be set for simplicity in
+ * most use cases, but disabling some will let you minimize bandwidth impact.
+ *
+ * 3] Call kbase_gator_hwcnt_init() using the above structure, to create a
+ * counter context. On successful return the DDK will have populated the
+ * structure with a variety of useful information.
+ *
+ * 4] Call kbase_gator_instr_hwcnt_dump_irq() to queue a non-blocking request
+ * for a counter dump. If this returns a non-zero value the request has been
+ * queued, otherwise the driver was unable to do so (typically because another
+ * user of the instrumentation exists concurrently).
+ *
+ * 5] Call kbase_gator_instr_hwcnt_dump_complete() to test whether the
+ * previously requested dump has completed. If this returns non-zero the
+ * counter dump has resolved, but the value of *success must also be tested,
+ * as the dump may not have been successful. If it returns zero the counter
+ * dump was abandoned because the device was busy (typically because another
+ * user of the instrumentation exists concurrently).
+ *
+ * 6] Process the counters stored in the buffer pointed to by ...
+ *
+ *        kbase_gator_hwcnt_info->kernel_dump_buffer
+ *
+ *    In pseudo code you can find all of the counters via this approach:
+ *
+ *
+ *        hwcnt_info # pointer to kbase_gator_hwcnt_info structure
+ *        hwcnt_name # pointer to name list
+ *
+ *        u32 * hwcnt_data = (u32*)hwcnt_info->kernel_dump_buffer
+ *
+ *        # Iterate over each 64-counter block in this GPU configuration
+ *        for( i = 0; i < hwcnt_info->nr_hwc_blocks; i++) {
+ *            hwc_type type = hwcnt_info->hwc_layout[i];
+ *
+ *            # Skip reserved type blocks - they contain no counters at all
+ *            if( type == RESERVED_BLOCK ) {
+ *                continue;
+ *            }
+ *
+ *            size_t name_offset = type * 64;
+ *            size_t data_offset = i * 64;
+ *
+ *            # Iterate over the names of the counters in this block type
+ *            for( j = 0; j < 64; j++) {
+ *                const char * name = hwcnt_name[name_offset+j];
+ *
+ *                # Skip empty name strings - there is no counter here
+ *                if( name[0] == '\0' ) {
+ *                    continue;
+ *                }
+ *
+ *                u32 data = hwcnt_data[data_offset+j];
+ *
+ *                printk( "COUNTER: %s DATA: %u\n", name, data );
+ *            }
+ *        }
+ *
+ *
+ *     Note that in most implementations you typically want to either SUM or
+ *     AVERAGE multiple instances of the same counter if, for example, you have
+ *     multiple shader cores or multiple L2 caches. The most sensible view for
+ *     analysis is to AVERAGE shader core counters, but SUM L2 cache and MMU
+ *     counters.
+ *
+ * 7] Goto 4, repeating until you want to stop collecting counters.
+ *
+ * 8] Release the dump resources by calling kbase_gator_hwcnt_term().
+ *
+ * 9] Release the name table resources by calling kbase_gator_hwcnt_term_names().
+ *    This function must only be called if init_names() returned a non-NULL value.
+ **/
+
+#define MALI_DDK_GATOR_API_VERSION 3
+
+enum hwc_type {
+       JM_BLOCK = 0,
+       TILER_BLOCK,
+       SHADER_BLOCK,
+       MMU_L2_BLOCK,
+       RESERVED_BLOCK
+};
+
+struct kbase_gator_hwcnt_info {
+
+       /* Passed from Gator to kbase */
+
+       /* the bitmask of enabled hardware counters for each counter block */
+       uint16_t bitmask[4];
+
+       /* Passed from kbase to Gator */
+
+       /* ptr to counter dump memory */
+       void *kernel_dump_buffer;
+
+       /* size of counter dump memory */
+       uint32_t size;
+
+       /* the ID of the Mali device */
+       uint32_t gpu_id;
+
+       /* the number of shader cores in the GPU */
+       uint32_t nr_cores;
+
+       /* the number of core groups */
+       uint32_t nr_core_groups;
+
+       /* the memory layout of the performance counters */
+       enum hwc_type *hwc_layout;
+
+       /* the total number of hardware counter blocks */
+       uint32_t nr_hwc_blocks;
+};
+
+/**
+ * @brief Opaque block of Mali data which Gator needs to return to the API later.
+ */
+struct kbase_gator_hwcnt_handles;
+
+/**
+ * @brief Initialize the resources Gator needs for performance profiling.
+ *
+ * @param in_out_info   A pointer to a structure containing the enabled counters passed from Gator and all the Mali
+ *                      specific information that will be returned to Gator. On entry Gator must have populated the
+ *                      'bitmask' field with the counters it wishes to enable for each class of counter block.
+ *                      Each entry in the array corresponds to a single counter class based on the "hwc_type"
+ *                      enumeration, and each bit corresponds to an enable for 4 sequential counters (LSB enables
+ *                      the first 4 counters in the block, and so on). See the GPU counter array as returned by
+ *                      kbase_gator_hwcnt_init_names() for the index values of each counter for the current GPU.
+ *
+ * @return              Pointer to an opaque handle block on success, NULL on error.
+ */
+extern struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info);
+
+/**
+ * @brief Free all resources once Gator has finished using performance counters.
+ *
+ * @param in_out_info       A pointer to a structure containing the enabled counters passed from Gator and all the
+ *                          Mali specific information that will be returned to Gator.
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ */
+extern void kbase_gator_hwcnt_term(struct kbase_gator_hwcnt_info *in_out_info, struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief Poll whether a counter dump is successful.
+ *
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ * @param[out] success      Non-zero on success, zero on failure.
+ *
+ * @return                  Zero if the dump is still pending, non-zero if the dump has completed. Note that a
+ *                          completed dump may not have dumped successfully, so the caller must test for both
+ *                          a completed and successful dump before processing counters.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_complete(struct kbase_gator_hwcnt_handles *opaque_handles, uint32_t * const success);
+
+/**
+ * @brief Request the generation of a new counter dump.
+ *
+ * @param opaque_handles    A wrapper structure for kbase structures.
+ *
+ * @return                  Zero if the hardware device is busy and cannot handle the request, non-zero otherwise.
+ */
+extern uint32_t kbase_gator_instr_hwcnt_dump_irq(struct kbase_gator_hwcnt_handles *opaque_handles);
+
+/**
+ * @brief This function is used to fetch the names table based on the Mali device in use.
+ *
+ * @param[out] total_number_of_counters The total number of counter short names in the Mali device's list.
+ *
+ * @return                              Pointer to an array of strings of length *total_number_of_counters.
+ */
+extern const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_number_of_counters);
+
+/**
+ * @brief This function is used to terminate the use of the names table.
+ *
+ * This function must only be called if the initial call to kbase_gator_hwcnt_init_names returned a non-NULL value.
+ */
+extern void kbase_gator_hwcnt_term_names(void);
+
+#endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h
new file mode 100755 (executable)
index 0000000..97784bb
--- /dev/null
@@ -0,0 +1,1095 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_H_
+#define _KBASE_GATOR_HWCNT_NAMES_H_
+
+/*
+ * "Short names" for hardware counters used by Streamline. Counter names are
+ * stored in accordance with their memory layout in the binary counter block
+ * emitted by the Mali GPU. Each "master" in the GPU emits a fixed-size block
+ * of 64 counters, and each GPU implements the same set of "masters" although
+ * the counters each master exposes within its block of 64 may vary.
+ *
+ * Counters which are an empty string are simply "holes" in the counter memory
+ * where no counter exists.
+ */
+
+static const char * const hardware_counter_names_mali_t60x[] = {
+       /* Job Manager */
+       "",
+       "",
+       "",
+       "",
+       "T60x_MESSAGES_SENT",
+       "T60x_MESSAGES_RECEIVED",
+       "T60x_GPU_ACTIVE",
+       "T60x_IRQ_ACTIVE",
+       "T60x_JS0_JOBS",
+       "T60x_JS0_TASKS",
+       "T60x_JS0_ACTIVE",
+       "",
+       "T60x_JS0_WAIT_READ",
+       "T60x_JS0_WAIT_ISSUE",
+       "T60x_JS0_WAIT_DEPEND",
+       "T60x_JS0_WAIT_FINISH",
+       "T60x_JS1_JOBS",
+       "T60x_JS1_TASKS",
+       "T60x_JS1_ACTIVE",
+       "",
+       "T60x_JS1_WAIT_READ",
+       "T60x_JS1_WAIT_ISSUE",
+       "T60x_JS1_WAIT_DEPEND",
+       "T60x_JS1_WAIT_FINISH",
+       "T60x_JS2_JOBS",
+       "T60x_JS2_TASKS",
+       "T60x_JS2_ACTIVE",
+       "",
+       "T60x_JS2_WAIT_READ",
+       "T60x_JS2_WAIT_ISSUE",
+       "T60x_JS2_WAIT_DEPEND",
+       "T60x_JS2_WAIT_FINISH",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+
+       /* Tiler */
+       "",
+       "",
+       "",
+       "T60x_TI_JOBS_PROCESSED",
+       "T60x_TI_TRIANGLES",
+       "T60x_TI_QUADS",
+       "T60x_TI_POLYGONS",
+       "T60x_TI_POINTS",
+       "T60x_TI_LINES",
+       "T60x_TI_VCACHE_HIT",
+       "T60x_TI_VCACHE_MISS",
+       "T60x_TI_FRONT_FACING",
+       "T60x_TI_BACK_FACING",
+       "T60x_TI_PRIM_VISIBLE",
+       "T60x_TI_PRIM_CULLED",
+       "T60x_TI_PRIM_CLIPPED",
+       "T60x_TI_LEVEL0",
+       "T60x_TI_LEVEL1",
+       "T60x_TI_LEVEL2",
+       "T60x_TI_LEVEL3",
+       "T60x_TI_LEVEL4",
+       "T60x_TI_LEVEL5",
+       "T60x_TI_LEVEL6",
+       "T60x_TI_LEVEL7",
+       "T60x_TI_COMMAND_1",
+       "T60x_TI_COMMAND_2",
+       "T60x_TI_COMMAND_3",
+       "T60x_TI_COMMAND_4",
+       "T60x_TI_COMMAND_4_7",
+       "T60x_TI_COMMAND_8_15",
+       "T60x_TI_COMMAND_16_63",
+       "T60x_TI_COMMAND_64",
+       "T60x_TI_COMPRESS_IN",
+       "T60x_TI_COMPRESS_OUT",
+       "T60x_TI_COMPRESS_FLUSH",
+       "T60x_TI_TIMESTAMPS",
+       "T60x_TI_PCACHE_HIT",
+       "T60x_TI_PCACHE_MISS",
+       "T60x_TI_PCACHE_LINE",
+       "T60x_TI_PCACHE_STALL",
+       "T60x_TI_WRBUF_HIT",
+       "T60x_TI_WRBUF_MISS",
+       "T60x_TI_WRBUF_LINE",
+       "T60x_TI_WRBUF_PARTIAL",
+       "T60x_TI_WRBUF_STALL",
+       "T60x_TI_ACTIVE",
+       "T60x_TI_LOADING_DESC",
+       "T60x_TI_INDEX_WAIT",
+       "T60x_TI_INDEX_RANGE_WAIT",
+       "T60x_TI_VERTEX_WAIT",
+       "T60x_TI_PCACHE_WAIT",
+       "T60x_TI_WRBUF_WAIT",
+       "T60x_TI_BUS_READ",
+       "T60x_TI_BUS_WRITE",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T60x_TI_UTLB_STALL",
+       "T60x_TI_UTLB_REPLAY_MISS",
+       "T60x_TI_UTLB_REPLAY_FULL",
+       "T60x_TI_UTLB_NEW_MISS",
+       "T60x_TI_UTLB_HIT",
+
+       /* Shader Core */
+       "",
+       "",
+       "",
+       "",
+       "T60x_FRAG_ACTIVE",
+       "T60x_FRAG_PRIMITIVES",
+       "T60x_FRAG_PRIMITIVES_DROPPED",
+       "T60x_FRAG_CYCLES_DESC",
+       "T60x_FRAG_CYCLES_PLR",
+       "T60x_FRAG_CYCLES_VERT",
+       "T60x_FRAG_CYCLES_TRISETUP",
+       "T60x_FRAG_CYCLES_RAST",
+       "T60x_FRAG_THREADS",
+       "T60x_FRAG_DUMMY_THREADS",
+       "T60x_FRAG_QUADS_RAST",
+       "T60x_FRAG_QUADS_EZS_TEST",
+       "T60x_FRAG_QUADS_EZS_KILLED",
+       "T60x_FRAG_THREADS_LZS_TEST",
+       "T60x_FRAG_THREADS_LZS_KILLED",
+       "T60x_FRAG_CYCLES_NO_TILE",
+       "T60x_FRAG_NUM_TILES",
+       "T60x_FRAG_TRANS_ELIM",
+       "T60x_COMPUTE_ACTIVE",
+       "T60x_COMPUTE_TASKS",
+       "T60x_COMPUTE_THREADS",
+       "T60x_COMPUTE_CYCLES_DESC",
+       "T60x_TRIPIPE_ACTIVE",
+       "T60x_ARITH_WORDS",
+       "T60x_ARITH_CYCLES_REG",
+       "T60x_ARITH_CYCLES_L0",
+       "T60x_ARITH_FRAG_DEPEND",
+       "T60x_LS_WORDS",
+       "T60x_LS_ISSUES",
+       "T60x_LS_RESTARTS",
+       "T60x_LS_REISSUES_MISS",
+       "T60x_LS_REISSUES_VD",
+       "T60x_LS_REISSUE_ATTRIB_MISS",
+       "T60x_LS_NO_WB",
+       "T60x_TEX_WORDS",
+       "T60x_TEX_BUBBLES",
+       "T60x_TEX_WORDS_L0",
+       "T60x_TEX_WORDS_DESC",
+       "T60x_TEX_ISSUES",
+       "T60x_TEX_RECIRC_FMISS",
+       "T60x_TEX_RECIRC_DESC",
+       "T60x_TEX_RECIRC_MULTI",
+       "T60x_TEX_RECIRC_PMISS",
+       "T60x_TEX_RECIRC_CONF",
+       "T60x_LSC_READ_HITS",
+       "T60x_LSC_READ_MISSES",
+       "T60x_LSC_WRITE_HITS",
+       "T60x_LSC_WRITE_MISSES",
+       "T60x_LSC_ATOMIC_HITS",
+       "T60x_LSC_ATOMIC_MISSES",
+       "T60x_LSC_LINE_FETCHES",
+       "T60x_LSC_DIRTY_LINE",
+       "T60x_LSC_SNOOPS",
+       "T60x_AXI_TLB_STALL",
+       "T60x_AXI_TLB_MIESS",
+       "T60x_AXI_TLB_TRANSACTION",
+       "T60x_LS_TLB_MISS",
+       "T60x_LS_TLB_HIT",
+       "T60x_AXI_BEATS_READ",
+       "T60x_AXI_BEATS_WRITTEN",
+
+       /* L2 and MMU */
+       "",
+       "",
+       "",
+       "",
+       "T60x_MMU_HIT",
+       "T60x_MMU_NEW_MISS",
+       "T60x_MMU_REPLAY_FULL",
+       "T60x_MMU_REPLAY_MISS",
+       "T60x_MMU_TABLE_WALK",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T60x_UTLB_HIT",
+       "T60x_UTLB_NEW_MISS",
+       "T60x_UTLB_REPLAY_FULL",
+       "T60x_UTLB_REPLAY_MISS",
+       "T60x_UTLB_STALL",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T60x_L2_EXT_WRITE_BEATS",
+       "T60x_L2_EXT_READ_BEATS",
+       "T60x_L2_ANY_LOOKUP",
+       "T60x_L2_READ_LOOKUP",
+       "T60x_L2_SREAD_LOOKUP",
+       "T60x_L2_READ_REPLAY",
+       "T60x_L2_READ_SNOOP",
+       "T60x_L2_READ_HIT",
+       "T60x_L2_CLEAN_MISS",
+       "T60x_L2_WRITE_LOOKUP",
+       "T60x_L2_SWRITE_LOOKUP",
+       "T60x_L2_WRITE_REPLAY",
+       "T60x_L2_WRITE_SNOOP",
+       "T60x_L2_WRITE_HIT",
+       "T60x_L2_EXT_READ_FULL",
+       "T60x_L2_EXT_READ_HALF",
+       "T60x_L2_EXT_WRITE_FULL",
+       "T60x_L2_EXT_WRITE_HALF",
+       "T60x_L2_EXT_READ",
+       "T60x_L2_EXT_READ_LINE",
+       "T60x_L2_EXT_WRITE",
+       "T60x_L2_EXT_WRITE_LINE",
+       "T60x_L2_EXT_WRITE_SMALL",
+       "T60x_L2_EXT_BARRIER",
+       "T60x_L2_EXT_AR_STALL",
+       "T60x_L2_EXT_R_BUF_FULL",
+       "T60x_L2_EXT_RD_BUF_FULL",
+       "T60x_L2_EXT_R_RAW",
+       "T60x_L2_EXT_W_STALL",
+       "T60x_L2_EXT_W_BUF_FULL",
+       "T60x_L2_EXT_R_W_HAZARD",
+       "T60x_L2_TAG_HAZARD",
+       "T60x_L2_SNOOP_FULL",
+       "T60x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counter_names_mali_t62x[] = {
+       /* Job Manager */
+       "",
+       "",
+       "",
+       "",
+       "T62x_MESSAGES_SENT",
+       "T62x_MESSAGES_RECEIVED",
+       "T62x_GPU_ACTIVE",
+       "T62x_IRQ_ACTIVE",
+       "T62x_JS0_JOBS",
+       "T62x_JS0_TASKS",
+       "T62x_JS0_ACTIVE",
+       "",
+       "T62x_JS0_WAIT_READ",
+       "T62x_JS0_WAIT_ISSUE",
+       "T62x_JS0_WAIT_DEPEND",
+       "T62x_JS0_WAIT_FINISH",
+       "T62x_JS1_JOBS",
+       "T62x_JS1_TASKS",
+       "T62x_JS1_ACTIVE",
+       "",
+       "T62x_JS1_WAIT_READ",
+       "T62x_JS1_WAIT_ISSUE",
+       "T62x_JS1_WAIT_DEPEND",
+       "T62x_JS1_WAIT_FINISH",
+       "T62x_JS2_JOBS",
+       "T62x_JS2_TASKS",
+       "T62x_JS2_ACTIVE",
+       "",
+       "T62x_JS2_WAIT_READ",
+       "T62x_JS2_WAIT_ISSUE",
+       "T62x_JS2_WAIT_DEPEND",
+       "T62x_JS2_WAIT_FINISH",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+
+       /* Tiler */
+       "",
+       "",
+       "",
+       "T62x_TI_JOBS_PROCESSED",
+       "T62x_TI_TRIANGLES",
+       "T62x_TI_QUADS",
+       "T62x_TI_POLYGONS",
+       "T62x_TI_POINTS",
+       "T62x_TI_LINES",
+       "T62x_TI_VCACHE_HIT",
+       "T62x_TI_VCACHE_MISS",
+       "T62x_TI_FRONT_FACING",
+       "T62x_TI_BACK_FACING",
+       "T62x_TI_PRIM_VISIBLE",
+       "T62x_TI_PRIM_CULLED",
+       "T62x_TI_PRIM_CLIPPED",
+       "T62x_TI_LEVEL0",
+       "T62x_TI_LEVEL1",
+       "T62x_TI_LEVEL2",
+       "T62x_TI_LEVEL3",
+       "T62x_TI_LEVEL4",
+       "T62x_TI_LEVEL5",
+       "T62x_TI_LEVEL6",
+       "T62x_TI_LEVEL7",
+       "T62x_TI_COMMAND_1",
+       "T62x_TI_COMMAND_2",
+       "T62x_TI_COMMAND_3",
+       "T62x_TI_COMMAND_4",
+       "T62x_TI_COMMAND_5_7",
+       "T62x_TI_COMMAND_8_15",
+       "T62x_TI_COMMAND_16_63",
+       "T62x_TI_COMMAND_64",
+       "T62x_TI_COMPRESS_IN",
+       "T62x_TI_COMPRESS_OUT",
+       "T62x_TI_COMPRESS_FLUSH",
+       "T62x_TI_TIMESTAMPS",
+       "T62x_TI_PCACHE_HIT",
+       "T62x_TI_PCACHE_MISS",
+       "T62x_TI_PCACHE_LINE",
+       "T62x_TI_PCACHE_STALL",
+       "T62x_TI_WRBUF_HIT",
+       "T62x_TI_WRBUF_MISS",
+       "T62x_TI_WRBUF_LINE",
+       "T62x_TI_WRBUF_PARTIAL",
+       "T62x_TI_WRBUF_STALL",
+       "T62x_TI_ACTIVE",
+       "T62x_TI_LOADING_DESC",
+       "T62x_TI_INDEX_WAIT",
+       "T62x_TI_INDEX_RANGE_WAIT",
+       "T62x_TI_VERTEX_WAIT",
+       "T62x_TI_PCACHE_WAIT",
+       "T62x_TI_WRBUF_WAIT",
+       "T62x_TI_BUS_READ",
+       "T62x_TI_BUS_WRITE",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T62x_TI_UTLB_STALL",
+       "T62x_TI_UTLB_REPLAY_MISS",
+       "T62x_TI_UTLB_REPLAY_FULL",
+       "T62x_TI_UTLB_NEW_MISS",
+       "T62x_TI_UTLB_HIT",
+
+       /* Shader Core */
+       "",
+       "",
+       "",
+       "T62x_SHADER_CORE_ACTIVE",
+       "T62x_FRAG_ACTIVE",
+       "T62x_FRAG_PRIMITIVES",
+       "T62x_FRAG_PRIMITIVES_DROPPED",
+       "T62x_FRAG_CYCLES_DESC",
+       "T62x_FRAG_CYCLES_FPKQ_ACTIVE",
+       "T62x_FRAG_CYCLES_VERT",
+       "T62x_FRAG_CYCLES_TRISETUP",
+       "T62x_FRAG_CYCLES_EZS_ACTIVE",
+       "T62x_FRAG_THREADS",
+       "T62x_FRAG_DUMMY_THREADS",
+       "T62x_FRAG_QUADS_RAST",
+       "T62x_FRAG_QUADS_EZS_TEST",
+       "T62x_FRAG_QUADS_EZS_KILLED",
+       "T62x_FRAG_THREADS_LZS_TEST",
+       "T62x_FRAG_THREADS_LZS_KILLED",
+       "T62x_FRAG_CYCLES_NO_TILE",
+       "T62x_FRAG_NUM_TILES",
+       "T62x_FRAG_TRANS_ELIM",
+       "T62x_COMPUTE_ACTIVE",
+       "T62x_COMPUTE_TASKS",
+       "T62x_COMPUTE_THREADS",
+       "T62x_COMPUTE_CYCLES_DESC",
+       "T62x_TRIPIPE_ACTIVE",
+       "T62x_ARITH_WORDS",
+       "T62x_ARITH_CYCLES_REG",
+       "T62x_ARITH_CYCLES_L0",
+       "T62x_ARITH_FRAG_DEPEND",
+       "T62x_LS_WORDS",
+       "T62x_LS_ISSUES",
+       "T62x_LS_RESTARTS",
+       "T62x_LS_REISSUES_MISS",
+       "T62x_LS_REISSUES_VD",
+       "T62x_LS_REISSUE_ATTRIB_MISS",
+       "T62x_LS_NO_WB",
+       "T62x_TEX_WORDS",
+       "T62x_TEX_BUBBLES",
+       "T62x_TEX_WORDS_L0",
+       "T62x_TEX_WORDS_DESC",
+       "T62x_TEX_ISSUES",
+       "T62x_TEX_RECIRC_FMISS",
+       "T62x_TEX_RECIRC_DESC",
+       "T62x_TEX_RECIRC_MULTI",
+       "T62x_TEX_RECIRC_PMISS",
+       "T62x_TEX_RECIRC_CONF",
+       "T62x_LSC_READ_HITS",
+       "T62x_LSC_READ_MISSES",
+       "T62x_LSC_WRITE_HITS",
+       "T62x_LSC_WRITE_MISSES",
+       "T62x_LSC_ATOMIC_HITS",
+       "T62x_LSC_ATOMIC_MISSES",
+       "T62x_LSC_LINE_FETCHES",
+       "T62x_LSC_DIRTY_LINE",
+       "T62x_LSC_SNOOPS",
+       "T62x_AXI_TLB_STALL",
+       "T62x_AXI_TLB_MIESS",
+       "T62x_AXI_TLB_TRANSACTION",
+       "T62x_LS_TLB_MISS",
+       "T62x_LS_TLB_HIT",
+       "T62x_AXI_BEATS_READ",
+       "T62x_AXI_BEATS_WRITTEN",
+
+       /* L2 and MMU */
+       "",
+       "",
+       "",
+       "",
+       "T62x_MMU_HIT",
+       "T62x_MMU_NEW_MISS",
+       "T62x_MMU_REPLAY_FULL",
+       "T62x_MMU_REPLAY_MISS",
+       "T62x_MMU_TABLE_WALK",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T62x_UTLB_HIT",
+       "T62x_UTLB_NEW_MISS",
+       "T62x_UTLB_REPLAY_FULL",
+       "T62x_UTLB_REPLAY_MISS",
+       "T62x_UTLB_STALL",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T62x_L2_EXT_WRITE_BEATS",
+       "T62x_L2_EXT_READ_BEATS",
+       "T62x_L2_ANY_LOOKUP",
+       "T62x_L2_READ_LOOKUP",
+       "T62x_L2_SREAD_LOOKUP",
+       "T62x_L2_READ_REPLAY",
+       "T62x_L2_READ_SNOOP",
+       "T62x_L2_READ_HIT",
+       "T62x_L2_CLEAN_MISS",
+       "T62x_L2_WRITE_LOOKUP",
+       "T62x_L2_SWRITE_LOOKUP",
+       "T62x_L2_WRITE_REPLAY",
+       "T62x_L2_WRITE_SNOOP",
+       "T62x_L2_WRITE_HIT",
+       "T62x_L2_EXT_READ_FULL",
+       "T62x_L2_EXT_READ_HALF",
+       "T62x_L2_EXT_WRITE_FULL",
+       "T62x_L2_EXT_WRITE_HALF",
+       "T62x_L2_EXT_READ",
+       "T62x_L2_EXT_READ_LINE",
+       "T62x_L2_EXT_WRITE",
+       "T62x_L2_EXT_WRITE_LINE",
+       "T62x_L2_EXT_WRITE_SMALL",
+       "T62x_L2_EXT_BARRIER",
+       "T62x_L2_EXT_AR_STALL",
+       "T62x_L2_EXT_R_BUF_FULL",
+       "T62x_L2_EXT_RD_BUF_FULL",
+       "T62x_L2_EXT_R_RAW",
+       "T62x_L2_EXT_W_STALL",
+       "T62x_L2_EXT_W_BUF_FULL",
+       "T62x_L2_EXT_R_W_HAZARD",
+       "T62x_L2_TAG_HAZARD",
+       "T62x_L2_SNOOP_FULL",
+       "T62x_L2_REPLAY_FULL"
+};
+
+static const char * const hardware_counter_names_mali_t72x[] = {
+       /* Job Manager */
+       "",
+       "",
+       "",
+       "",
+       "T72x_GPU_ACTIVE",
+       "T72x_IRQ_ACTIVE",
+       "T72x_JS0_JOBS",
+       "T72x_JS0_TASKS",
+       "T72x_JS0_ACTIVE",
+       "T72x_JS1_JOBS",
+       "T72x_JS1_TASKS",
+       "T72x_JS1_ACTIVE",
+       "T72x_JS2_JOBS",
+       "T72x_JS2_TASKS",
+       "T72x_JS2_ACTIVE",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+
+       /* Tiler */
+       "",
+       "",
+       "",
+       "T72x_TI_JOBS_PROCESSED",
+       "T72x_TI_TRIANGLES",
+       "T72x_TI_QUADS",
+       "T72x_TI_POLYGONS",
+       "T72x_TI_POINTS",
+       "T72x_TI_LINES",
+       "T72x_TI_FRONT_FACING",
+       "T72x_TI_BACK_FACING",
+       "T72x_TI_PRIM_VISIBLE",
+       "T72x_TI_PRIM_CULLED",
+       "T72x_TI_PRIM_CLIPPED",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T72x_TI_ACTIVE",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+
+       /* Shader Core */
+       "",
+       "",
+       "",
+       "",
+       "T72x_FRAG_ACTIVE",
+       "T72x_FRAG_PRIMITIVES",
+       "T72x_FRAG_PRIMITIVES_DROPPED",
+       "T72x_FRAG_THREADS",
+       "T72x_FRAG_DUMMY_THREADS",
+       "T72x_FRAG_QUADS_RAST",
+       "T72x_FRAG_QUADS_EZS_TEST",
+       "T72x_FRAG_QUADS_EZS_KILLED",
+       "T72x_FRAG_THREADS_LZS_TEST",
+       "T72x_FRAG_THREADS_LZS_KILLED",
+       "T72x_FRAG_CYCLES_NO_TILE",
+       "T72x_FRAG_NUM_TILES",
+       "T72x_FRAG_TRANS_ELIM",
+       "T72x_COMPUTE_ACTIVE",
+       "T72x_COMPUTE_TASKS",
+       "T72x_COMPUTE_THREADS",
+       "T72x_TRIPIPE_ACTIVE",
+       "T72x_ARITH_WORDS",
+       "T72x_ARITH_CYCLES_REG",
+       "T72x_LS_WORDS",
+       "T72x_LS_ISSUES",
+       "T72x_LS_RESTARTS",
+       "T72x_LS_REISSUES_MISS",
+       "T72x_TEX_WORDS",
+       "T72x_TEX_BUBBLES",
+       "T72x_TEX_ISSUES",
+       "T72x_LSC_READ_HITS",
+       "T72x_LSC_READ_MISSES",
+       "T72x_LSC_WRITE_HITS",
+       "T72x_LSC_WRITE_MISSES",
+       "T72x_LSC_ATOMIC_HITS",
+       "T72x_LSC_ATOMIC_MISSES",
+       "T72x_LSC_LINE_FETCHES",
+       "T72x_LSC_DIRTY_LINE",
+       "T72x_LSC_SNOOPS",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+
+       /* L2 and MMU */
+       "",
+       "",
+       "",
+       "",
+       "T72x_L2_EXT_WRITE_BEAT",
+       "T72x_L2_EXT_READ_BEAT",
+       "T72x_L2_READ_SNOOP",
+       "T72x_L2_READ_HIT",
+       "T72x_L2_WRITE_SNOOP",
+       "T72x_L2_WRITE_HIT",
+       "T72x_L2_EXT_WRITE_SMALL",
+       "T72x_L2_EXT_BARRIER",
+       "T72x_L2_EXT_AR_STALL",
+       "T72x_L2_EXT_W_STALL",
+       "T72x_L2_SNOOP_FULL",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       ""
+};
+
+static const char * const hardware_counter_names_mali_t76x[] = {
+       /* Job Manager */
+       "",
+       "",
+       "",
+       "",
+       "T76x_MESSAGES_SENT",
+       "T76x_MESSAGES_RECEIVED",
+       "T76x_GPU_ACTIVE",
+       "T76x_IRQ_ACTIVE",
+       "T76x_JS0_JOBS",
+       "T76x_JS0_TASKS",
+       "T76x_JS0_ACTIVE",
+       "",
+       "T76x_JS0_WAIT_READ",
+       "T76x_JS0_WAIT_ISSUE",
+       "T76x_JS0_WAIT_DEPEND",
+       "T76x_JS0_WAIT_FINISH",
+       "T76x_JS1_JOBS",
+       "T76x_JS1_TASKS",
+       "T76x_JS1_ACTIVE",
+       "",
+       "T76x_JS1_WAIT_READ",
+       "T76x_JS1_WAIT_ISSUE",
+       "T76x_JS1_WAIT_DEPEND",
+       "T76x_JS1_WAIT_FINISH",
+       "T76x_JS2_JOBS",
+       "T76x_JS2_TASKS",
+       "T76x_JS2_ACTIVE",
+       "",
+       "T76x_JS2_WAIT_READ",
+       "T76x_JS2_WAIT_ISSUE",
+       "T76x_JS2_WAIT_DEPEND",
+       "T76x_JS2_WAIT_FINISH",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+
+       /* Tiler */
+       "",
+       "",
+       "",
+       "T76x_TI_JOBS_PROCESSED",
+       "T76x_TI_TRIANGLES",
+       "T76x_TI_QUADS",
+       "T76x_TI_POLYGONS",
+       "T76x_TI_POINTS",
+       "T76x_TI_LINES",
+       "T76x_TI_VCACHE_HIT",
+       "T76x_TI_VCACHE_MISS",
+       "T76x_TI_FRONT_FACING",
+       "T76x_TI_BACK_FACING",
+       "T76x_TI_PRIM_VISIBLE",
+       "T76x_TI_PRIM_CULLED",
+       "T76x_TI_PRIM_CLIPPED",
+       "T76x_TI_LEVEL0",
+       "T76x_TI_LEVEL1",
+       "T76x_TI_LEVEL2",
+       "T76x_TI_LEVEL3",
+       "T76x_TI_LEVEL4",
+       "T76x_TI_LEVEL5",
+       "T76x_TI_LEVEL6",
+       "T76x_TI_LEVEL7",
+       "T76x_TI_COMMAND_1",
+       "T76x_TI_COMMAND_2",
+       "T76x_TI_COMMAND_3",
+       "T76x_TI_COMMAND_4",
+       "T76x_TI_COMMAND_5_7",
+       "T76x_TI_COMMAND_8_15",
+       "T76x_TI_COMMAND_16_63",
+       "T76x_TI_COMMAND_64",
+       "T76x_TI_COMPRESS_IN",
+       "T76x_TI_COMPRESS_OUT",
+       "T76x_TI_COMPRESS_FLUSH",
+       "T76x_TI_TIMESTAMPS",
+       "T76x_TI_PCACHE_HIT",
+       "T76x_TI_PCACHE_MISS",
+       "T76x_TI_PCACHE_LINE",
+       "T76x_TI_PCACHE_STALL",
+       "T76x_TI_WRBUF_HIT",
+       "T76x_TI_WRBUF_MISS",
+       "T76x_TI_WRBUF_LINE",
+       "T76x_TI_WRBUF_PARTIAL",
+       "T76x_TI_WRBUF_STALL",
+       "T76x_TI_ACTIVE",
+       "T76x_TI_LOADING_DESC",
+       "T76x_TI_INDEX_WAIT",
+       "T76x_TI_INDEX_RANGE_WAIT",
+       "T76x_TI_VERTEX_WAIT",
+       "T76x_TI_PCACHE_WAIT",
+       "T76x_TI_WRBUF_WAIT",
+       "T76x_TI_BUS_READ",
+       "T76x_TI_BUS_WRITE",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T76x_TI_UTLB_HIT",
+       "T76x_TI_UTLB_NEW_MISS",
+       "T76x_TI_UTLB_REPLAY_FULL",
+       "T76x_TI_UTLB_REPLAY_MISS",
+       "T76x_TI_UTLB_STALL",
+
+       /* Shader Core */
+       "",
+       "",
+       "",
+       "",
+       "T76x_FRAG_ACTIVE",
+       "T76x_FRAG_PRIMITIVES",
+       "T76x_FRAG_PRIMITIVES_DROPPED",
+       "T76x_FRAG_CYCLES_DESC",
+       "T76x_FRAG_CYCLES_FPKQ_ACTIVE",
+       "T76x_FRAG_CYCLES_VERT",
+       "T76x_FRAG_CYCLES_TRISETUP",
+       "T76x_FRAG_CYCLES_EZS_ACTIVE",
+       "T76x_FRAG_THREADS",
+       "T76x_FRAG_DUMMY_THREADS",
+       "T76x_FRAG_QUADS_RAST",
+       "T76x_FRAG_QUADS_EZS_TEST",
+       "T76x_FRAG_QUADS_EZS_KILLED",
+       "T76x_FRAG_THREADS_LZS_TEST",
+       "T76x_FRAG_THREADS_LZS_KILLED",
+       "T76x_FRAG_CYCLES_NO_TILE",
+       "T76x_FRAG_NUM_TILES",
+       "T76x_FRAG_TRANS_ELIM",
+       "T76x_COMPUTE_ACTIVE",
+       "T76x_COMPUTE_TASKS",
+       "T76x_COMPUTE_THREADS",
+       "T76x_COMPUTE_CYCLES_DESC",
+       "T76x_TRIPIPE_ACTIVE",
+       "T76x_ARITH_WORDS",
+       "T76x_ARITH_CYCLES_REG",
+       "T76x_ARITH_CYCLES_L0",
+       "T76x_ARITH_FRAG_DEPEND",
+       "T76x_LS_WORDS",
+       "T76x_LS_ISSUES",
+       "T76x_LS_REISSUE_ATTR",
+       "T76x_LS_REISSUES_VARY",
+       "T76x_LS_VARY_RV_MISS",
+       "T76x_LS_VARY_RV_HIT",
+       "T76x_LS_NO_UNPARK",
+       "T76x_TEX_WORDS",
+       "T76x_TEX_BUBBLES",
+       "T76x_TEX_WORDS_L0",
+       "T76x_TEX_WORDS_DESC",
+       "T76x_TEX_ISSUES",
+       "T76x_TEX_RECIRC_FMISS",
+       "T76x_TEX_RECIRC_DESC",
+       "T76x_TEX_RECIRC_MULTI",
+       "T76x_TEX_RECIRC_PMISS",
+       "T76x_TEX_RECIRC_CONF",
+       "T76x_LSC_READ_HITS",
+       "T76x_LSC_READ_OP",
+       "T76x_LSC_WRITE_HITS",
+       "T76x_LSC_WRITE_OP",
+       "T76x_LSC_ATOMIC_HITS",
+       "T76x_LSC_ATOMIC_OP",
+       "T76x_LSC_LINE_FETCHES",
+       "T76x_LSC_DIRTY_LINE",
+       "T76x_LSC_SNOOPS",
+       "T76x_AXI_TLB_STALL",
+       "T76x_AXI_TLB_MIESS",
+       "T76x_AXI_TLB_TRANSACTION",
+       "T76x_LS_TLB_MISS",
+       "T76x_LS_TLB_HIT",
+       "T76x_AXI_BEATS_READ",
+       "T76x_AXI_BEATS_WRITTEN",
+
+       /* L2 and MMU */
+       "",
+       "",
+       "",
+       "",
+       "T76x_MMU_HIT",
+       "T76x_MMU_NEW_MISS",
+       "T76x_MMU_REPLAY_FULL",
+       "T76x_MMU_REPLAY_MISS",
+       "T76x_MMU_TABLE_WALK",
+       "T76x_MMU_REQUESTS",
+       "",
+       "",
+       "T76x_UTLB_HIT",
+       "T76x_UTLB_NEW_MISS",
+       "T76x_UTLB_REPLAY_FULL",
+       "T76x_UTLB_REPLAY_MISS",
+       "T76x_UTLB_STALL",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "",
+       "T76x_L2_EXT_WRITE_BEATS",
+       "T76x_L2_EXT_READ_BEATS",
+       "T76x_L2_ANY_LOOKUP",
+       "T76x_L2_READ_LOOKUP",
+       "T76x_L2_SREAD_LOOKUP",
+       "T76x_L2_READ_REPLAY",
+       "T76x_L2_READ_SNOOP",
+       "T76x_L2_READ_HIT",
+       "T76x_L2_CLEAN_MISS",
+       "T76x_L2_WRITE_LOOKUP",
+       "T76x_L2_SWRITE_LOOKUP",
+       "T76x_L2_WRITE_REPLAY",
+       "T76x_L2_WRITE_SNOOP",
+       "T76x_L2_WRITE_HIT",
+       "T76x_L2_EXT_READ_FULL",
+       "",
+       "T76x_L2_EXT_WRITE_FULL",
+       "T76x_L2_EXT_R_W_HAZARD",
+       "T76x_L2_EXT_READ",
+       "T76x_L2_EXT_READ_LINE",
+       "T76x_L2_EXT_WRITE",
+       "T76x_L2_EXT_WRITE_LINE",
+       "T76x_L2_EXT_WRITE_SMALL",
+       "T76x_L2_EXT_BARRIER",
+       "T76x_L2_EXT_AR_STALL",
+       "T76x_L2_EXT_R_BUF_FULL",
+       "T76x_L2_EXT_RD_BUF_FULL",
+       "T76x_L2_EXT_R_RAW",
+       "T76x_L2_EXT_W_STALL",
+       "T76x_L2_EXT_W_BUF_FULL",
+       "T76x_L2_EXT_R_BUF_FULL",
+       "T76x_L2_TAG_HAZARD",
+       "T76x_L2_SNOOP_FULL",
+       "T76x_L2_REPLAY_FULL"
+};
+
+#endif
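[Editor's note] The header comment above describes one fixed 64-entry block per hardware unit (Job Manager, Tiler, Shader Core, L2/MMU), with empty strings marking unused slots. A minimal standalone sketch of that indexing scheme follows; the lookup helper and the tiny demo table are illustrative only and not part of the driver, though the two example offsets (GPU_ACTIVE at slot 6 of the Job Manager block, TI_JOBS_PROCESSED at slot 3 of the Tiler block) do match the full tables above.

#include <stdio.h>

/* Block order used by the name tables above: each unit owns 64 slots. */
enum hwcnt_block { BLK_JM = 0, BLK_TILER = 1, BLK_SHADER = 2, BLK_L2_MMU = 3 };
#define HWCNT_BLOCK_SIZE 64

/* Tiny stand-in table: a couple of slots filled, the rest left as "holes". */
static const char *const demo_names[4 * HWCNT_BLOCK_SIZE] = {
	[BLK_JM * HWCNT_BLOCK_SIZE + 6]    = "T60x_GPU_ACTIVE",
	[BLK_TILER * HWCNT_BLOCK_SIZE + 3] = "T60x_TI_JOBS_PROCESSED",
};

/* Resolve a (block, offset) pair to a counter name; NULL means a hole. */
static const char *hwcnt_name(enum hwcnt_block blk, unsigned int offset)
{
	const char *name = demo_names[blk * HWCNT_BLOCK_SIZE + offset];

	return (name && name[0]) ? name : NULL;
}

int main(void)
{
	printf("%s\n", hwcnt_name(BLK_TILER, 3)); /* prints T60x_TI_JOBS_PROCESSED */
	return 0;
}

The same block-times-64-plus-offset arithmetic is presumably what lets the gator API map a raw counter dump onto these name tables.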
index 1338b6b61073f7225b16379bc3ecffec6a0bd423..45d0ce53e44ef3f9393d91153ea1ed2109545ed4 100755 (executable)
@@ -17,6 +17,7 @@
 
 #include <mali_kbase_gpu_memory_debugfs.h>
 
+#ifdef CONFIG_DEBUG_FS
 /** Show callback for the @c gpu_memory debugfs file.
  *
  * This function is called to get the contents of the @c gpu_memory debugfs
@@ -37,7 +38,7 @@ static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
        kbdev_list = kbase_dev_list_get();
        list_for_each(entry, kbdev_list) {
                struct kbase_device *kbdev = NULL;
-               kbasep_kctx_list_element *element;
+               struct kbasep_kctx_list_element *element;
 
                kbdev = list_entry(entry, struct kbase_device, entry);
                /* output the total memory usage and cap for this device */
@@ -80,7 +81,7 @@ static const struct file_operations kbasep_gpu_memory_debugfs_fops = {
 /*
  *  Initialize debugfs entry for gpu_memory
  */
-mali_error kbasep_gpu_memory_debugfs_init(kbase_device *kbdev)
+mali_error kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
 {
        kbdev->gpu_memory_dentry = debugfs_create_file("gpu_memory", \
                                        S_IRUGO, \
@@ -96,8 +97,19 @@ mali_error kbasep_gpu_memory_debugfs_init(kbase_device *kbdev)
 /*
  *  Terminate debugfs entry for gpu_memory
  */
-void kbasep_gpu_memory_debugfs_term(kbase_device *kbdev)
+void kbasep_gpu_memory_debugfs_term(struct kbase_device *kbdev)
 {
        debugfs_remove(kbdev->gpu_memory_dentry);
 }
-
+#else
+/*
+ * Stub functions for when debugfs is disabled
+ */
+mali_error kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev)
+{
+       return MALI_ERROR_NONE;
+}
+void kbasep_gpu_memory_debugfs_term(struct kbase_device *kbdev)
+{
+}
+#endif
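[Editor's note] With CONFIG_DEBUG_FS enabled, the entry registered above is an ordinary read-only debugfs file, and the new #else branch turns both init and term into no-ops so callers never need their own ifdefs. A user-space read might look like the sketch below; the path is an assumption (it depends on where debugfs is mounted and which parent dentry the driver passes to debugfs_create_file), so treat it as illustrative.

#include <stdio.h>

/* Illustrative location only: the real path depends on the debugfs mount
 * point and on the parent dentry passed to debugfs_create_file() above. */
#define GPU_MEMORY_PATH "/sys/kernel/debug/mali/gpu_memory"

int main(void)
{
	char line[256];
	FILE *f = fopen(GPU_MEMORY_PATH, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Each line reports GPU memory usage for a device or kbase context. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}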
index b53a9ffdedf902fefcd5a7d98b23d92906e0b3e4..7ceb380a2653aa94c492b5b9e4f3bff91429ef99 100755 (executable)
 /**
  * @brief Initialize gpu_memory debugfs entry
  */
-mali_error kbasep_gpu_memory_debugfs_init(kbase_device *kbdev);
+mali_error kbasep_gpu_memory_debugfs_init(struct kbase_device *kbdev);
 
 /**
  * @brief Terminate gpu_memory debugfs entry
  */
-void kbasep_gpu_memory_debugfs_term(kbase_device *kbdev);
+void kbasep_gpu_memory_debugfs_term(struct kbase_device *kbdev);
 
 #endif  /*_KBASE_GPU_MEMORY_H*/
index 0fbefea11af801120b0e59e2b455210c8e827c4a..2d2ff64b90cfcc18f1b881909d5e1cc843117d07 100755 (executable)
@@ -41,7 +41,7 @@
 #define KBASE_UBFX32(value, offset, size) \
        (((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (u32)(size)) - 1))
 
-mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops * const kbase_props)
+mali_error kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props)
 {
        kbase_gpuprops_clock_speed_function get_gpu_speed_mhz;
        u32 gpu_speed_mhz;
@@ -52,7 +52,7 @@ mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops *
 
        /* Current GPU speed is requested from the system integrator via the KBASE_CONFIG_ATTR_GPU_SPEED_FUNC function.
         * If that function fails, or the function is not provided by the system integrator, we report the maximum
-        * GPU speed as specified by KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX.
+        * GPU speed as specified by GPU_FREQ_KHZ_MAX.
         */
        get_gpu_speed_mhz = (kbase_gpuprops_clock_speed_function) kbasep_get_config_value(kctx->kbdev, kctx->kbdev->config_attributes, KBASE_CONFIG_ATTR_GPU_SPEED_FUNC);
        if (get_gpu_speed_mhz != NULL) {
@@ -76,7 +76,7 @@ mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops *
        return MALI_ERROR_NONE;
 }
 
-STATIC void kbase_gpuprops_dump_registers(kbase_device *kbdev, kbase_gpuprops_regdump *regdump)
+STATIC void kbase_gpuprops_dump_registers(struct kbase_device *kbdev, struct kbase_gpuprops_regdump *regdump)
 {
        int i;
 
@@ -195,11 +195,11 @@ STATIC void kbase_gpuprops_construct_coherent_groups(base_gpu_props * const prop
  * Only the raw properties are filled in this function
  *
  * @param gpu_props  The base_gpu_props structure
- * @param kbdev      The kbase_device structure for the device
+ * @param kbdev      The struct kbase_device structure for the device
  */
-static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, kbase_device *kbdev)
+static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
 {
-       kbase_gpuprops_regdump regdump;
+       struct kbase_gpuprops_regdump regdump;
        int i;
 
        KBASE_DEBUG_ASSERT(NULL != kbdev);
@@ -207,7 +207,6 @@ static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, kbase_dev
 
        /* Dump relevant registers */
        kbase_gpuprops_dump_registers(kbdev, &regdump);
-
        gpu_props->raw_props.gpu_id = regdump.gpu_id;
        gpu_props->raw_props.tiler_features = regdump.tiler_features;
        gpu_props->raw_props.mem_features = regdump.mem_features;
@@ -240,9 +239,9 @@ static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, kbase_dev
  * Fill the base_gpu_props structure with values derived from the GPU configuration registers
  *
  * @param gpu_props  The base_gpu_props structure
- * @param kbdev      The kbase_device structure for the device
+ * @param kbdev      The struct kbase_device structure for the device
  */
-static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, kbase_device *kbdev)
+static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, struct kbase_device *kbdev)
 {
        int i;
 
@@ -300,7 +299,7 @@ static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, kba
        kbase_gpuprops_construct_coherent_groups(gpu_props);
 }
 
-void kbase_gpuprops_set(kbase_device *kbdev)
+void kbase_gpuprops_set(struct kbase_device *kbdev)
 {
        kbase_gpu_props *gpu_props;
        struct midg_raw_gpu_props *raw;
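[Editor's note] The comment in kbase_gpuprops_uk_get_props() above refers to a clock-speed hook that the system integrator may register through KBASE_CONFIG_ATTR_GPU_SPEED_FUNC, with GPU_FREQ_KHZ_MAX as the fallback. A minimal sketch of such a platform hook follows; the callback signature and the attribute-table registration are taken from typical kbase platform configs and should be treated as assumptions, since neither appears in this hunk.

#include <linux/types.h>

/* Hypothetical platform hook, assuming the callback returns 0 on success
 * and reports the current GPU clock in MHz through the out-parameter. */
static int example_get_gpu_speed_mhz(u32 *clock_speed)
{
	/* A real integration would query the clock framework or PMU here. */
	*clock_speed = 600;	/* assumed nominal frequency in MHz */
	return 0;
}

/* A real platform config would then reference this from its attribute list,
 * roughly:
 *   { KBASE_CONFIG_ATTR_GPU_SPEED_FUNC,
 *     (uintptr_t)&example_get_gpu_speed_mhz },
 * so that kbase_gpuprops_uk_get_props() reports the live clock instead of
 * falling back to GPU_FREQ_KHZ_MAX.
 */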
index 835c87fe89d4eb85973d0a27d1b852c070ae0161..fe2676cd9b998089cf49f051dec2702519a4ab95 100755 (executable)
@@ -35,20 +35,20 @@ struct kbase_device;
  *
  * Set up Kbase GPU properties with information from the GPU registers
  *
- * @param kbdev                The kbase_device structure for the device
+ * @param kbdev                The struct kbase_device structure for the device
  */
 void kbase_gpuprops_set(struct kbase_device *kbdev);
 
 /**
  * @brief Provide GPU properties to userside through UKU call.
  *
- * Fill the kbase_uk_gpuprops with values from GPU configuration registers.
+ * Fill the struct kbase_uk_gpuprops with values from GPU configuration registers.
  *
- * @param kctx         The kbase_context structure
- * @param kbase_props  A copy of the kbase_uk_gpuprops structure from userspace
+ * @param kctx         The struct kbase_context structure
+ * @param kbase_props  A copy of the struct kbase_uk_gpuprops structure from userspace
  *
  * @return MALI_ERROR_NONE on success. Any other value indicates failure.
  */
-mali_error kbase_gpuprops_uk_get_props(kbase_context *kctx, kbase_uk_gpuprops * const kbase_props);
+mali_error kbase_gpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_gpuprops * const kbase_props);
 
 #endif                         /* _KBASE_GPUPROPS_H_ */
index 8793e0aff6642a5dcd3f5ea94de34daa64558133..6ae5a20d5d4e9b90d05464a335e41ef2fabf7779 100755 (executable)
@@ -39,27 +39,20 @@ typedef struct kbase_gpuprops_regdump {
        u32 mmu_features;
        u32 as_present;
        u32 js_present;
-
-       u32 js_features[MIDG_MAX_JOB_SLOTS];
-
+       u32 thread_max_threads;
+       u32 thread_max_workgroup_size;
+       u32 thread_max_barrier_size;
+       u32 thread_features;
        u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
-
+       u32 js_features[MIDG_MAX_JOB_SLOTS];
        u32 shader_present_lo;
        u32 shader_present_hi;
-
        u32 tiler_present_lo;
        u32 tiler_present_hi;
-
        u32 l2_present_lo;
        u32 l2_present_hi;
-
        u32 l3_present_lo;
        u32 l3_present_hi;
-
-       u32 thread_max_threads;
-       u32 thread_max_workgroup_size;
-       u32 thread_max_barrier_size;
-       u32 thread_features;
 } kbase_gpuprops_regdump;
 
 typedef struct kbase_gpu_cache_props {
@@ -85,11 +78,11 @@ typedef struct mali_kbase_gpu_props {
        u8 num_address_spaces;
        u8 num_job_slots;
 
-       kbase_gpu_cache_props l2_props;
-       kbase_gpu_cache_props l3_props;
+       struct kbase_gpu_cache_props l2_props;
+       struct kbase_gpu_cache_props l3_props;
 
-       kbase_gpu_mem_props mem;
-       kbase_gpu_mmu_props mmu;
+       struct kbase_gpu_mem_props mem;
+       struct kbase_gpu_mmu_props mmu;
 
        /**
         * Implementation specific irq throttle value (us), should be adjusted during integration.
index 872d24bc187c333b5280592708b0f29100e29131..3a4ea27926b8b30bfe28e160bc31ed747918e3c0 100755 (executable)
@@ -27,9 +27,9 @@
 #include "mali_kbase.h"
 #include "mali_kbase_hw.h"
 
-void kbase_hw_set_features_mask(kbase_device *kbdev)
+void kbase_hw_set_features_mask(struct kbase_device *kbdev)
 {
-       const base_hw_feature *features;
+       const enum base_hw_feature *features;
        u32 gpu_id;
 
        gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
@@ -42,8 +42,20 @@ void kbase_hw_set_features_mask(kbase_device *kbdev)
        case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1):
        case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1):
        case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0):
+#ifdef MALI_INCLUDE_TFRX
+       case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 0, 0):
+       case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 0, 1):
+#endif /* MALI_INCLUDE_TFRX */
+#ifdef MALI_INCLUDE_TF2X
+       case GPU_ID_MAKE(GPU_ID_PI_TF2X, 0, 0, 1):
+#endif /* MALI_INCLUDE_TF2X */
                features = base_hw_features_t76x;
                break;
+       case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 0):
+       case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 1):
+       case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0):
+               features = base_hw_features_t72x;
+               break;
        default:
                features = base_hw_features_generic;
                break;
@@ -53,9 +65,9 @@ void kbase_hw_set_features_mask(kbase_device *kbdev)
                set_bit(*features, &kbdev->hw_features_mask[0]);
 }
 
-mali_error kbase_hw_set_issues_mask(kbase_device *kbdev)
+mali_error kbase_hw_set_issues_mask(struct kbase_device *kbdev)
 {
-       const base_hw_issue *issues;
+       const enum base_hw_issue *issues;
        u32 gpu_id;
        u32 impl_tech;
 
@@ -83,6 +95,9 @@ mali_error kbase_hw_set_issues_mask(kbase_device *kbdev)
                case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 1, 0):
                        issues = base_hw_issues_t62x_r1p1;
                        break;
+               case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 0):
+                       issues = base_hw_issues_t76x_r0p0_beta;
+                       break;
                case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 1):
                        issues = base_hw_issues_t76x_r0p0;
                        break;
@@ -109,6 +124,20 @@ mali_error kbase_hw_set_issues_mask(kbase_device *kbdev)
                case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0):
                        issues = base_hw_issues_t72x_r1p0;
                        break;
+               case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 1, 0):
+                       issues = base_hw_issues_t72x_r1p1;
+                       break;
+#ifdef MALI_INCLUDE_TFRX
+               case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 0, 0):
+               case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 0, 1):
+                       issues = base_hw_issues_tFRx_r0p0;
+                       break;
+#endif /* MALI_INCLUDE_TFRX */
+#ifdef MALI_INCLUDE_TF2X
+               case GPU_ID_MAKE(GPU_ID_PI_TF2X, 0, 0, 1):
+                       issues = base_hw_issues_tF2x_r0p0;
+                       break;
+#endif /* MALI_INCLUDE_TF2X */
                default:
                        dev_err(kbdev->dev, "Unknown GPU ID %x", gpu_id);
                        return MALI_ERROR_FUNCTION_FAILED;
@@ -126,7 +155,16 @@ mali_error kbase_hw_set_issues_mask(kbase_device *kbdev)
                case GPU_ID_PI_T76X:
                        issues = base_hw_issues_model_t7xx;
                        break;
-
+#ifdef MALI_INCLUDE_TFRX
+               case GPU_ID_PI_TFRX:
+                       issues = base_hw_issues_model_tFRx;
+                       break;
+#endif /* MALI_INCLUDE_TFRX */
+#ifdef MALI_INCLUDE_TF2X
+               case GPU_ID_PI_TF2X:
+                       issues = base_hw_issues_model_tF2x;
+                       break;
+#endif /* MALI_INCLUDE_TF2X */
                default:
                        dev_err(kbdev->dev, "Unknown GPU ID %x", gpu_id);
                        return MALI_ERROR_FUNCTION_FAILED;
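[Editor's note] The new cases above key off GPU_ID_MAKE(product, major, minor, status) values such as the T72x r1p1 entry and the optional TFRX/TF2X parts. The standalone sketch below shows how such an ID packs and unpacks, assuming the usual Midgard GPU_ID layout (product ID in bits 31:16, version major/minor/status in 15:12, 11:4 and 3:0) and the 0x0750 product ID for T76x; both are assumptions, since the real macros live in headers outside this hunk.

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins mirroring GPU_ID_MAKE(), assuming the usual Midgard
 * layout: product[31:16] | version major[15:12] | minor[11:4] | status[3:0]. */
#define DEMO_GPU_ID_MAKE(product, major, minor, status) \
	(((uint32_t)(product) << 16) | ((uint32_t)(major) << 12) | \
	 ((uint32_t)(minor) << 4)   |  (uint32_t)(status))

#define DEMO_GPU_ID_PI_T76X 0x0750	/* assumed product ID for T76x */

int main(void)
{
	uint32_t id = DEMO_GPU_ID_MAKE(DEMO_GPU_ID_PI_T76X, 0, 1, 1);

	printf("gpu_id=0x%08x product=0x%04x major=%u minor=%u status=%u\n",
	       (unsigned)id, (unsigned)(id >> 16), (unsigned)((id >> 12) & 0xf),
	       (unsigned)((id >> 4) & 0xff), (unsigned)(id & 0xf));
	return 0;
}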
index 4501af74f1c1ddf38c16c2a740b021647bef92ea..1074290702ae455e3d7c080e21421f3951ba29c5 100755 (executable)
 /**
  * @brief Set the HW issues mask depending on the GPU ID
  */
-mali_error kbase_hw_set_issues_mask(kbase_device *kbdev);
+mali_error kbase_hw_set_issues_mask(struct kbase_device *kbdev);
 
 /**
  * @brief Set the features mask depending on the GPU ID
  */
-void kbase_hw_set_features_mask(kbase_device *kbdev);
+void kbase_hw_set_features_mask(struct kbase_device *kbdev);
 
 #endif                         /* _KBASE_HW_H_ */
index 0b9f355202501c91228cbb710734db51ec1a9caa..a5cdba6fa6bd655039830904ce22f50f13aa7bef 100755 (executable)
@@ -28,7 +28,7 @@
 /**
  * @brief Issue Cache Clean & Invalidate command to hardware
  */
-static void kbasep_instr_hwcnt_cacheclean(kbase_device *kbdev)
+static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
 {
        unsigned long flags;
        unsigned long pm_flags;
@@ -41,7 +41,7 @@ static void kbasep_instr_hwcnt_cacheclean(kbase_device *kbdev)
        while (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
                spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
                wait_event(kbdev->hwcnt.cache_clean_wait,
-                          kbdev->hwcnt.state != KBASE_INSTR_STATE_RESETTING);
+                               kbdev->hwcnt.state != KBASE_INSTR_STATE_RESETTING);
                spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
        }
        KBASE_DEBUG_ASSERT(kbdev->hwcnt.state == KBASE_INSTR_STATE_REQUEST_CLEAN);
@@ -60,11 +60,11 @@ static void kbasep_instr_hwcnt_cacheclean(kbase_device *kbdev)
        spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 }
 
-STATIC mali_error kbase_instr_hwcnt_enable_internal(kbase_device *kbdev, kbase_context *kctx, kbase_uk_hwcnt_setup *setup)
+STATIC mali_error kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_uk_hwcnt_setup *setup)
 {
        unsigned long flags, pm_flags;
        mali_error err = MALI_ERROR_FUNCTION_FAILED;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        u32 irq_mask;
        int ret;
        u64 shader_cores_needed;
@@ -159,7 +159,7 @@ STATIC mali_error kbase_instr_hwcnt_enable_internal(kbase_device *kbdev, kbase_c
        /* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump */
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
                kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), setup->tiler_bm, kctx);
-       
+
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
        if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) {
@@ -191,10 +191,11 @@ STATIC mali_error kbase_instr_hwcnt_enable_internal(kbase_device *kbdev, kbase_c
  *
  * Note: will wait for a cache clean to complete
  */
-mali_error kbase_instr_hwcnt_enable(kbase_context *kctx, kbase_uk_hwcnt_setup *setup)
+mali_error kbase_instr_hwcnt_enable(struct kbase_context *kctx, struct kbase_uk_hwcnt_setup *setup)
 {
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        mali_bool access_allowed;
+
        kbdev = kctx->kbdev;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
@@ -212,12 +213,12 @@ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_enable)
  *
  * Note: might sleep, waiting for an ongoing dump to complete
  */
-mali_error kbase_instr_hwcnt_disable(kbase_context *kctx)
+mali_error kbase_instr_hwcnt_disable(struct kbase_context *kctx)
 {
        unsigned long flags, pm_flags;
        mali_error err = MALI_ERROR_FUNCTION_FAILED;
        u32 irq_mask;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
        kbdev = kctx->kbdev;
@@ -245,7 +246,6 @@ mali_error kbase_instr_hwcnt_disable(kbase_context *kctx)
 
                /* Ongoing dump/setup - wait for its completion */
                wait_event(kbdev->hwcnt.wait, kbdev->hwcnt.triggered != 0);
-
        }
 
        kbdev->hwcnt.state = KBASE_INSTR_STATE_DISABLED;
@@ -287,10 +287,10 @@ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_disable)
 /**
  * @brief Configure HW counters collection
  */
-mali_error kbase_instr_hwcnt_setup(kbase_context *kctx, kbase_uk_hwcnt_setup *setup)
+mali_error kbase_instr_hwcnt_setup(struct kbase_context *kctx, struct kbase_uk_hwcnt_setup *setup)
 {
        mali_error err = MALI_ERROR_FUNCTION_FAILED;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
 
@@ -320,11 +320,11 @@ mali_error kbase_instr_hwcnt_setup(kbase_context *kctx, kbase_uk_hwcnt_setup *se
  * Notes:
  * - does not sleep
  */
-mali_error kbase_instr_hwcnt_dump_irq(kbase_context *kctx)
+mali_error kbase_instr_hwcnt_dump_irq(struct kbase_context *kctx)
 {
        unsigned long flags;
        mali_error err = MALI_ERROR_FUNCTION_FAILED;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
        kbdev = kctx->kbdev;
@@ -373,11 +373,11 @@ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_irq)
  * - success will be set to MALI_TRUE if the dump succeeded or
  *   MALI_FALSE on failure
  */
-mali_bool kbase_instr_hwcnt_dump_complete(kbase_context *kctx, mali_bool * const success)
+mali_bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx, mali_bool * const success)
 {
        unsigned long flags;
        mali_bool complete = MALI_FALSE;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
        kbdev = kctx->kbdev;
@@ -404,11 +404,11 @@ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete)
 /**
  * @brief Issue Dump command to hardware and wait for completion
  */
-mali_error kbase_instr_hwcnt_dump(kbase_context *kctx)
+mali_error kbase_instr_hwcnt_dump(struct kbase_context *kctx)
 {
        unsigned long flags;
        mali_error err = MALI_ERROR_FUNCTION_FAILED;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
        kbdev = kctx->kbdev;
@@ -450,11 +450,11 @@ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump)
 /**
  * @brief Clear the HW counters
  */
-mali_error kbase_instr_hwcnt_clear(kbase_context *kctx)
+mali_error kbase_instr_hwcnt_clear(struct kbase_context *kctx)
 {
        unsigned long flags;
        mali_error err = MALI_ERROR_FUNCTION_FAILED;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
        kbdev = kctx->kbdev;
@@ -490,18 +490,18 @@ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear)
  */
 void kbasep_cache_clean_worker(struct work_struct *data)
 {
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        unsigned long flags;
 
-       kbdev = container_of(data, kbase_device, hwcnt.cache_clean_work);
+       kbdev = container_of(data, struct kbase_device, hwcnt.cache_clean_work);
 
        mutex_lock(&kbdev->cacheclean_lock);
        kbasep_instr_hwcnt_cacheclean(kbdev);
 
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
        /* Wait for our condition, and any reset to complete */
-       while (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING
-                  || kbdev->hwcnt.state == KBASE_INSTR_STATE_CLEANING) {
+       while (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING ||
+                       kbdev->hwcnt.state == KBASE_INSTR_STATE_CLEANING) {
                spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
                wait_event(kbdev->hwcnt.cache_clean_wait,
                           (kbdev->hwcnt.state != KBASE_INSTR_STATE_RESETTING
@@ -522,9 +522,10 @@ void kbasep_cache_clean_worker(struct work_struct *data)
 /**
  * @brief Dump complete interrupt received
  */
-void kbase_instr_hwcnt_sample_done(kbase_device *kbdev)
+void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
 {
        unsigned long flags;
+
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
        if (kbdev->hwcnt.state == KBASE_INSTR_STATE_FAULT) {
@@ -547,7 +548,7 @@ void kbase_instr_hwcnt_sample_done(kbase_device *kbdev)
 /**
  * @brief Cache clean interrupt received
  */
-void kbase_clean_caches_done(kbase_device *kbdev)
+void kbase_clean_caches_done(struct kbase_device *kbdev)
 {
        u32 irq_mask;
 
@@ -580,9 +581,10 @@ void kbase_clean_caches_done(kbase_device *kbdev)
  * It's assumed that there's only one privileged context
  * Safe to do this without lock when doing an OS suspend, because it only
  * changes in response to user-space IOCTLs */
-void kbase_instr_hwcnt_suspend(kbase_device *kbdev)
+void kbase_instr_hwcnt_suspend(struct kbase_device *kbdev)
 {
-       kbase_context *kctx;
+       struct kbase_context *kctx;
+
        KBASE_DEBUG_ASSERT(kbdev);
        KBASE_DEBUG_ASSERT(!kbdev->hwcnt.suspended_kctx);
 
@@ -592,23 +594,22 @@ void kbase_instr_hwcnt_suspend(kbase_device *kbdev)
        /* Relevant state was saved into hwcnt.suspended_state when enabling the
         * counters */
 
-       if (kctx)
-       {
+       if (kctx) {
                KBASE_DEBUG_ASSERT(kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED);
                kbase_instr_hwcnt_disable(kctx);
        }
 }
 
-void kbase_instr_hwcnt_resume(kbase_device *kbdev)
+void kbase_instr_hwcnt_resume(struct kbase_device *kbdev)
 {
-       kbase_context *kctx;
+       struct kbase_context *kctx;
+
        KBASE_DEBUG_ASSERT(kbdev);
 
        kctx = kbdev->hwcnt.suspended_kctx;
        kbdev->hwcnt.suspended_kctx = NULL;
 
-       if (kctx)
-       {
+       if (kctx) {
                mali_error err;
                err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &kbdev->hwcnt.suspended_state);
                WARN(err != MALI_ERROR_NONE,
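[Editor's note] The instrumentation functions reworked above form a small enable → dump/clear → disable state machine around kbdev->hwcnt.state. A condensed in-kernel usage sketch follows; it assumes the declarations from mali_kbase.h, and the kbase_uk_hwcnt_setup field names (dump_buffer plus the per-block *_bm masks) are taken from contemporaneous kbase releases rather than from this hunk, so treat them as assumptions.

/* Condensed kernel-side sketch; assumes a GPU-mapped dump buffer already
 * allocated at gpu_va for the given context. */
static mali_error example_hwcnt_capture(struct kbase_context *kctx, u64 gpu_va)
{
	struct kbase_uk_hwcnt_setup setup = {
		.dump_buffer = gpu_va,	/* assumed field name */
		.jm_bm = 0xffffffff,	/* per-block enable masks (assumed names) */
		.shader_bm = 0xffffffff,
		.tiler_bm = 0xffffffff,
		.mmu_l2_bm = 0xffffffff,
	};
	mali_error err;

	err = kbase_instr_hwcnt_enable(kctx, &setup);
	if (err != MALI_ERROR_NONE)
		return err;

	/* Blocking variant: triggers a sample and waits for the dump to finish. */
	err = kbase_instr_hwcnt_dump(kctx);

	/* Always release the instrumentation so other users can claim it. */
	kbase_instr_hwcnt_disable(kctx);
	return err;
}

The non-blocking path has the same shape, with kbase_instr_hwcnt_dump_irq() followed by polling kbase_instr_hwcnt_dump_complete().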
index f5ddcf4219a7314091aabc61d227404774958d56..c61050ad6a6566a34f7b37410a0e8520230ae676 100755 (executable)
@@ -32,7 +32,7 @@
 #endif                         /* CONFIG_UMP */
 #include <linux/random.h>
 
-#define beenthere(kctx,f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
+#define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
 /* random32 was renamed to prandom_u32 in 3.8 */
@@ -49,7 +49,7 @@
  * - to the event subsystem (signals the completion/failure of bag/job-chains).
  */
 
-static void *get_compat_pointer(const kbase_pointer *p)
+static void __user *get_compat_pointer(const union kbase_pointer *p)
 {
 #ifdef CONFIG_COMPAT
        if (is_compat_task())
@@ -66,9 +66,10 @@ static void *get_compat_pointer(const kbase_pointer *p)
  * Note that the caller must also check the atom status and
  * if it is KBASE_JD_ATOM_STATE_COMPLETED must call jd_done_nolock
  */
-static int jd_run_atom(kbase_jd_atom *katom)
+static int jd_run_atom(struct kbase_jd_atom *katom)
 {
-       kbase_context *kctx = katom->kctx;
+       struct kbase_context *kctx = katom->kctx;
+
        KBASE_DEBUG_ASSERT(katom->status != KBASE_JD_ATOM_STATE_UNUSED);
 
        if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP) {
@@ -79,14 +80,8 @@ static int jd_run_atom(kbase_jd_atom *katom)
                /* Soft-job */
                if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE)
                                                  == BASE_JD_REQ_SOFT_REPLAY) {
-                       int status = kbase_replay_process(katom);
-
-                       if ((status & MALI_REPLAY_STATUS_MASK)
-                                              == MALI_REPLAY_STATUS_REPLAYING)
-                               return status & MALI_REPLAY_FLAG_JS_RESCHED;
-                       else
+                       if (!kbase_replay_process(katom))
                                katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
-                       return 0;
                } else if (kbase_process_soft_job(katom) == 0) {
                        kbase_finish_soft_job(katom);
                        katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
@@ -108,9 +103,10 @@ static int jd_run_atom(kbase_jd_atom *katom)
  * Atoms must be added to the waiting list after a successful call to kds_async_waitall.
  * The caller must hold the kbase_jd_context.lock */
 
-static void kbase_jd_kds_waiters_add(kbase_jd_atom *katom)
+static void kbase_jd_kds_waiters_add(struct kbase_jd_atom *katom)
 {
-       kbase_context *kctx;
+       struct kbase_context *kctx;
+
        KBASE_DEBUG_ASSERT(katom);
 
        kctx = katom->kctx;
@@ -123,7 +119,7 @@ static void kbase_jd_kds_waiters_add(kbase_jd_atom *katom)
  * The supplied katom must first have been added to the list with a call to kbase_jd_kds_waiters_add.
  * The caller must hold the kbase_jd_context.lock */
 
-static void kbase_jd_kds_waiters_remove(kbase_jd_atom *katom)
+static void kbase_jd_kds_waiters_remove(struct kbase_jd_atom *katom)
 {
        KBASE_DEBUG_ASSERT(katom);
        list_del(&katom->node);
@@ -131,11 +127,11 @@ static void kbase_jd_kds_waiters_remove(kbase_jd_atom *katom)
 
 static void kds_dep_clear(void *callback_parameter, void *callback_extra_parameter)
 {
-       kbase_jd_atom *katom;
-       kbase_jd_context *ctx;
-       kbase_device *kbdev;
+       struct kbase_jd_atom *katom;
+       struct kbase_jd_context *ctx;
+       struct kbase_device *kbdev;
 
-       katom = (kbase_jd_atom *) callback_parameter;
+       katom = (struct kbase_jd_atom *)callback_parameter;
        KBASE_DEBUG_ASSERT(katom);
        ctx = &katom->kctx->jctx;
        kbdev = katom->kctx->kbdev;
@@ -151,9 +147,11 @@ static void kds_dep_clear(void *callback_parameter, void *callback_extra_paramet
        katom->kds_dep_satisfied = MALI_TRUE;
 
        /* Check whether the atom's other dependencies were already met */
-       if (!kbase_jd_katom_dep_atom(&katom->dep[0]) && !kbase_jd_katom_dep_atom(&katom->dep[1])) {
+       if (!kbase_jd_katom_dep_atom(&katom->dep[0]) &&
+                       !kbase_jd_katom_dep_atom(&katom->dep[1])) {
                /* katom dep complete, attempt to run it */
                mali_bool resched = MALI_FALSE;
+
                resched = jd_run_atom(katom);
 
                if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
@@ -168,25 +166,24 @@ static void kds_dep_clear(void *callback_parameter, void *callback_extra_paramet
        mutex_unlock(&ctx->lock);
 }
 
-void kbase_cancel_kds_wait_job(kbase_jd_atom *katom)
+static void kbase_cancel_kds_wait_job(struct kbase_jd_atom *katom)
 {
        KBASE_DEBUG_ASSERT(katom);
 
        /* Prevent job_done_nolock from being called twice on an atom when
         *  there is a race between job completion and cancellation */
 
-       if ( katom->status == KBASE_JD_ATOM_STATE_QUEUED ) {
+       if (katom->status == KBASE_JD_ATOM_STATE_QUEUED) {
                /* Wait was cancelled - zap the atom */
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
-               if (jd_done_nolock(katom)) {
-                       kbasep_js_try_schedule_head_ctx( katom->kctx->kbdev );
-               }
+               if (jd_done_nolock(katom))
+                       kbasep_js_try_schedule_head_ctx(katom->kctx->kbdev);
        }
 }
 #endif                         /* CONFIG_KDS */
 
 #ifdef CONFIG_DMA_SHARED_BUFFER
-static mali_error kbase_jd_umm_map(kbase_context *kctx, struct kbase_va_region *reg)
+static mali_error kbase_jd_umm_map(struct kbase_context *kctx, struct kbase_va_region *reg)
 {
        struct sg_table *sgt;
        struct scatterlist *s;
@@ -228,8 +225,8 @@ static mali_error kbase_jd_umm_map(kbase_context *kctx, struct kbase_va_region *
        }
 
        if (WARN_ONCE(count < reg->nr_pages,
-                                 "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
-                                 reg->alloc->imported.umm.dma_buf->size)) {
+                       "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
+                       reg->alloc->imported.umm.dma_buf->size)) {
                err = MALI_ERROR_FUNCTION_FAILED;
                goto out;
        }
@@ -248,24 +245,24 @@ out:
        return err;
 }
 
-static void kbase_jd_umm_unmap(kbase_context *kctx, struct kbase_mem_phy_alloc *alloc)
+static void kbase_jd_umm_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc)
 {
        KBASE_DEBUG_ASSERT(kctx);
        KBASE_DEBUG_ASSERT(alloc);
        KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
        KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
        dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
-                       alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+           alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
        alloc->imported.umm.sgt = NULL;
        alloc->nents = 0;
 }
 #endif                         /* CONFIG_DMA_SHARED_BUFFER */
 
-void kbase_jd_free_external_resources(kbase_jd_atom *katom)
+void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
 {
 #ifdef CONFIG_KDS
        if (katom->kds_rset) {
-               kbase_jd_context * jctx = &katom->kctx->jctx;
+               struct kbase_jd_context *jctx = &katom->kctx->jctx;
 
                /*
                 * As the atom is no longer waiting, remove it from
@@ -273,7 +270,7 @@ void kbase_jd_free_external_resources(kbase_jd_atom *katom)
                 */
 
                mutex_lock(&jctx->lock);
-               kbase_jd_kds_waiters_remove( katom );
+               kbase_jd_kds_waiters_remove(katom);
                mutex_unlock(&jctx->lock);
 
                /* Release the kds resource or cancel if zapping */
@@ -282,7 +279,7 @@ void kbase_jd_free_external_resources(kbase_jd_atom *katom)
 #endif                         /* CONFIG_KDS */
 }
 
-static void kbase_jd_post_external_resources(kbase_jd_atom *katom)
+static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
 {
        KBASE_DEBUG_ASSERT(katom);
        KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
@@ -297,6 +294,7 @@ static void kbase_jd_post_external_resources(kbase_jd_atom *katom)
        /* only roll back if extres is non-NULL */
        if (katom->extres) {
                u32 res_no;
+
                res_no = katom->nr_extres;
                while (res_no-- > 0) {
                        struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
@@ -306,13 +304,16 @@ static void kbase_jd_post_external_resources(kbase_jd_atom *katom)
 
                                if (0 == alloc->imported.umm.current_mapping_usage_count) {
                                        struct kbase_va_region *reg;
+
                                        reg = kbase_region_tracker_find_region_base_address(
-                                                 katom->kctx, katom->extres[res_no].gpu_address);
+                                                       katom->kctx,
+                                                       katom->extres[res_no].gpu_address);
 
-                                       if (reg && reg->alloc == alloc) {
-                                               kbase_mmu_teardown_pages(katom->kctx, reg->start_pfn,
-                                                   kbase_reg_current_backed_size(reg));
-                                       }
+                                       if (reg && reg->alloc == alloc)
+                                               kbase_mmu_teardown_pages(
+                                                               katom->kctx,
+                                                               reg->start_pfn,
+                                                               kbase_reg_current_backed_size(reg));
 
                                        kbase_jd_umm_unmap(katom->kctx, alloc);
                                }
@@ -350,7 +351,7 @@ static void add_kds_resource(struct kds_resource *kds_res, struct kds_resource *
  * jctx.lock must be held when this is called.
  */
 
-static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const base_jd_atom_v2 *user_atom)
+static mali_error kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom_v2 *user_atom)
 {
        mali_error err_ret_val = MALI_ERROR_FUNCTION_FAILED;
        u32 res_no;
@@ -359,7 +360,7 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
        struct kds_resource **kds_resources = NULL;
        unsigned long *kds_access_bitmap = NULL;
 #endif                         /* CONFIG_KDS */
-       struct base_external_resource * input_extres;
+       struct base_external_resource *input_extres;
 
        KBASE_DEBUG_ASSERT(katom);
        KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
@@ -368,7 +369,7 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
        if (!katom->nr_extres)
                return MALI_ERROR_FUNCTION_FAILED;
 
-       katom->extres = kmalloc(sizeof(*katom->extres) * katom->nr_extres, GFP_KERNEL);
+       katom->extres = kmalloc_array(katom->nr_extres, sizeof(*katom->extres), GFP_KERNEL);
        if (NULL == katom->extres) {
                err_ret_val = MALI_ERROR_OUT_OF_MEMORY;
                goto early_err_out;
@@ -378,16 +379,21 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
         * Make sure the struct sizes haven't changed in a way
         * we don't support */
        BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
-       input_extres = (struct base_external_resource*)(((unsigned char *)katom->extres) + (sizeof(*katom->extres) - sizeof(*input_extres)) * katom->nr_extres);
-
-       if (copy_from_user(input_extres, get_compat_pointer(&user_atom->extres_list), sizeof(*input_extres) * katom->nr_extres) != 0) {
+       input_extres = (struct base_external_resource *)
+                       (((unsigned char *)katom->extres) +
+                       (sizeof(*katom->extres) - sizeof(*input_extres)) *
+                       katom->nr_extres);
+
+       if (copy_from_user(input_extres,
+                       get_compat_pointer(&user_atom->extres_list),
+                       sizeof(*input_extres) * katom->nr_extres) != 0) {
                err_ret_val = MALI_ERROR_FUNCTION_FAILED;
                goto early_err_out;
        }
 #ifdef CONFIG_KDS
        /* assume we have to wait for all */
        KBASE_DEBUG_ASSERT(0 != katom->nr_extres);
-       kds_resources = kmalloc(sizeof(struct kds_resource *) * katom->nr_extres, GFP_KERNEL);
+       kds_resources = kmalloc_array(katom->nr_extres, sizeof(struct kds_resource *), GFP_KERNEL);
 
        if (NULL == kds_resources) {
                err_ret_val = MALI_ERROR_OUT_OF_MEMORY;
@@ -406,11 +412,12 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
        /* need to keep the GPU VM locked while we set up UMM buffers */
        kbase_gpu_vm_lock(katom->kctx);
        for (res_no = 0; res_no < katom->nr_extres; res_no++) {
-               base_external_resource *res;
-               kbase_va_region *reg;
+               struct base_external_resource *res;
+               struct kbase_va_region *reg;
 
                res = &input_extres[res_no];
-               reg = kbase_region_tracker_find_region_enclosing_address(katom->kctx, res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+               reg = kbase_region_tracker_find_region_enclosing_address(katom->kctx,
+                               res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
                /* did we find a matching region object? */
                if (NULL == reg || (reg->flags & KBASE_REG_FREE)) {
                        /* roll back */
@@ -423,9 +430,12 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
                        {
 #if defined(CONFIG_KDS) && defined(CONFIG_UMP)
                                struct kds_resource *kds_res;
+
                                kds_res = ump_dd_kds_resource_get(reg->alloc->imported.ump_handle);
                                if (kds_res)
-                                       add_kds_resource(kds_res, kds_resources, &kds_res_count, kds_access_bitmap, res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE);
+                                       add_kds_resource(kds_res, kds_resources, &kds_res_count,
+                                                       kds_access_bitmap,
+                                                       res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE);
 #endif                         /*defined(CONFIG_KDS) && defined(CONFIG_UMP) */
                                break;
                        }
@@ -434,6 +444,7 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
                        {
 #ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
                                struct kds_resource *kds_res;
+
                                kds_res = get_dma_buf_kds_resource(reg->alloc->imported.umm.dma_buf);
                                if (kds_res)
                                        add_kds_resource(kds_res, kds_resources, &kds_res_count, kds_access_bitmap, res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE);
@@ -475,21 +486,19 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
 #ifdef CONFIG_KDS
        if (kds_res_count) {
                int wait_failed;
+
                /* We have resources to wait for with kds */
                katom->kds_dep_satisfied = MALI_FALSE;
 
                wait_failed = kds_async_waitall(&katom->kds_rset,
-                                                                               &katom->kctx->jctx.kds_cb,
-                                                                               katom,
-                                                                               NULL,
-                                                                               kds_res_count,
-                                                                               kds_access_bitmap,
-                                                                               kds_resources);
-               if (wait_failed) {
+                               &katom->kctx->jctx.kds_cb, katom, NULL,
+                               kds_res_count, kds_access_bitmap,
+                               kds_resources);
+
+               if (wait_failed)
                        goto failed_kds_setup;
-               } else {
-                       kbase_jd_kds_waiters_add( katom );
-               }
+               else
+                       kbase_jd_kds_waiters_add(katom);
        } else {
                /* Nothing to wait for, so kds dep met */
                katom->kds_dep_satisfied = MALI_TRUE;
@@ -520,13 +529,15 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
 
                        if (0 == alloc->imported.umm.current_mapping_usage_count) {
                                struct kbase_va_region *reg;
+
                                reg = kbase_region_tracker_find_region_base_address(
-                                         katom->kctx, katom->extres[res_no].gpu_address);
+                                               katom->kctx,
+                                               katom->extres[res_no].gpu_address);
 
-                               if (reg && reg->alloc == alloc) {
-                                       kbase_mmu_teardown_pages(katom->kctx, reg->start_pfn,
-                                           kbase_reg_current_backed_size(reg));
-                               }
+                               if (reg && reg->alloc == alloc)
+                                       kbase_mmu_teardown_pages(katom->kctx,
+                                                       reg->start_pfn,
+                                                       kbase_reg_current_backed_size(reg));
 
                                kbase_jd_umm_unmap(katom->kctx, alloc);
                        }
@@ -546,16 +557,20 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
        return err_ret_val;
 }
 
-STATIC INLINE void jd_resolve_dep(struct list_head *out_list, kbase_jd_atom *katom, u8 d)
+STATIC INLINE void jd_resolve_dep(struct list_head *out_list, struct kbase_jd_atom *katom, u8 d)
 {
        u8 other_d = !d;
 
        while (!list_empty(&katom->dep_head[d])) {
-               kbase_jd_atom *dep_atom = list_entry(katom->dep_head[d].next, kbase_jd_atom, dep_item[d]);
+               struct kbase_jd_atom *dep_atom;
+
+               dep_atom = list_entry(katom->dep_head[d].next,
+                               struct kbase_jd_atom, dep_item[d]);
+
                list_del(katom->dep_head[d].next);
 
                kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
-               
+
                if (katom->event_code != BASE_JD_EVENT_DONE) {
                        /* Atom failed, so remove the other dependencies and immediately fail the atom */
                        if (kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
@@ -572,9 +587,8 @@ STATIC INLINE void jd_resolve_dep(struct list_head *out_list, kbase_jd_atom *kat
 #endif
 
                        /* at this point a dependency to the failed job is already removed */
-                       if ( !( kbase_jd_katom_dep_type(&dep_atom->dep[d]) == BASE_JD_DEP_TYPE_ORDER &&
-                                       katom->event_code > BASE_JD_EVENT_ACTIVE) )
-                       {
+                       if (!(kbase_jd_katom_dep_type(&dep_atom->dep[d]) == BASE_JD_DEP_TYPE_ORDER &&
+                                       katom->event_code > BASE_JD_EVENT_ACTIVE)) {
                                dep_atom->event_code = katom->event_code;
                                KBASE_DEBUG_ASSERT(dep_atom->status != KBASE_JD_ATOM_STATE_UNUSED);
                                dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
@@ -593,7 +607,7 @@ STATIC INLINE void jd_resolve_dep(struct list_head *out_list, kbase_jd_atom *kat
 KBASE_EXPORT_TEST_API(jd_resolve_dep)
 
 #if MALI_CUSTOMER_RELEASE == 0
-static void jd_force_failure(kbase_device *kbdev, kbase_jd_atom *katom)
+static void jd_force_failure(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
 {
        kbdev->force_replay_count++;
 
@@ -613,18 +627,20 @@ static void jd_force_failure(kbase_device *kbdev, kbase_jd_atom *katom)
  *
  * This function will check if an atom has a replay job as a dependent. If so
  * then it will be considered for forced failure. */
-static void jd_check_force_failure(kbase_jd_atom *katom)
+static void jd_check_force_failure(struct kbase_jd_atom *katom)
 {
        struct kbase_context *kctx = katom->kctx;
-       kbase_device *kbdev = kctx->kbdev;
+       struct kbase_device *kbdev = kctx->kbdev;
        int i;
+
        if ((kbdev->force_replay_limit == KBASEP_FORCE_REPLAY_DISABLED) ||
            (katom->core_req & BASEP_JD_REQ_EVENT_NEVER))
                return;
+
        for (i = 1; i < BASE_JD_ATOM_COUNT; i++) {
                if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom ||
                    kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
-                       kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
+                       struct kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
 
                        if ((dep_atom->core_req & BASEP_JD_REQ_ATOM_TYPE) ==
                                                     BASE_JD_REQ_SOFT_REPLAY &&
@@ -638,18 +654,6 @@ static void jd_check_force_failure(kbase_jd_atom *katom)
 }
 #endif
 
-static mali_bool jd_replay(kbase_jd_atom *katom)
-{
-       int status = kbase_replay_process(katom);
-
-       if ((status & MALI_REPLAY_STATUS_MASK) ==
-                                               MALI_REPLAY_STATUS_REPLAYING) {
-               if (status & MALI_REPLAY_FLAG_JS_RESCHED)
-                       return MALI_TRUE;
-       }
-       return MALI_FALSE;
-}
-
 /*
  * Perform the necessary handling of an atom that has finished running
  * on the GPU.
@@ -659,10 +663,10 @@ static mali_bool jd_replay(kbase_jd_atom *katom)
  *
  * The caller must hold the kbase_jd_context.lock.
  */
-mali_bool jd_done_nolock(kbase_jd_atom *katom)
+mali_bool jd_done_nolock(struct kbase_jd_atom *katom)
 {
        struct kbase_context *kctx = katom->kctx;
-       kbase_device *kbdev = kctx->kbdev;
+       struct kbase_device *kbdev = kctx->kbdev;
        struct list_head completed_jobs;
        struct list_head runnable_jobs;
        mali_bool need_to_try_schedule_context = MALI_FALSE;
@@ -681,7 +685,7 @@ mali_bool jd_done_nolock(kbase_jd_atom *katom)
        /* This is needed in case an atom is failed due to being invalid, this
         * can happen *before* the jobs that the atom depends on have completed */
        for (i = 0; i < 2; i++) {
-               if ( kbase_jd_katom_dep_atom(&katom->dep[i])) {
+               if (kbase_jd_katom_dep_atom(&katom->dep[i])) {
                        list_del(&katom->dep_item[i]);
                        kbase_jd_katom_dep_clear(&katom->dep[i]);
                }
@@ -696,7 +700,7 @@ mali_bool jd_done_nolock(kbase_jd_atom *katom)
 
        if ((kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10817) || kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10959)) &&
                  katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT) {
-               if ( ( katom->core_req & BASE_JD_REQ_FS ) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) ) {
+               if ((katom->core_req & BASE_JD_REQ_FS) && (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)) {
                        /* Promote the failure to job done */
                        katom->event_code = BASE_JD_EVENT_DONE;
                        katom->atom_flags = katom->atom_flags & (~KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED);
@@ -707,7 +711,7 @@ mali_bool jd_done_nolock(kbase_jd_atom *katom)
        list_add_tail(&katom->dep_item[0], &completed_jobs);
 
        while (!list_empty(&completed_jobs)) {
-               katom = list_entry(completed_jobs.prev, kbase_jd_atom, dep_item[0]);
+               katom = list_entry(completed_jobs.prev, struct kbase_jd_atom, dep_item[0]);
                list_del(completed_jobs.prev);
 
                KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
@@ -719,7 +723,10 @@ mali_bool jd_done_nolock(kbase_jd_atom *katom)
                        kbase_jd_post_external_resources(katom);
 
                while (!list_empty(&runnable_jobs)) {
-                       kbase_jd_atom *node = list_entry(runnable_jobs.prev, kbase_jd_atom, dep_item[0]);
+                       struct kbase_jd_atom *node;
+
+                       node = list_entry(runnable_jobs.prev, struct kbase_jd_atom, dep_item[0]);
+
                        list_del(runnable_jobs.prev);
 
                        KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
@@ -728,22 +735,31 @@ mali_bool jd_done_nolock(kbase_jd_atom *katom)
                                need_to_try_schedule_context |= jd_run_atom(node);
                        } else {
                                node->event_code = katom->event_code;
-                               node->status = KBASE_JD_ATOM_STATE_COMPLETED;
 
                                if ((node->core_req & BASEP_JD_REQ_ATOM_TYPE)
                                                  == BASE_JD_REQ_SOFT_REPLAY) {
-                                       need_to_try_schedule_context |=
-                                                              jd_replay(node);
+                                       if (kbase_replay_process(node))
+                                               /* Don't complete this atom */
+                                               continue;
                                } else if (node->core_req &
                                                        BASE_JD_REQ_SOFT_JOB) {
+                                       /* If this is a fence wait then remove it from the list of sync waiters. */
+                                       if (BASE_JD_REQ_SOFT_FENCE_WAIT == node->core_req)
+                                               list_del(&node->dep_item[0]);
+
                                        kbase_finish_soft_job(node);
                                }
+                               node->status = KBASE_JD_ATOM_STATE_COMPLETED;
                        }
 
                        if (node->status == KBASE_JD_ATOM_STATE_COMPLETED)
                                list_add_tail(&node->dep_item[0], &completed_jobs);
                }
 
+               /* Register a completed job as a disjoint event when the GPU
+                * is in a disjoint state (i.e. being reset or replaying jobs).
+                */
+               kbase_disjoint_event_potential(kctx->kbdev);
                kbase_event_post(kctx, katom);
 
                /* Decrement and check the TOTAL number of jobs. This includes
@@ -814,11 +830,11 @@ static const char *kbasep_map_core_reqs_to_string(base_jd_core_req core_req)
 }
 #endif
 
-mali_bool jd_submit_atom(kbase_context *kctx,
-                        const base_jd_atom_v2 *user_atom,
-                        kbase_jd_atom *katom)
+mali_bool jd_submit_atom(struct kbase_context *kctx,
+                        const struct base_jd_atom_v2 *user_atom,
+                        struct kbase_jd_atom *katom)
 {
-       kbase_jd_context *jctx = &kctx->jctx;
+       struct kbase_jd_context *jctx = &kctx->jctx;
        base_jd_core_req core_req;
        int queued = 0;
        int i;
@@ -830,12 +846,13 @@ mali_bool jd_submit_atom(kbase_context *kctx,
 
        core_req = user_atom->core_req;
 
+       katom->start_timestamp.tv64 = 0;
+       katom->time_spent_us = 0;
        katom->udata = user_atom->udata;
        katom->kctx = kctx;
        katom->nr_extres = user_atom->nr_extres;
        katom->extres = NULL;
        katom->device_nr = user_atom->device_nr;
-
        katom->affinity = 0;
        katom->jc = user_atom->jc;
        katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
@@ -844,7 +861,6 @@ mali_bool jd_submit_atom(kbase_context *kctx,
        katom->atom_flags = 0;
        katom->retry_count = 0;
 
-       
 #ifdef CONFIG_KDS
        /* Start by assuming that the KDS dependencies are satisfied,
         * kbase_jd_pre_external_resources will correct this if there are dependencies */
@@ -852,9 +868,8 @@ mali_bool jd_submit_atom(kbase_context *kctx,
        katom->kds_rset = NULL;
 #endif                         /* CONFIG_KDS */
 
-
        /* Don't do anything if there is a mess up with dependencies.
-          This is done in a separate cycle to check both the dependencies at ones, otherwise 
+          This is done in a separate cycle to check both the dependencies at once, otherwise
           it will be extra complexity to deal with 1st dependency ( just added to the list )
           if only the 2nd one has invalid config.
         */
@@ -863,8 +878,8 @@ mali_bool jd_submit_atom(kbase_context *kctx,
                base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
 
                if (dep_atom_number) {
-                       if ( dep_atom_type != BASE_JD_DEP_TYPE_ORDER && dep_atom_type != BASE_JD_DEP_TYPE_DATA )
-                       {
+                       if (dep_atom_type != BASE_JD_DEP_TYPE_ORDER &&
+                                       dep_atom_type != BASE_JD_DEP_TYPE_DATA) {
                                katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
                                katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
                                ret = jd_done_nolock(katom);
@@ -872,7 +887,7 @@ mali_bool jd_submit_atom(kbase_context *kctx,
                        }
                }
        }
-       
+
        /* Add dependencies */
        for (i = 0; i < 2; i++) {
                int dep_atom_number = user_atom->pre_dep[i].atom_id;
@@ -881,7 +896,7 @@ mali_bool jd_submit_atom(kbase_context *kctx,
                kbase_jd_katom_dep_clear(&katom->dep[i]);
 
                if (dep_atom_number) {
-                       kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
+                       struct kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
 
                        if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED || dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
                                if (dep_atom->event_code != BASE_JD_EVENT_DONE) {
@@ -897,26 +912,20 @@ mali_bool jd_submit_atom(kbase_context *kctx,
                                                list_del(&katom->dep_item[0]);
                                                kbase_jd_katom_dep_clear(&katom->dep[0]);
                                        }
-                                       
+
                                        /* Atom has completed, propagate the error code if any */
                                        katom->event_code = dep_atom->event_code;
                                        katom->status = KBASE_JD_ATOM_STATE_QUEUED;
-                                       if ((katom->core_req & 
+                                       if ((katom->core_req &
                                                        BASEP_JD_REQ_ATOM_TYPE)
                                                  == BASE_JD_REQ_SOFT_REPLAY) {
-                                               int status =
-                                                  kbase_replay_process(katom);
-
-                                               if ((status &
-                                                      MALI_REPLAY_STATUS_MASK)
-                                            == MALI_REPLAY_STATUS_REPLAYING) {
-                                                       ret = (status &
-                                                 MALI_REPLAY_FLAG_JS_RESCHED);
+                                               if (kbase_replay_process(katom)) {
+                                                       ret = MALI_FALSE;
                                                        goto out;
                                                }
-                                       }                                       
+                                       }
                                        ret = jd_done_nolock(katom);
-                                       
+
                                        goto out;
                                }
                        } else {
@@ -957,7 +966,10 @@ mali_bool jd_submit_atom(kbase_context *kctx,
         */
        if (0 > katom->nice_prio) {
                mali_bool access_allowed;
-               access_allowed = kbase_security_has_capability(kctx, KBASE_SEC_MODIFY_PRIORITY, KBASE_SEC_FLAG_NOAUDIT);
+
+               access_allowed = kbase_security_has_capability(kctx,
+                               KBASE_SEC_MODIFY_PRIORITY, KBASE_SEC_FLAG_NOAUDIT);
+
                if (!access_allowed) {
                        /* For unprivileged processes - a negative priority is interpreted as zero */
                        katom->nice_prio = 0;
@@ -968,6 +980,7 @@ mali_bool jd_submit_atom(kbase_context *kctx,
        if (katom->nice_prio) {
                /* Remove sign for calculation */
                int nice_priority = katom->nice_prio + 128;
+
                /* Fixed point maths to scale from ..255 to 0..39 (NICE range with +20 offset) */
                katom->nice_prio = (((20 << 16) / 128) * nice_priority) >> 16;
        }
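
The fixed-point scaling above is easy to sanity-check in isolation: ((20 << 16) / 128) is 10240, so the expression computes nice_priority * 20 / 128, mapping the 0..255 input range onto 0..39. A minimal standalone sketch (not part of the patch) that prints the mapping:

#include <stdio.h>

int main(void)
{
        /* Same expression as the katom->nice_prio calculation above */
        for (int nice_priority = 0; nice_priority <= 255; nice_priority += 51) {
                int scaled = (((20 << 16) / 128) * nice_priority) >> 16;

                printf("%3d -> %2d\n", nice_priority, scaled);
        }
        return 0;
}
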
@@ -990,7 +1003,8 @@ mali_bool jd_submit_atom(kbase_context *kctx,
         * If either fail then we immediately complete the atom with an error.
         */
        if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
-               kbasep_js_policy *js_policy = &(kctx->kbdev->js_data.policy);
+               union kbasep_js_policy *js_policy = &(kctx->kbdev->js_data.policy);
+
                if (MALI_ERROR_NONE != kbasep_js_policy_init_job(js_policy, kctx, katom)) {
                        katom->event_code = BASE_JD_EVENT_JOB_INVALID;
                        ret = jd_done_nolock(katom);
@@ -1024,11 +1038,8 @@ mali_bool jd_submit_atom(kbase_context *kctx,
 
        if ((katom->core_req & BASEP_JD_REQ_ATOM_TYPE)
                                                  == BASE_JD_REQ_SOFT_REPLAY) {
-               int status = kbase_replay_process(katom);
-
-               if ((status & MALI_REPLAY_STATUS_MASK)
-                                              == MALI_REPLAY_STATUS_REPLAYING)
-                       ret = status & MALI_REPLAY_FLAG_JS_RESCHED;
+               if (kbase_replay_process(katom))
+                       ret = MALI_FALSE;
                else
                        ret = jd_done_nolock(katom);
 
@@ -1054,14 +1065,21 @@ mali_bool jd_submit_atom(kbase_context *kctx,
        return ret;
 }
 
-mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *submit_data)
+#ifdef BASE_LEGACY_UK6_SUPPORT
+mali_error kbase_jd_submit(struct kbase_context *kctx,
+               const struct kbase_uk_job_submit *submit_data,
+               int uk6_atom)
+#else
+mali_error kbase_jd_submit(struct kbase_context *kctx,
+               const struct kbase_uk_job_submit *submit_data)
+#endif /* BASE_LEGACY_UK6_SUPPORT */
 {
-       kbase_jd_context *jctx = &kctx->jctx;
+       struct kbase_jd_context *jctx = &kctx->jctx;
        mali_error err = MALI_ERROR_NONE;
        int i;
        mali_bool need_to_try_schedule_context = MALI_FALSE;
-       kbase_device *kbdev;
-       void *user_addr;
+       struct kbase_device *kbdev;
+       void __user *user_addr;
 
        /*
         * kbase_jd_submit isn't expected to fail and so all errors with the jobs
@@ -1069,14 +1087,20 @@ mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *submi
         */
        kbdev = kctx->kbdev;
 
-       beenthere(kctx,"%s", "Enter");
+       beenthere(kctx, "%s", "Enter");
 
        if ((kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_SUBMIT_DISABLED) != 0) {
                dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it");
                return MALI_ERROR_FUNCTION_FAILED;
        }
 
+#ifdef BASE_LEGACY_UK6_SUPPORT
+       if ((uk6_atom && submit_data->stride !=
+                       sizeof(struct base_jd_atom_v2_uk6)) ||
+                       submit_data->stride != sizeof(base_jd_atom_v2)) {
+#else
        if (submit_data->stride != sizeof(base_jd_atom_v2)) {
+#endif /* BASE_LEGACY_UK6_SUPPORT */
                dev_err(kbdev->dev, "Stride passed to job_submit doesn't match kernel");
                return MALI_ERROR_FUNCTION_FAILED;
        }
@@ -1086,16 +1110,61 @@ mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *submi
        KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(submit_data->nr_atoms, &kctx->timeline.jd_atoms_in_flight));
 
        for (i = 0; i < submit_data->nr_atoms; i++) {
-               base_jd_atom_v2 user_atom;
-               kbase_jd_atom *katom;
-
+               struct base_jd_atom_v2 user_atom;
+               struct kbase_jd_atom *katom;
+
+#ifdef BASE_LEGACY_UK6_SUPPORT
+               if (uk6_atom) {
+                       struct base_jd_atom_v2_uk6 user_atom_v6;
+                       base_jd_dep_type dep_types[2] = {BASE_JD_DEP_TYPE_DATA, BASE_JD_DEP_TYPE_DATA};
+
+                       if (copy_from_user(&user_atom_v6, user_addr,
+                                       sizeof(user_atom_v6))) {
+                               err = MALI_ERROR_FUNCTION_FAILED;
+                               KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx,
+                                       atomic_sub_return(
+                                       submit_data->nr_atoms - i,
+                                       &kctx->timeline.jd_atoms_in_flight));
+                               break;
+                       }
+                       /* Convert from UK6 atom format to UK7 format */
+                       user_atom.jc = user_atom_v6.jc;
+                       user_atom.udata = user_atom_v6.udata;
+                       user_atom.extres_list = user_atom_v6.extres_list;
+                       user_atom.nr_extres = user_atom_v6.nr_extres;
+                       user_atom.core_req = user_atom_v6.core_req;
+
+                       /* atom number 0 is used for no dependency atoms */
+                       if (!user_atom_v6.pre_dep[0])
+                               dep_types[0] = BASE_JD_DEP_TYPE_INVALID;
+
+                       base_jd_atom_dep_set(&user_atom.pre_dep[0],
+                                       user_atom_v6.pre_dep[0],
+                                       dep_types[0]);
+
+                       /* atom number 0 is used for no dependency atoms */
+                       if (!user_atom_v6.pre_dep[1])
+                               dep_types[1] = BASE_JD_DEP_TYPE_INVALID;
+
+                       base_jd_atom_dep_set(&user_atom.pre_dep[1],
+                                       user_atom_v6.pre_dep[1],
+                                       dep_types[1]);
+
+                       user_atom.atom_number = user_atom_v6.atom_number;
+                       user_atom.prio = user_atom_v6.prio;
+                       user_atom.device_nr = user_atom_v6.device_nr;
+               } else {
+#endif /* BASE_LEGACY_UK6_SUPPORT */
                if (copy_from_user(&user_atom, user_addr, sizeof(user_atom)) != 0) {
                        err = MALI_ERROR_FUNCTION_FAILED;
                        KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(submit_data->nr_atoms - i, &kctx->timeline.jd_atoms_in_flight));
                        break;
                }
+#ifdef BASE_LEGACY_UK6_SUPPORT
+               }
+#endif /* BASE_LEGACY_UK6_SUPPORT */
 
-               user_addr = (void *)((uintptr_t) user_addr + submit_data->stride);
+               user_addr = (void __user *)((uintptr_t) user_addr + submit_data->stride);
 
                mutex_lock(&jctx->lock);
                katom = &jctx->atoms[user_atom.atom_number];
@@ -1126,6 +1195,12 @@ mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *submi
 
                need_to_try_schedule_context |=
                                       jd_submit_atom(kctx, &user_atom, katom);
+
+               /* Register a completed job as a disjoint event when the GPU is in a disjoint state
+                * (i.e. being reset or replaying jobs).
+                */
+               kbase_disjoint_event_potential(kbdev);
+
                mutex_unlock(&jctx->lock);
        }
 
@@ -1137,7 +1212,7 @@ mali_error kbase_jd_submit(kbase_context *kctx, const kbase_uk_job_submit *submi
 
 KBASE_EXPORT_TEST_API(kbase_jd_submit)
 
-static void kbasep_jd_cacheclean(kbase_device *kbdev)
+static void kbasep_jd_cacheclean(struct kbase_device *kbdev)
 {
        /* Limit the number of loops to avoid a hang if the interrupt is missed */
        u32 max_loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
@@ -1172,15 +1247,15 @@ static void kbasep_jd_cacheclean(kbase_device *kbdev)
  */
 static void jd_done_worker(struct work_struct *data)
 {
-       kbase_jd_atom *katom = container_of(data, kbase_jd_atom, work);
-       kbase_jd_context *jctx;
-       kbase_context *kctx;
-       kbasep_js_kctx_info *js_kctx_info;
-       kbasep_js_policy *js_policy;
-       kbase_device *kbdev;
-       kbasep_js_device_data *js_devdata;
+       struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+       struct kbase_jd_context *jctx;
+       struct kbase_context *kctx;
+       struct kbasep_js_kctx_info *js_kctx_info;
+       union kbasep_js_policy *js_policy;
+       struct kbase_device *kbdev;
+       struct kbasep_js_device_data *js_devdata;
        u64 cache_jc = katom->jc;
-       kbasep_js_atom_retained_state katom_retained_state;
+       struct kbasep_js_atom_retained_state katom_retained_state;
 
        /* Soft jobs should never reach this function */
        KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
@@ -1209,7 +1284,7 @@ static void jd_done_worker(struct work_struct *data)
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) && katom->event_code != BASE_JD_EVENT_DONE && !(katom->event_code & BASE_JD_SW_EVENT))
                kbasep_jd_cacheclean(kbdev);  /* cache flush when jobs complete with non-done codes */
        else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
-               if (kbdev->gpu_props.num_core_groups > 1 && 
+               if (kbdev->gpu_props.num_core_groups > 1 &&
                    !(katom->affinity & kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
                    (katom->affinity & kbdev->gpu_props.props.coherency_info.group[1].core_mask)) {
                        dev_dbg(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
@@ -1221,16 +1296,14 @@ static void jd_done_worker(struct work_struct *data)
            (katom->core_req & BASE_JD_REQ_FS)                        &&
            katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT       &&
            (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
-           !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)){
-               dev_dbg(kbdev->dev,
-                                      "Soft-stopped fragment shader job got a TILE_RANGE_FAULT." \
-                                      "Possible HW issue, trying SW workaround\n" );
-               if (kbasep_10969_workaround_clamp_coordinates(katom)){
+           !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) {
+               dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n");
+               if (kbasep_10969_workaround_clamp_coordinates(katom)) {
                        /* The job had a TILE_RANGE_FAULT after was soft-stopped.
                         * Due to an HW issue we try to execute the job
                         * again.
                         */
-                       dev_dbg(kbdev->dev, "Clamping has been executed, try to rerun the job\n" );
+                       dev_dbg(kbdev->dev, "Clamping has been executed, try to rerun the job\n");
                        katom->event_code = BASE_JD_EVENT_STOPPED;
                        katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
 
@@ -1265,11 +1338,16 @@ static void jd_done_worker(struct work_struct *data)
 
        if (!kbasep_js_has_atom_finished(&katom_retained_state)) {
                unsigned long flags;
+
                /* Requeue the atom on soft-stop / removed from NEXT registers */
                dev_dbg(kbdev->dev, "JS: Soft Stopped/Removed from next on Ctx %p; Requeuing", kctx);
 
                mutex_lock(&js_devdata->runpool_mutex);
                kbasep_js_clear_job_retry_submit(katom);
+               /* An atom that has been hard-stopped might have previously
+                * been soft-stopped and has just finished before the hard-stop
+                * occurred. For this reason, clear the hard-stopped flag */
+               katom->atom_flags &= ~(KBASE_KATOM_FLAG_BEEN_HARD_STOPPED);
 
                KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, katom));
                spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
@@ -1333,12 +1411,12 @@ static void jd_done_worker(struct work_struct *data)
  */
 static void jd_cancel_worker(struct work_struct *data)
 {
-       kbase_jd_atom *katom = container_of(data, kbase_jd_atom, work);
-       kbase_jd_context *jctx;
-       kbase_context *kctx;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+       struct kbase_jd_context *jctx;
+       struct kbase_context *kctx;
+       struct kbasep_js_kctx_info *js_kctx_info;
        mali_bool need_to_try_schedule_context;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        /* Soft jobs should never reach this function */
        KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
@@ -1375,7 +1453,6 @@ static void jd_cancel_worker(struct work_struct *data)
 
        /* katom may have been freed now, do not use! */
        mutex_unlock(&jctx->lock);
-
 }
 
 /**
@@ -1383,7 +1460,7 @@ static void jd_cancel_worker(struct work_struct *data)
  *
  * This must be used whenever a job has been removed from the Hardware, e.g.:
  * - An IRQ indicates that the job finished (for both error and 'done' codes)
- * - The job was evicted from the JSn_HEAD_NEXT registers during a Soft/Hard stop.
+ * - The job was evicted from the JS_HEAD_NEXT registers during a Soft/Hard stop.
  *
  * Some work is carried out immediately, and the rest is deferred onto a workqueue
  *
@@ -1392,11 +1469,12 @@ static void jd_cancel_worker(struct work_struct *data)
  * The caller must hold kbasep_js_device_data::runpool_irq::lock
  *
  */
-void kbase_jd_done(kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
-                   kbasep_js_atom_done_code done_code)
+void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
+               kbasep_js_atom_done_code done_code)
 {
-       kbase_context *kctx;
-       kbase_device *kbdev;
+       struct kbase_context *kctx;
+       struct kbase_device *kbdev;
+
        KBASE_DEBUG_ASSERT(katom);
        kctx = katom->kctx;
        KBASE_DEBUG_ASSERT(kctx);
@@ -1410,6 +1488,7 @@ void kbase_jd_done(kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
 
        KBASE_TRACE_ADD(kbdev, JD_DONE, kctx, katom, katom->jc, 0);
 
+       kbase_job_check_leave_disjoint(kbdev, katom);
        kbasep_js_job_done_slot_irq(katom, slot_nr, end_timestamp, done_code);
 
        katom->slot_nr = slot_nr;
@@ -1421,10 +1500,11 @@ void kbase_jd_done(kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
 
 KBASE_EXPORT_TEST_API(kbase_jd_done)
 
-void kbase_jd_cancel(kbase_device *kbdev, kbase_jd_atom *katom)
+void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
 {
-       kbase_context *kctx;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbase_context *kctx;
+       struct kbasep_js_kctx_info *js_kctx_info;
+
        KBASE_DEBUG_ASSERT(NULL != kbdev);
        KBASE_DEBUG_ASSERT(NULL != katom);
        kctx = katom->kctx;
@@ -1444,7 +1524,7 @@ void kbase_jd_cancel(kbase_device *kbdev, kbase_jd_atom *katom)
        queue_work(kctx->jctx.job_done_wq, &katom->work);
 }
 
-typedef struct zap_reset_data {
+struct zap_reset_data {
        /* The stages are:
         * 1. The timer has never been called
         * 2. The zap has timed out, all slots are soft-stopped - the GPU reset will happen.
@@ -1453,15 +1533,15 @@ typedef struct zap_reset_data {
         * (-1 - The timer has been cancelled)
         */
        int stage;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        struct hrtimer timer;
-       spinlock_t lock;
-} zap_reset_data;
+       spinlock_t lock; /* protects updates to stage member */
+};
 
 static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
 {
-       zap_reset_data *reset_data = container_of(timer, zap_reset_data, timer);
-       kbase_device *kbdev = reset_data->kbdev;
+       struct zap_reset_data *reset_data = container_of(timer, struct zap_reset_data, timer);
+       struct kbase_device *kbdev = reset_data->kbdev;
        unsigned long flags;
 
        spin_lock_irqsave(&reset_data->lock, flags);
@@ -1469,11 +1549,12 @@ static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
        if (reset_data->stage == -1)
                goto out;
 
+#if KBASE_GPU_RESET_EN
        if (kbase_prepare_to_reset_gpu(kbdev)) {
                dev_err(kbdev->dev, "Issueing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n", ZAP_TIMEOUT);
                kbase_reset_gpu(kbdev);
        }
-
+#endif /* KBASE_GPU_RESET_EN */
        reset_data->stage = 2;
 
  out:
@@ -1482,14 +1563,12 @@ static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-void kbase_jd_zap_context(kbase_context *kctx)
+void kbase_jd_zap_context(struct kbase_context *kctx)
 {
-       kbase_jd_atom *katom;
-       #if 0
-       struct list_head *entry,*entry1;
-       #endif
-       kbase_device *kbdev;
-       zap_reset_data reset_data;
+       struct kbase_jd_atom *katom;
+       struct list_head *entry, *tmp;
+       struct kbase_device *kbdev;
+       struct zap_reset_data reset_data;
        unsigned long flags;
 
        KBASE_DEBUG_ASSERT(kctx);
@@ -1502,32 +1581,15 @@ void kbase_jd_zap_context(kbase_context *kctx)
        mutex_lock(&kctx->jctx.lock);
 
        /*
-        * While holding the kbase_jd_context lock clean up jobs which are known to kbase but are
+        * While holding the struct kbase_jd_context lock clean up jobs which are known to kbase but are
         * queued outside the job scheduler.
         */
-       
-       pr_info("%p,%p,%p\n",
-                       &kctx->waiting_soft_jobs,
-                       kctx->waiting_soft_jobs.next,
-                       kctx->waiting_soft_jobs.prev);
-       
-       while (!list_empty(&kctx->waiting_soft_jobs)) {
-               katom = list_first_entry(&kctx->waiting_soft_jobs,
-                                                                struct kbase_jd_atom,
-                                                                dep_item[0]);
-               list_del(&katom->dep_item[0]);
-               kbase_cancel_soft_job(katom);
-       }
-       #if 0
-       list_for_each_safe(entry, entry1, &kctx->waiting_soft_jobs) {
-               if(entry == (struct list_head *)LIST_POISON1)
-                       pr_err("@get to the end of a list, error happened in list somewhere@\n");
-               katom = list_entry(entry, kbase_jd_atom, dep_item[0]);
-                       pr_info("katom = %p,&katom->dep_item[0] = %p\n",katom,&katom->dep_item[0]);
+
+       list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
+               katom = list_entry(entry, struct kbase_jd_atom, dep_item[0]);
                kbase_cancel_soft_job(katom);
        }
-       #endif
-       /* kctx->waiting_soft_jobs is not valid after this point */
+
 
 #ifdef CONFIG_KDS
 
@@ -1536,12 +1598,12 @@ void kbase_jd_zap_context(kbase_context *kctx)
         * on kds resources which may never be released when contexts are zapped, resulting
         * in a hang.
         *
-        * Note that we can safely iterate over the list as the kbase_jd_context lock is held,
+        * Note that we can safely iterate over the list as the struct kbase_jd_context lock is held,
         * this prevents items being removed when calling job_done_nolock in kbase_cancel_kds_wait_job.
         */
 
-       list_for_each( entry, &kctx->waiting_kds_resource) {
-               katom = list_entry(entry, kbase_jd_atom, node);
+       list_for_each(entry, &kctx->waiting_kds_resource) {
+               katom = list_entry(entry, struct kbase_jd_atom, node);
 
                kbase_cancel_kds_wait_job(katom);
        }
@@ -1593,7 +1655,7 @@ void kbase_jd_zap_context(kbase_context *kctx)
 
 KBASE_EXPORT_TEST_API(kbase_jd_zap_context)
 
-mali_error kbase_jd_init(kbase_context *kctx)
+mali_error kbase_jd_init(struct kbase_context *kctx)
 {
        int i;
        mali_error mali_err = MALI_ERROR_NONE;
@@ -1648,7 +1710,7 @@ mali_error kbase_jd_init(kbase_context *kctx)
 
 KBASE_EXPORT_TEST_API(kbase_jd_init)
 
-void kbase_jd_exit(kbase_context *kctx)
+void kbase_jd_exit(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kctx);
 
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c
new file mode 100755 (executable)
index 0000000..92422e6
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/seq_file.h>
+
+#include <mali_kbase_jd_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * @brief Show callback for the @c JD atoms debugfs file.
+ *
+ * This function is called to get the contents of the @c JD atoms debugfs file.
+ * This is a report of all atoms managed by kbase_jd_context::atoms .
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if successfully prints data in debugfs entry file, failure
+ * otherwise
+ */
+static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data)
+{
+       struct kbase_context *kctx = sfile->private;
+       struct kbase_jd_atom *atoms;
+       unsigned long irq_flags;
+       int i;
+
+       KBASE_DEBUG_ASSERT(kctx != NULL);
+
+       /* Print table heading */
+       seq_puts(sfile, "atom id,core reqs,status,coreref status,predeps,start time,time on gpu\n");
+
+       atoms = kctx->jctx.atoms;
+       /* General atom states */
+       mutex_lock(&kctx->jctx.lock);
+       /* JS-related states */
+       spin_lock_irqsave(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags);
+       for (i = 0; i != BASE_JD_ATOM_COUNT; ++i) {
+               struct kbase_jd_atom *atom = &atoms[i];
+               s64 start_timestamp = 0;
+
+               if (atom->status == KBASE_JD_ATOM_STATE_UNUSED)
+                       continue;
+
+               /* start_timestamp is cleared as soon as the atom leaves UNUSED state
+                * and set before a job is submitted to the h/w, a non-zero value means
+                * it is valid */
+               if (ktime_to_ns(atom->start_timestamp))
+                       start_timestamp = ktime_to_ns(
+                                       ktime_sub(ktime_get(), atom->start_timestamp));
+
+               seq_printf(sfile,
+                               "%i,%u,%u,%u,%u %u,%lli,%llu\n",
+                               i, atom->core_req, atom->status, atom->coreref_state,
+                               atom->dep[0].atom ? atom->dep[0].atom - atoms : 0,
+                               atom->dep[1].atom ? atom->dep[1].atom - atoms : 0,
+                               (signed long long)start_timestamp,
+                               (unsigned long long)(atom->time_spent_us ?
+                                       atom->time_spent_us * 1000 : start_timestamp)
+                               );
+       }
+       spin_unlock_irqrestore(&kctx->kbdev->js_data.runpool_irq.lock, irq_flags);
+       mutex_unlock(&kctx->jctx.lock);
+
+       return 0;
+}
+
+
+/**
+ * @brief File operations related to debugfs entry for atoms
+ */
+static int kbasep_jd_debugfs_atoms_open(struct inode *in, struct file *file)
+{
+       return single_open(file, kbasep_jd_debugfs_atoms_show, in->i_private);
+}
+
+static const struct file_operations kbasep_jd_debugfs_atoms_fops = {
+       .open = kbasep_jd_debugfs_atoms_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+
+int kbasep_jd_debugfs_init(struct kbase_device *kbdev)
+{
+       kbdev->jd_directory = debugfs_create_dir(
+                       "jd", kbdev->mali_debugfs_directory);
+       if (IS_ERR(kbdev->jd_directory)) {
+               dev_err(kbdev->dev, "Couldn't create mali jd debugfs directory\n");
+               goto err;
+       }
+
+       return 0;
+
+err:
+       return -1;
+}
+
+
+void kbasep_jd_debugfs_term(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+       if (!IS_ERR(kbdev->jd_directory))
+               debugfs_remove_recursive(kbdev->jd_directory);
+}
+
+
+int kbasep_jd_debugfs_ctx_add(struct kbase_context *kctx)
+{
+       /* Refer below for format string, %u is 10 chars max */
+       char dir_name[10 * 2 + 2];
+
+       KBASE_DEBUG_ASSERT(kctx != NULL);
+
+       /* Create per-context directory */
+       scnprintf(dir_name, sizeof(dir_name), "%u_%u", kctx->pid, kctx->id);
+       kctx->jd_ctx_dir = debugfs_create_dir(dir_name, kctx->kbdev->jd_directory);
+       if (IS_ERR(kctx->jd_ctx_dir))
+               goto err;
+
+       /* Expose all atoms */
+       if (IS_ERR(debugfs_create_file("atoms", S_IRUGO,
+                       kctx->jd_ctx_dir, kctx, &kbasep_jd_debugfs_atoms_fops)))
+               goto err_jd_ctx_dir;
+
+       return 0;
+
+err_jd_ctx_dir:
+       debugfs_remove_recursive(kctx->jd_ctx_dir);
+err:
+       return -1;
+}
+
+
+void kbasep_jd_debugfs_ctx_remove(struct kbase_context *kctx)
+{
+       KBASE_DEBUG_ASSERT(kctx != NULL);
+
+       if (!IS_ERR(kctx->jd_ctx_dir))
+               debugfs_remove_recursive(kctx->jd_ctx_dir);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+/**
+ * @brief Stub functions for when debugfs is disabled
+ */
+int kbasep_jd_debugfs_init(struct kbase_device *kbdev)
+{
+       return 0;
+}
+void kbasep_jd_debugfs_term(struct kbase_device *kbdev)
+{
+}
+int kbasep_jd_debugfs_ctx_add(struct kbase_context *ctx)
+{
+       return 0;
+}
+void kbasep_jd_debugfs_ctx_remove(struct kbase_context *ctx)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
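
Usage note for the file created above: each per-context "atoms" entry is plain text, a CSV heading followed by one row per non-UNUSED atom. A small userspace sketch for dumping it follows; the default path is only an example, since the parent directory name depends on how kbdev->mali_debugfs_directory was created elsewhere and the "<pid>_<ctx id>" component comes from kbasep_jd_debugfs_ctx_add().

#include <stdio.h>

int main(int argc, char **argv)
{
        /* e.g. /sys/kernel/debug/mali/jd/1234_1/atoms (path is an assumption) */
        const char *path = argc > 1 ? argv[1]
                                    : "/sys/kernel/debug/mali/jd/1234_1/atoms";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }

        /* First line is the heading printed by kbasep_jd_debugfs_atoms_show() */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);

        fclose(f);
        return 0;
}
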
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.h
new file mode 100755 (executable)
index 0000000..c045736
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_jd_debugfs.h
+ * Header file for job dispatcher-related entries in debugfs
+ */
+
+#ifndef _KBASE_JD_DEBUGFS_H
+#define _KBASE_JD_DEBUGFS_H
+
+#include <linux/debugfs.h>
+
+#include <mali_kbase.h>
+
+/**
+ * @brief Initialize JD debugfs entries
+ *
+ * This should be called during device probing after the main mali debugfs
+ * directory has been created.
+ *
+ * @param[in] kbdev Pointer to kbase_device
+ */
+int kbasep_jd_debugfs_init(struct kbase_device *kbdev);
+
+/**
+ * @brief Clean up all JD debugfs entries and related data
+ *
+ * This should be called during device removal before the main mali debugfs
+ * directory will be removed.
+ *
+ * @param[in] kbdev Pointer to kbase_device
+ */
+void kbasep_jd_debugfs_term(struct kbase_device *kbdev);
+
+/**
+ * @brief Add new entry to JD debugfs
+ *
+ * @param[in] kctx Pointer to kbase_context
+ *
+ * @return 0 on success, failure otherwise
+ */
+int kbasep_jd_debugfs_ctx_add(struct kbase_context *kctx);
+
+/**
+ * @brief Remove entry from JD debugfs
+ *
+ * param[in] kctx Pointer to kbase_context
+ */
+void kbasep_jd_debugfs_ctx_remove(struct kbase_context *kctx);
+
+#endif  /*_KBASE_JD_DEBUGFS_H*/
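
A hedged sketch of how these hooks are meant to be wired up, per the comments above: device-wide init/term around the lifetime of the main mali debugfs directory, and per-context add/remove around context creation and destruction. The example_* wrappers are hypothetical stand-ins, not the driver's real entry points; only the kbasep_jd_debugfs_* calls come from this header.

#include <mali_kbase_jd_debugfs.h>

int example_device_debugfs_init(struct kbase_device *kbdev)
{
        /* Call after the main mali debugfs directory has been created */
        return kbasep_jd_debugfs_init(kbdev);
}

void example_device_debugfs_term(struct kbase_device *kbdev)
{
        /* Call before the main mali debugfs directory is removed */
        kbasep_jd_debugfs_term(kbdev);
}

int example_context_debugfs_add(struct kbase_context *kctx)
{
        /* Exposes this context's atoms under jd/<pid>_<ctx id>/atoms */
        return kbasep_jd_debugfs_ctx_add(kctx);
}

void example_context_debugfs_remove(struct kbase_context *kctx)
{
        kbasep_jd_debugfs_ctx_remove(kctx);
}
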
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jm.c b/drivers/gpu/arm/midgard/mali_kbase_jm.c
index f2767770656b4de6319243ef8cc9235fc49ab003..7cf7df470cac18b7c33039d06ffd9c220e9e12f9 100755 (executable)
@@ -23,6 +23,7 @@
  */
 
 #include <mali_kbase.h>
+#include <mali_kbase_config.h>
 #include <mali_midg_regmap.h>
 #include <mali_kbase_gator.h>
 #include <mali_kbase_js_affinity.h>
@@ -38,8 +39,9 @@ u64 mali_js1_affinity_mask = 0xFFFFFFFFFFFFFFFFULL;
 u64 mali_js2_affinity_mask = 0xFFFFFFFFFFFFFFFFULL;
 #endif
 
-
-static void kbasep_try_reset_gpu_early(kbase_device *kbdev);
+#if KBASE_GPU_RESET_EN
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev);
+#endif /* KBASE_GPU_RESET_EN */
 
 #ifdef CONFIG_GPU_TRACEPOINTS
 static char *kbasep_make_job_slot_string(int js, char *js_string)
@@ -49,9 +51,9 @@ static char *kbasep_make_job_slot_string(int js, char *js_string)
 }
 #endif
 
-static void kbase_job_hw_submit(kbase_device *kbdev, kbase_jd_atom *katom, int js)
+static void kbase_job_hw_submit(struct kbase_device *kbdev, struct kbase_jd_atom *katom, int js)
 {
-       kbase_context *kctx;
+       struct kbase_context *kctx;
        u32 cfg;
        u64 jc_head = katom->jc;
 
@@ -66,19 +68,19 @@ static void kbase_job_hw_submit(kbase_device *kbdev, kbase_jd_atom *katom, int j
        kbase_js_debug_log_current_affinities(kbdev);
        KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js, katom->affinity));
 
-       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), jc_head & 0xFFFFFFFF, kctx);
-       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), jc_head >> 32, kctx);
+       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), jc_head & 0xFFFFFFFF, kctx);
+       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), jc_head >> 32, kctx);
 
 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
        {
                u64 mask;
                u32 value;
 
-               if( 0 == js )
+               if (0 == js)
                {
                        mask = mali_js0_affinity_mask;
                }
-               else if( 1 == js )
+               else if (1 == js)
                {
                        mask = mali_js1_affinity_mask;
                }
@@ -89,22 +91,22 @@ static void kbase_job_hw_submit(kbase_device *kbdev, kbase_jd_atom *katom, int j
 
                value = katom->affinity & (mask & 0xFFFFFFFF);
 
-               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), value, kctx);
+               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO), value, kctx);
 
                value = (katom->affinity >> 32) & ((mask>>32) & 0xFFFFFFFF);
-               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), value, kctx);
+               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI), value, kctx);
        }
 #else
-       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_LO), katom->affinity & 0xFFFFFFFF, kctx);
-       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_AFFINITY_NEXT_HI), katom->affinity >> 32, kctx);
+       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO), katom->affinity & 0xFFFFFFFF, kctx);
+       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI), katom->affinity >> 32, kctx);
 #endif
 
        /* start MMU, medium priority, cache clean/flush on end, clean/flush on start */
-       cfg = kctx->as_nr | JSn_CONFIG_END_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_START_MMU | JSn_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JSn_CONFIG_THREAD_PRI(8);
+       cfg = kctx->as_nr | JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE | JS_CONFIG_START_MMU | JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JS_CONFIG_THREAD_PRI(8);
 
        if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
                if (!kbdev->jm_slots[js].job_chain_flag) {
-                       cfg |= JSn_CONFIG_JOB_CHAIN_FLAG;
+                       cfg |= JS_CONFIG_JOB_CHAIN_FLAG;
                        katom->atom_flags |= KBASE_KATOM_FLAGS_JOBCHAIN;
                        kbdev->jm_slots[js].job_chain_flag = MALI_TRUE;
                } else {
@@ -113,7 +115,7 @@ static void kbase_job_hw_submit(kbase_device *kbdev, kbase_jd_atom *katom, int j
                }
        }
 
-       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_CONFIG_NEXT), cfg, kctx);
+       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg, kctx);
 
        /* Write an approximate start timestamp.
         * It's approximate because there might be a job in the HEAD register. In
@@ -140,19 +142,21 @@ static void kbase_job_hw_submit(kbase_device *kbdev, kbase_jd_atom *katom, int j
 #endif
        kbase_timeline_job_slot_submit(kbdev, kctx, katom, js);
 
-       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_START, katom->kctx);
+       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), JS_COMMAND_START, katom->kctx);
 }
 
-void kbase_job_submit_nolock(kbase_device *kbdev, kbase_jd_atom *katom, int js)
+void kbase_job_submit_nolock(struct kbase_device *kbdev, struct kbase_jd_atom *katom, int js)
 {
-       kbase_jm_slot *jm_slots;
+       struct kbase_jm_slot *jm_slots;
+#if KBASE_PM_EN
        base_jd_core_req core_req;
-
+#endif
        KBASE_DEBUG_ASSERT(kbdev);
        KBASE_DEBUG_ASSERT(katom);
 
        jm_slots = kbdev->jm_slots;
 
+#if KBASE_PM_EN
        core_req = katom->core_req;
        if (core_req & BASE_JD_REQ_ONLY_COMPUTE) {
                unsigned long flags;
@@ -170,6 +174,7 @@ void kbase_job_submit_nolock(kbase_device *kbdev, kbase_jd_atom *katom, int js)
                kbdev->pm.metrics.active_gl_ctx++;
                spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
        }
+#endif
 
        /*
         * We can have:
@@ -183,17 +188,17 @@ void kbase_job_submit_nolock(kbase_device *kbdev, kbase_jd_atom *katom, int js)
        kbase_job_hw_submit(kbdev, katom, js);
 }
 
-void kbase_job_done_slot(kbase_device *kbdev, int s, u32 completion_code, u64 job_tail, ktime_t *end_timestamp)
+void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code, u64 job_tail, ktime_t *end_timestamp)
 {
-       kbase_jm_slot *slot;
-       kbase_jd_atom *katom;
+       struct kbase_jm_slot *slot;
+       struct kbase_jd_atom *katom;
        mali_addr64 jc_head;
-       kbase_context *kctx;
+       struct kbase_context *kctx;
 
        KBASE_DEBUG_ASSERT(kbdev);
 
        if (completion_code != BASE_JD_EVENT_DONE && completion_code != BASE_JD_EVENT_STOPPED)
-               dev_err(kbdev->dev, "t6xx: GPU fault 0x%02lx from job slot %d\n", (unsigned long)completion_code, s);
+               dev_err(kbdev->dev, "GPU fault 0x%02lx from job slot %d\n", (unsigned long)completion_code, s);
 
        /* IMPORTANT: this function must only contain work necessary to complete a
         * job from a Real IRQ (and not 'fake' completion, e.g. from
@@ -243,7 +248,7 @@ void kbase_job_done_slot(kbase_device *kbdev, int s, u32 completion_code, u64 jo
         */
 #ifdef CONFIG_GPU_TRACEPOINTS
        if (kbasep_jm_nr_jobs_submitted(slot) != 0) {
-               kbase_jd_atom *katom;
+               struct kbase_jd_atom *katom;
                char js_string[16];
                katom = kbasep_jm_peek_idx_submit_slot(slot, 0);        /* The atom in the HEAD */
                trace_gpu_sched_switch(kbasep_make_job_slot_string(s, js_string), ktime_to_ns(*end_timestamp), (u32)katom->kctx, 0, katom->work_id);
@@ -265,12 +270,12 @@ void kbase_job_done_slot(kbase_device *kbdev, int s, u32 completion_code, u64 jo
  * the time the job was submitted, to work out the best estimate (which might
  * still result in an over-estimate to the calculated time spent)
  */
-STATIC void kbasep_job_slot_update_head_start_timestamp(kbase_device *kbdev, kbase_jm_slot *slot, ktime_t end_timestamp)
+STATIC void kbasep_job_slot_update_head_start_timestamp(struct kbase_device *kbdev, struct kbase_jm_slot *slot, ktime_t end_timestamp)
 {
        KBASE_DEBUG_ASSERT(slot);
 
        if (kbasep_jm_nr_jobs_submitted(slot) > 0) {
-               kbase_jd_atom *katom;
+               struct kbase_jd_atom *katom;
                ktime_t new_timestamp;
                ktime_t timestamp_diff;
                katom = kbasep_jm_peek_idx_submit_slot(slot, 0);        /* The atom in the HEAD */
@@ -294,13 +299,13 @@ STATIC void kbasep_job_slot_update_head_start_timestamp(kbase_device *kbdev, kba
        }
 }
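
The comment above describes how the HEAD atom's start timestamp is pulled forward to avoid over-counting; a minimal stand-alone model of that clamp, using plain nanosecond integers instead of ktime_t and invented names (a sketch only, not the driver's code), might look like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model: when the job ahead of the HEAD atom completes at
 * end_timestamp_ns, move the HEAD atom's recorded start time up to that
 * point so the time it spent queued behind the finished job is not
 * counted as its own run time. */
static void update_head_start_timestamp(int64_t *head_start_ns,
                                        int64_t end_timestamp_ns)
{
        if (end_timestamp_ns - *head_start_ns >= 0)
                *head_start_ns = end_timestamp_ns;
}

int main(void)
{
        int64_t head_start_ns = 1000;                      /* submitted at t = 1000 ns */
        update_head_start_timestamp(&head_start_ns, 5000); /* previous job done at t = 5000 ns */
        printf("adjusted start: %lld ns\n", (long long)head_start_ns);
        return 0;
}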
 
-void kbase_job_done(kbase_device *kbdev, u32 done)
+void kbase_job_done(struct kbase_device *kbdev, u32 done)
 {
        unsigned long flags;
        int i;
        u32 count = 0;
        ktime_t end_timestamp = ktime_get();
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        KBASE_DEBUG_ASSERT(kbdev);
        js_devdata = &kbdev->js_data;
@@ -319,7 +324,7 @@ void kbase_job_done(kbase_device *kbdev, u32 done)
        spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
 
        while (done) {
-               kbase_jm_slot *slot;
+               struct kbase_jm_slot *slot;
                u32 failed = done >> 16;
 
                /* treat failed slots as finished slots */
@@ -341,7 +346,7 @@ void kbase_job_done(kbase_device *kbdev, u32 done)
 
                        if (failed & (1u << i)) {
                                /* read out the job slot status code if the job slot reported failure */
-                               completion_code = kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_STATUS), NULL);
+                               completion_code = kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS), NULL);
 
                                switch (completion_code) {
                                case BASE_JD_EVENT_STOPPED:
@@ -349,7 +354,7 @@ void kbase_job_done(kbase_device *kbdev, u32 done)
                                        kbase_trace_mali_job_slots_event(GATOR_MAKE_EVENT(GATOR_JOB_SLOT_SOFT_STOPPED, i), NULL, 0);
 #endif                         /* CONFIG_MALI_GATOR_SUPPORT */
                                        /* Soft-stopped job - read the value of JS<n>_TAIL so that the job chain can be resumed */
-                                       job_tail = (u64) kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_TAIL_LO), NULL) | ((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_TAIL_HI), NULL) << 32);
+                                       job_tail = (u64) kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_TAIL_LO), NULL) | ((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_TAIL_HI), NULL) << 32);
                                        break;
                                case BASE_JD_EVENT_NOT_STARTED:
                                        /* PRLAM-10673 can cause a TERMINATED job to come back as NOT_STARTED, but the error interrupt helps us detect it */
@@ -357,7 +362,7 @@ void kbase_job_done(kbase_device *kbdev, u32 done)
                                        /* fall through */
                                default:
                                        dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)", i, completion_code, kbase_exception_name(completion_code));
-                               kbdev->kbase_group_error++;
+                                       kbdev->kbase_group_error++;
                                }
                        }
 
@@ -427,7 +432,7 @@ void kbase_job_done(kbase_device *kbdev, u32 done)
 
                        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) {
                                /* Workaround for missing interrupt caused by PRLAM-10883 */
-                               if (((active >> i) & 1) && (0 == kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_STATUS), NULL))) {
+                               if (((active >> i) & 1) && (0 == kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS), NULL))) {
                                        /* Force job slot to be processed again */
                                        done |= (1u << i);
                                }
@@ -440,19 +445,19 @@ void kbase_job_done(kbase_device *kbdev, u32 done)
                kbasep_job_slot_update_head_start_timestamp(kbdev, slot, end_timestamp);
        }
        spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
-
+#if KBASE_GPU_RESET_EN
        if (atomic_read(&kbdev->reset_gpu) == KBASE_RESET_GPU_COMMITTED) {
                /* If we're trying to reset the GPU then we might be able to do it early
                 * (without waiting for a timeout) because some jobs have completed
                 */
                kbasep_try_reset_gpu_early(kbdev);
        }
-
+#endif /* KBASE_GPU_RESET_EN */
        KBASE_TRACE_ADD(kbdev, JM_IRQ_END, NULL, NULL, 0, count);
 }
 KBASE_EXPORT_TEST_API(kbase_job_done)
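
The interrupt handler above decodes a status word in which, per the "failed = done >> 16" line, bits [15:0] mark finished slots and bits [31:16] mark failed ones, with failed slots also treated as finished. A small stand-alone model of that decode follows; the exact fold used after the hunk boundary is not shown here, so the combined mask is an assumption.

#include <stdint.h>
#include <stdio.h>

/* Model of the per-slot decode: bit i in [15:0] means slot i finished,
 * bit i in [31:16] means slot i failed.  Failed slots are folded into the
 * finished set, mirroring the "treat failed slots as finished slots"
 * comment in the loop above. */
static void decode_job_irq(uint32_t done)
{
        uint32_t failed = done >> 16;
        uint32_t finished = (done & 0xFFFFu) | failed;
        int i;

        for (i = 0; i < 16; i++) {
                if (!(finished & (1u << i)))
                        continue;
                printf("slot %d: %s\n", i,
                       (failed & (1u << i)) ? "failed" : "done");
        }
}

int main(void)
{
        decode_job_irq(0x00020003u); /* slots 0 and 1 done, slot 1 also failed */
        return 0;
}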
 
-static mali_bool kbasep_soft_stop_allowed(kbase_device *kbdev, u16 core_reqs)
+static mali_bool kbasep_soft_stop_allowed(struct kbase_device *kbdev, u16 core_reqs)
 {
        mali_bool soft_stops_allowed = MALI_TRUE;
 
@@ -463,7 +468,7 @@ static mali_bool kbasep_soft_stop_allowed(kbase_device *kbdev, u16 core_reqs)
        return soft_stops_allowed;
 }
 
-static mali_bool kbasep_hard_stop_allowed(kbase_device *kbdev, u16 core_reqs)
+static mali_bool kbasep_hard_stop_allowed(struct kbase_device *kbdev, u16 core_reqs)
 {
        mali_bool hard_stops_allowed = MALI_TRUE;
 
@@ -474,21 +479,23 @@ static mali_bool kbasep_hard_stop_allowed(kbase_device *kbdev, u16 core_reqs)
        return hard_stops_allowed;
 }
 
-static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int js, u32 action, u16 core_reqs, kbase_jd_atom * target_katom )
+static void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev, int js, u32 action, u16 core_reqs, struct kbase_jd_atom *target_katom)
 {
-       kbase_context *kctx = target_katom->kctx;
+       struct kbase_context *kctx = target_katom->kctx;
 #if KBASE_TRACE_ENABLE
        u32 status_reg_before;
        u64 job_in_head_before;
        u32 status_reg_after;
 
+       KBASE_DEBUG_ASSERT(!(action & (~JS_COMMAND_MASK)));
+
        /* Check the head pointer */
-       job_in_head_before = ((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_LO), NULL))
-           | (((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_HI), NULL)) << 32);
-       status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_STATUS), NULL);
+       job_in_head_before = ((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_LO), NULL))
+           | (((u64) kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_HI), NULL)) << 32);
+       status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS), NULL);
 #endif
 
-       if (action == JSn_COMMAND_SOFT_STOP) {
+       if (action == JS_COMMAND_SOFT_STOP) {
                mali_bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev, core_reqs);
                if (!soft_stop_allowed) {
 #ifdef CONFIG_MALI_DEBUG
@@ -501,7 +508,7 @@ static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int
                target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED;
        }
 
-       if (action == JSn_COMMAND_HARD_STOP) {
+       if (action == JS_COMMAND_HARD_STOP) {
                mali_bool hard_stop_allowed = kbasep_hard_stop_allowed(kbdev, core_reqs);
                if (!hard_stop_allowed) {
                        /* Jobs can be hard-stopped for the following reasons:
@@ -520,15 +527,16 @@ static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int
                        dev_warn(kbdev->dev, "Attempt made to hard-stop a job that cannot be hard-stopped. core_reqs = 0x%X", (unsigned int)core_reqs);
                        return;
                }
+               target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_HARD_STOPPED;
        }
 
-       if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316) && action == JSn_COMMAND_SOFT_STOP) {
+       if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8316) && action == JS_COMMAND_SOFT_STOP) {
                int i;
-               kbase_jm_slot *slot;
+               struct kbase_jm_slot *slot;
                slot = &kbdev->jm_slots[js];
 
                for (i = 0; i < kbasep_jm_nr_jobs_submitted(slot); i++) {
-                       kbase_jd_atom *katom;
+                       struct kbase_jd_atom *katom;
 
                        katom = kbasep_jm_peek_idx_submit_slot(slot, i);
 
@@ -558,24 +566,24 @@ static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int
        }
 
        if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
-               if (action == JSn_COMMAND_SOFT_STOP)
-                       action = (target_katom->atom_flags & KBASE_KATOM_FLAGS_JOBCHAIN) ? 
-                                JSn_COMMAND_SOFT_STOP_1:
-                        JSn_COMMAND_SOFT_STOP_0;
+               if (action == JS_COMMAND_SOFT_STOP)
+                       action = (target_katom->atom_flags & KBASE_KATOM_FLAGS_JOBCHAIN) ?
+                                JS_COMMAND_SOFT_STOP_1 :
+                        JS_COMMAND_SOFT_STOP_0;
                else
-                       action = (target_katom->atom_flags & KBASE_KATOM_FLAGS_JOBCHAIN) ? 
-                                JSn_COMMAND_HARD_STOP_1:
-                        JSn_COMMAND_HARD_STOP_0;
+                       action = (target_katom->atom_flags & KBASE_KATOM_FLAGS_JOBCHAIN) ?
+                                JS_COMMAND_HARD_STOP_1 :
+                        JS_COMMAND_HARD_STOP_0;
        }
 
-       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND), action, kctx);
+       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action, kctx);
 
 #if KBASE_TRACE_ENABLE
-       status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_STATUS), NULL);
+       status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS), NULL);
        if (status_reg_after == BASE_JD_EVENT_ACTIVE) {
-               kbase_jm_slot *slot;
-               kbase_jd_atom *head;
-               kbase_context *head_kctx;
+               struct kbase_jm_slot *slot;
+               struct kbase_jd_atom *head;
+               struct kbase_context *head_kctx;
 
                slot = &kbdev->jm_slots[js];
                head = kbasep_jm_peek_idx_submit_slot(slot, slot->submitted_nr - 1);
@@ -590,23 +598,23 @@ static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int
                else
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, 0, js);
 
-               switch(action) {
-               case JSn_COMMAND_SOFT_STOP:
+               switch (action) {
+               case JS_COMMAND_SOFT_STOP:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, head_kctx, head, head->jc, js);
                        break;
-               case JSn_COMMAND_SOFT_STOP_0:
+               case JS_COMMAND_SOFT_STOP_0:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, head_kctx, head, head->jc, js);
                        break;
-               case JSn_COMMAND_SOFT_STOP_1:
+               case JS_COMMAND_SOFT_STOP_1:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, head_kctx, head, head->jc, js);
                        break;
-               case JSn_COMMAND_HARD_STOP:
+               case JS_COMMAND_HARD_STOP:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, head_kctx, head, head->jc, js);
                        break;
-               case JSn_COMMAND_HARD_STOP_0:
+               case JS_COMMAND_HARD_STOP_0:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, head_kctx, head, head->jc, js);
                        break;
-               case JSn_COMMAND_HARD_STOP_1:
+               case JS_COMMAND_HARD_STOP_1:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, head_kctx, head, head->jc, js);
                        break;
                default:
@@ -619,23 +627,23 @@ static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int
                else
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_CHECK_HEAD, NULL, NULL, 0, js);
 
-               switch(action) {
-               case JSn_COMMAND_SOFT_STOP:
+               switch (action) {
+               case JS_COMMAND_SOFT_STOP:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP, NULL, NULL, 0, js);
                        break;
-               case JSn_COMMAND_SOFT_STOP_0:
+               case JS_COMMAND_SOFT_STOP_0:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_0, NULL, NULL, 0, js);
                        break;
-               case JSn_COMMAND_SOFT_STOP_1:
+               case JS_COMMAND_SOFT_STOP_1:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_SOFTSTOP_1, NULL, NULL, 0, js);
                        break;
-               case JSn_COMMAND_HARD_STOP:
+               case JS_COMMAND_HARD_STOP:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP, NULL, NULL, 0, js);
                        break;
-               case JSn_COMMAND_HARD_STOP_0:
+               case JS_COMMAND_HARD_STOP_0:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_0, NULL, NULL, 0, js);
                        break;
-               case JSn_COMMAND_HARD_STOP_1:
+               case JS_COMMAND_HARD_STOP_1:
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_HARDSTOP_1, NULL, NULL, 0, js);
                        break;
                default:
@@ -662,19 +670,21 @@ static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int
  * @param kctx          The context to soft/hard-stop job(s) from (or NULL if all jobs should be targeted)
  * @param js            The slot that the job(s) are on
  * @param target_katom  The atom that should be targeted (or NULL if all jobs from the context should be targeted)
- * @param action        The action to perform, either JSn_COMMAND_HARD_STOP or JSn_COMMAND_SOFT_STOP
+ * @param action        The action to perform, either JS_COMMAND_HARD_STOP or JS_COMMAND_SOFT_STOP
  */
-static void kbasep_job_slot_soft_or_hard_stop(kbase_device *kbdev, kbase_context *kctx, int js, kbase_jd_atom *target_katom, u32 action)
+static void kbasep_job_slot_soft_or_hard_stop(struct kbase_device *kbdev, struct kbase_context *kctx, int js, struct kbase_jd_atom *target_katom, u32 action)
 {
-       kbase_jd_atom *katom;
+       struct kbase_jd_atom *katom;
        u8 i;
        u8 jobs_submitted;
-       kbase_jm_slot *slot;
+       struct kbase_jm_slot *slot;
        u16 core_reqs;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool can_safely_stop = kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION);
+       u32 hw_action = action & JS_COMMAND_MASK;
 
-       KBASE_DEBUG_ASSERT(action == JSn_COMMAND_HARD_STOP || action == JSn_COMMAND_SOFT_STOP);
+       KBASE_DEBUG_ASSERT(hw_action == JS_COMMAND_HARD_STOP ||
+                       hw_action == JS_COMMAND_SOFT_STOP);
        KBASE_DEBUG_ASSERT(kbdev);
        js_devdata = &kbdev->js_data;
 
@@ -706,25 +716,33 @@ static void kbasep_job_slot_soft_or_hard_stop(kbase_device *kbdev, kbase_context
                        continue;
 
                core_reqs = katom->core_req;
-       
+
+               /* This will be repeated for anything removed from the next
+                * registers, since their normal flow was also interrupted; note
+                * that this call might not enter the disjoint state, e.g. if we
+                * don't actually do a hard stop on the head atom.
+                */
+               kbase_job_check_enter_disjoint(kbdev, action, core_reqs, katom);
+
                if (JM_JOB_IS_CURRENT_JOB_INDEX(jobs_submitted - i)) {
                        /* The last job in the slot, check if there is a job in the next register */
-                       if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), NULL) == 0)
-                               kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, action, core_reqs, katom);
-                       else {
+                       if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), NULL) == 0) {
+                               kbasep_job_slot_soft_or_hard_stop_do_action(kbdev,
+                                               js, hw_action, core_reqs, katom);
+                       } else {
                                /* The job is in the next registers */
                                beenthere(kctx, "clearing job from next registers on slot %d", js);
-                               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_NOP, NULL);
+                               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), JS_COMMAND_NOP, NULL);
                                /* Check to see if we did remove a job from the next registers */
-                               if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), NULL) != 0) {
+                               if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL) != 0) {
                                        /* The job was successfully cleared from the next registers, requeue it */
-                                       kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
+                                       struct kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
                                        KBASE_DEBUG_ASSERT(dequeued_katom == katom);
                                        jobs_submitted--;
 
                                        /* Set the next registers to NULL */
-                                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), 0, NULL);
-                                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), 0, NULL);
+                                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), 0, NULL);
+                                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), 0, NULL);
 
                                        /* As the job is removed from the next registers we undo the associated
                                         * update to the job_chain_flag for the job slot. */
@@ -733,25 +751,32 @@ static void kbasep_job_slot_soft_or_hard_stop(kbase_device *kbdev, kbase_context
 
                                        KBASE_TRACE_ADD_SLOT(kbdev, JM_SLOT_EVICT, dequeued_katom->kctx, dequeued_katom, dequeued_katom->jc, js);
 
+                                       kbase_job_check_enter_disjoint(kbdev, action, 0u, dequeued_katom);
                                        /* Complete the job, indicate it took no time, but don't submit any more at this point */
                                        kbase_jd_done(dequeued_katom, js, NULL, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
                                } else {
                                        /* The job transitioned into the current registers before we managed to evict it,
                                         * in this case we fall back to soft/hard-stopping the job */
                                        beenthere(kctx, "missed job in next register, soft/hard-stopping slot %d", js);
-                                       kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, action, core_reqs, katom);
+                                       kbasep_job_slot_soft_or_hard_stop_do_action(kbdev,
+                                                       js, hw_action, core_reqs, katom);
                                }
                        }
                } else if (JM_JOB_IS_NEXT_JOB_INDEX(jobs_submitted - i)) {
-                       /* There's a job after this one, check to see if that job is in the next registers.
-             * If so, we need to pay attention to not accidently stop that one when issueing
-             * the command to stop the one pointed to by the head registers (as the one in the head
-             * may finish in the mean time and the one in the next moves to the head). Either the hardware
-                        * has support for this using job chain disambiguation or we need to evict the job
-                        * from the next registers first to ensure we can safely stop the one pointed to by
-                        * the head registers. */
-                       if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), NULL) != 0) {
-                               kbase_jd_atom *check_next_atom;
+                       /* There's a job after this one; check to see if that
+                        * job is in the next registers.  If so, we need to pay
+                        * attention not to accidentally stop that one when
+                        * issuing the command to stop the one pointed to by
+                        * the head registers (as the one in the head may
+                        * finish in the meantime and the one in the next
+                        * moves to the head). Either the hardware has support
+                        * for this using job chain disambiguation or we need
+                        * to evict the job from the next registers first to
+                        * ensure we can safely stop the one pointed to by the
+                        * head registers.
+                        */
+                       if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), NULL) != 0) {
+                               struct kbase_jd_atom *check_next_atom;
                                /* It is - we should remove that job and soft/hard-stop the slot */
 
                                /* Only proceed when the next job isn't a HW workaround 'dummy' job
@@ -776,21 +801,22 @@ static void kbasep_job_slot_soft_or_hard_stop(kbase_device *kbdev, kbase_context
 
                                if (!can_safely_stop) {
                                        beenthere(kctx, "clearing job from next registers on slot %d", js);
-                                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_NOP, NULL);
+                                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), JS_COMMAND_NOP, NULL);
 
                                        /* Check to see if we did remove a job from the next registers */
-                                       if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), NULL) != 0) {
+                                       if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL) != 0) {
                                                /* We did remove a job from the next registers, requeue it */
-                                               kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
+                                               struct kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
                                                KBASE_DEBUG_ASSERT(dequeued_katom != NULL);
                                                jobs_submitted--;
 
                                                /* Set the next registers to NULL */
-                                               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), 0, NULL);
-                                               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), 0, NULL);
+                                               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), 0, NULL);
+                                               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), 0, NULL);
 
                                                KBASE_TRACE_ADD_SLOT(kbdev, JM_SLOT_EVICT, dequeued_katom->kctx, dequeued_katom, dequeued_katom->jc, js);
 
+                                               kbase_job_check_enter_disjoint(kbdev, action, 0u, dequeued_katom);
                                                /* Complete the job, indicate it took no time, but don't submit any more at this point */
                                                kbase_jd_done(dequeued_katom, js, NULL, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
                                        } else {
@@ -802,7 +828,8 @@ static void kbasep_job_slot_soft_or_hard_stop(kbase_device *kbdev, kbase_context
 
                                /* Next is now free, so we can soft/hard-stop the slot */
                                beenthere(kctx, "soft/hard-stopped slot %d (there was a job in next which was successfully cleared)\n", js);
-                               kbasep_job_slot_soft_or_hard_stop_do_action(kbdev, js, action, core_reqs, katom);
+                               kbasep_job_slot_soft_or_hard_stop_do_action(kbdev,
+                                               js, hw_action, core_reqs, katom);
                        }
                        /* If there was no job in the next registers, then the job we were
                         * interested in has finished, so we need not take any action
@@ -813,11 +840,11 @@ static void kbasep_job_slot_soft_or_hard_stop(kbase_device *kbdev, kbase_context
        KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, 0);
 }
 
-void kbase_job_kill_jobs_from_context(kbase_context *kctx)
+void kbase_job_kill_jobs_from_context(struct kbase_context *kctx)
 {
        unsigned long flags;
-       kbase_device *kbdev;
-       kbasep_js_device_data *js_devdata;
+       struct kbase_device *kbdev;
+       struct kbasep_js_device_data *js_devdata;
        int i;
 
        KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -838,11 +865,11 @@ void kbase_job_kill_jobs_from_context(kbase_context *kctx)
        spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
 }
 
-void kbase_job_zap_context(kbase_context *kctx)
+void kbase_job_zap_context(struct kbase_context *kctx)
 {
-       kbase_device *kbdev;
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbase_device *kbdev;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
        int i;
        mali_bool evict_success;
 
@@ -855,7 +882,7 @@ void kbase_job_zap_context(kbase_context *kctx)
        /*
         * Critical assumption: No more submission is possible outside of the
         * workqueue. This is because the OS *must* prevent U/K calls (IOCTLs)
-        * whilst the kbase_context is terminating.
+        * whilst the struct kbase_context is terminating.
         */
 
        /* First, atomically do the following:
@@ -969,7 +996,7 @@ void kbase_job_zap_context(kbase_context *kctx)
 }
 KBASE_EXPORT_TEST_API(kbase_job_zap_context)
 
-mali_error kbase_job_slot_init(kbase_device *kbdev)
+mali_error kbase_job_slot_init(struct kbase_device *kbdev)
 {
        int i;
        KBASE_DEBUG_ASSERT(kbdev);
@@ -981,17 +1008,38 @@ mali_error kbase_job_slot_init(kbase_device *kbdev)
 }
 KBASE_EXPORT_TEST_API(kbase_job_slot_init)
 
-void kbase_job_slot_halt(kbase_device *kbdev)
+void kbase_job_slot_halt(struct kbase_device *kbdev)
 {
        CSTD_UNUSED(kbdev);
 }
 
-void kbase_job_slot_term(kbase_device *kbdev)
+void kbase_job_slot_term(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev);
 }
 KBASE_EXPORT_TEST_API(kbase_job_slot_term)
 
+/**
+ * Soft-stop the specified job slot, with extra information about the stop
+ *
+ * The job slot lock must be held when calling this function.
+ * The job slot must not already be in the process of being soft-stopped.
+ *
+ * Where possible any job in the next register is evicted before the soft-stop.
+ *
+ * @param kbdev         The kbase device
+ * @param js            The job slot to soft-stop
+ * @param target_katom  The job that should be soft-stopped (or NULL for any job)
+ * @param sw_flags      Flags to pass in about the soft-stop
+ */
+void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+                       struct kbase_jd_atom *target_katom, u32 sw_flags)
+{
+       KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK));
+       kbasep_job_slot_soft_or_hard_stop(kbdev, NULL, js, target_katom,
+                       JS_COMMAND_SOFT_STOP | sw_flags);
+}
+
 /**
  * Soft-stop the specified job slot
  *
@@ -1004,9 +1052,9 @@ KBASE_EXPORT_TEST_API(kbase_job_slot_term)
  * @param js            The job slot to soft-stop
  * @param target_katom  The job that should be soft-stopped (or NULL for any job)
  */
-void kbase_job_slot_softstop(kbase_device *kbdev, int js, kbase_jd_atom *target_katom)
+void kbase_job_slot_softstop(struct kbase_device *kbdev, int js, struct kbase_jd_atom *target_katom)
 {
-       kbasep_job_slot_soft_or_hard_stop(kbdev, NULL, js, target_katom, JSn_COMMAND_SOFT_STOP);
+       kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u);
 }
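
A short usage sketch of the two soft-stop entry points (driver context assumed; JS_COMMAND_SW_CAUSES_DISJOINT is the software flag tested by kbase_job_check_enter_disjoint() further down, and the assert in kbase_job_slot_softstop_swflags() requires any sw_flags bits to lie outside JS_COMMAND_MASK):

/* Plain soft-stop with no software flags: this is what the wrapper above does. */
kbase_job_slot_softstop_swflags(kbdev, js, target_katom, 0u);

/* Soft-stop that should also be logged as a disjoint event, e.g. when the
 * stop interrupts another context's normal flow: */
kbase_job_slot_softstop_swflags(kbdev, js, target_katom,
                JS_COMMAND_SW_CAUSES_DISJOINT);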
 
 /**
@@ -1020,13 +1068,14 @@ void kbase_job_slot_softstop(kbase_device *kbdev, int js, kbase_jd_atom *target_
  * @param target_katom The job that should be hard-stopped (or NULL for all
  *                     jobs from the context)
  */
-void kbase_job_slot_hardstop(kbase_context *kctx, int js,
-                               kbase_jd_atom *target_katom)
+void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
+                               struct kbase_jd_atom *target_katom)
 {
-       kbase_device *kbdev = kctx->kbdev;
+       struct kbase_device *kbdev = kctx->kbdev;
 
        kbasep_job_slot_soft_or_hard_stop(kbdev, kctx, js, target_katom,
-                                               JSn_COMMAND_HARD_STOP);
+                                               JS_COMMAND_HARD_STOP);
+#if KBASE_GPU_RESET_EN
        if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_8401) ||
                kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_9510) ||
                (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_T76X_3542) &&
@@ -1043,10 +1092,64 @@ void kbase_job_slot_hardstop(kbase_context *kctx, int js,
                        kbase_reset_gpu_locked(kbdev);
                }
        }
+#endif
 }
 
+/**
+ * For a certain soft/hard-stop action, work out whether to enter disjoint
+ * state.
+ *
+ * This does not register multiple disjoint events if the atom has already
+ * started a disjoint period
+ *
+ * core_reqs can be supplied as 0 if the atom had not started on the hardware
+ * (and so a 'real' soft/hard-stop was not required, but it still interrupted
+ * flow, perhaps on another context)
+ *
+ * kbase_job_check_leave_disjoint() should be used to end the disjoint
+ * state when the soft/hard-stop action is complete
+ */
+void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
+               u16 core_reqs, struct kbase_jd_atom *target_katom)
+{
+       u32 hw_action = action & JS_COMMAND_MASK;
+
+       /* For hard-stop, don't enter if hard-stop not allowed */
+       if (hw_action == JS_COMMAND_HARD_STOP &&
+                       !kbasep_hard_stop_allowed(kbdev, core_reqs))
+               return;
+
+       /* For soft-stop, don't enter if soft-stop not allowed, or isn't
+        * causing disjoint */
+       if (hw_action == JS_COMMAND_SOFT_STOP &&
+                       !(kbasep_soft_stop_allowed(kbdev, core_reqs) &&
+                         (action & JS_COMMAND_SW_CAUSES_DISJOINT)))
+               return;
+
+       /* Nothing to do if already logged disjoint state on this atom */
+       if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT)
+               return;
 
-void kbase_debug_dump_registers(kbase_device *kbdev)
+       target_katom->atom_flags |= KBASE_KATOM_FLAG_IN_DISJOINT;
+       kbase_disjoint_state_up(kbdev);
+}
+
+/**
+ * Work out whether to leave the disjoint state when finishing an atom whose
+ * disjoint state was entered by kbase_job_check_enter_disjoint().
+ */
+void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+               struct kbase_jd_atom *target_katom)
+{
+       if (target_katom->atom_flags & KBASE_KATOM_FLAG_IN_DISJOINT) {
+               target_katom->atom_flags &= ~KBASE_KATOM_FLAG_IN_DISJOINT;
+               kbase_disjoint_state_down(kbdev);
+       }
+}
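
The enter/leave pair above implements an idempotent per-atom flag in front of a device-wide counter. A small stand-alone model of that pattern (names and types simplified for illustration; this is not the driver's code) is:

#include <stdbool.h>
#include <stdio.h>

struct model_device { int disjoint_count; };
struct model_atom   { bool in_disjoint; };

/* Enter at most once per atom, bumping the device-wide count. */
static void check_enter_disjoint(struct model_device *dev, struct model_atom *atom)
{
        if (atom->in_disjoint)
                return;         /* already logged for this atom */
        atom->in_disjoint = true;
        dev->disjoint_count++;  /* kbase_disjoint_state_up() in the driver */
}

/* Leave only if this atom actually entered, so counts stay balanced. */
static void check_leave_disjoint(struct model_device *dev, struct model_atom *atom)
{
        if (!atom->in_disjoint)
                return;
        atom->in_disjoint = false;
        dev->disjoint_count--;  /* kbase_disjoint_state_down() in the driver */
}

int main(void)
{
        struct model_device dev = { 0 };
        struct model_atom atom = { false };

        check_enter_disjoint(&dev, &atom);
        check_enter_disjoint(&dev, &atom);      /* no double-count */
        printf("count after enter x2: %d\n", dev.disjoint_count);   /* 1 */
        check_leave_disjoint(&dev, &atom);
        printf("count after leave:    %d\n", dev.disjoint_count);   /* 0 */
        return 0;
}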
+
+
+#if KBASE_GPU_RESET_EN
+static void kbase_debug_dump_registers(struct kbase_device *kbdev)
 {
        int i;
        dev_err(kbdev->dev, "Register state:");
@@ -1059,9 +1162,9 @@ void kbase_debug_dump_registers(kbase_device *kbdev)
                kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_THROTTLE), NULL));
        for (i = 0; i < 3; i++) {
                dev_err(kbdev->dev, "  JS%d_STATUS=0x%08x      JS%d_HEAD_LO=0x%08x",
-                       i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_STATUS),
+                       i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS),
                                        NULL),
-                       i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JSn_HEAD_LO),
+                       i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO),
                                        NULL));
        }
        dev_err(kbdev->dev, "  MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x",
@@ -1082,16 +1185,16 @@ void kbase_debug_dump_registers(kbase_device *kbdev)
 void kbasep_reset_timeout_worker(struct work_struct *data)
 {
        unsigned long flags;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        int i;
        ktime_t end_timestamp = ktime_get();
-       kbasep_js_device_data *js_devdata;
-       kbase_uk_hwcnt_setup hwcnt_setup = { {0} };
-       kbase_instr_state bckp_state;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbase_uk_hwcnt_setup hwcnt_setup = { {0} };
+       enum kbase_instr_state bckp_state;
 
        KBASE_DEBUG_ASSERT(data);
 
-       kbdev = container_of(data, kbase_device, reset_work);
+       kbdev = container_of(data, struct kbase_device, reset_work);
 
        KBASE_DEBUG_ASSERT(kbdev);
        js_devdata = &kbdev->js_data;
@@ -1106,6 +1209,7 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
                /* This would re-activate the GPU. Since it's already idle, there's no
                 * need to reset it */
                atomic_set(&kbdev->reset_gpu, KBASE_RESET_GPU_NOT_PENDING);
+               kbase_disjoint_state_down(kbdev);
                wake_up(&kbdev->reset_wait);
                return;
        }
@@ -1129,7 +1233,7 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
        }
        /* Save the HW counters setup */
        if (kbdev->hwcnt.kctx != NULL) {
-               kbase_context *kctx = kbdev->hwcnt.kctx;
+               struct kbase_context *kctx = kbdev->hwcnt.kctx;
                hwcnt_setup.dump_buffer = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO), kctx) & 0xffffffff;
                hwcnt_setup.dump_buffer |= (mali_addr64) kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI), kctx) << 32;
                hwcnt_setup.jm_bm = kbase_reg_read(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN), kctx);
@@ -1160,7 +1264,7 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
        spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
        /* Restore the HW counters setup */
        if (kbdev->hwcnt.kctx != NULL) {
-               kbase_context *kctx = kbdev->hwcnt.kctx;
+               struct kbase_context *kctx = kbdev->hwcnt.kctx;
                kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), (kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT) | PRFCNT_CONFIG_MODE_OFF, kctx);
                kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),     hwcnt_setup.dump_buffer & 0xFFFFFFFF, kctx);
                kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),     hwcnt_setup.dump_buffer >> 32,        kctx);
@@ -1182,7 +1286,7 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
                        kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), hwcnt_setup.tiler_bm, kctx);
        }
        kbdev->hwcnt.state = bckp_state;
-       switch(kbdev->hwcnt.state) {
+       switch (kbdev->hwcnt.state) {
        /* Cases for waking kbasep_cache_clean_worker worker */
        case KBASE_INSTR_STATE_CLEANED:
                /* Cache-clean IRQ occurred, but we reset:
@@ -1229,7 +1333,7 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
        spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
        for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
                int nr_done;
-               kbase_jm_slot *slot = &kbdev->jm_slots[i];
+               struct kbase_jm_slot *slot = &kbdev->jm_slots[i];
 
                nr_done = kbasep_jm_nr_jobs_submitted(slot);
                while (nr_done) {
@@ -1245,7 +1349,7 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
        /* Reprogram the GPU's MMU */
        for (i = 0; i < BASE_MAX_NR_AS; i++) {
                if (js_devdata->runpool_irq.per_as_data[i].kctx) {
-                       kbase_as *as = &kbdev->as[i];
+                       struct kbase_as *as = &kbdev->as[i];
                        mutex_lock(&as->transaction_mutex);
                        kbase_mmu_update(js_devdata->runpool_irq.per_as_data[i].kctx);
                        mutex_unlock(&as->transaction_mutex);
@@ -1253,6 +1357,8 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
        }
 
        atomic_set(&kbdev->reset_gpu, KBASE_RESET_GPU_NOT_PENDING);
+
+       kbase_disjoint_state_down(kbdev);
        wake_up(&kbdev->reset_wait);
        dev_err(kbdev->dev, "Reset complete");
 
@@ -1271,6 +1377,7 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
                kbasep_js_try_run_next_job_nolock(kbdev);
                spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
        }
+
        mutex_unlock(&js_devdata->runpool_mutex);
        mutex_unlock(&kbdev->pm.lock);
 
@@ -1280,7 +1387,7 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
 
 enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer)
 {
-       kbase_device *kbdev = container_of(timer, kbase_device, reset_timer);
+       struct kbase_device *kbdev = container_of(timer, struct kbase_device, reset_timer);
 
        KBASE_DEBUG_ASSERT(kbdev);
 
@@ -1296,7 +1403,7 @@ enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer)
  * immediately instead of waiting for the timeout to elapse
  */
 
-static void kbasep_try_reset_gpu_early_locked(kbase_device *kbdev)
+static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev)
 {
        int i;
        int pending_jobs = 0;
@@ -1305,7 +1412,7 @@ static void kbasep_try_reset_gpu_early_locked(kbase_device *kbdev)
 
        /* Count the number of jobs */
        for (i = 0; i < kbdev->gpu_props.num_job_slots; i++) {
-               kbase_jm_slot *slot = &kbdev->jm_slots[i];
+               struct kbase_jm_slot *slot = &kbdev->jm_slots[i];
                pending_jobs += kbasep_jm_nr_jobs_submitted(slot);
        }
 
@@ -1323,10 +1430,10 @@ static void kbasep_try_reset_gpu_early_locked(kbase_device *kbdev)
        queue_work(kbdev->reset_workq, &kbdev->reset_work);
 }
 
-static void kbasep_try_reset_gpu_early(kbase_device *kbdev)
+static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        js_devdata = &kbdev->js_data;
        spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
@@ -1344,7 +1451,7 @@ static void kbasep_try_reset_gpu_early(kbase_device *kbdev)
  *
  * @return See description
  */
-mali_bool kbase_prepare_to_reset_gpu_locked(kbase_device *kbdev)
+mali_bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev)
 {
        int i;
 
@@ -1355,17 +1462,19 @@ mali_bool kbase_prepare_to_reset_gpu_locked(kbase_device *kbdev)
                return MALI_FALSE;
        }
 
+       kbase_disjoint_state_up(kbdev);
+
        for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
                kbase_job_slot_softstop(kbdev, i, NULL);
 
        return MALI_TRUE;
 }
 
-mali_bool kbase_prepare_to_reset_gpu(kbase_device *kbdev)
+mali_bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev)
 {
        unsigned long flags;
        mali_bool ret;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        js_devdata = &kbdev->js_data;
        spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
@@ -1383,7 +1492,7 @@ KBASE_EXPORT_TEST_API(kbase_prepare_to_reset_gpu)
  * After this function is called (or not called if kbase_prepare_to_reset_gpu returned MALI_FALSE),
  * the caller should wait for kbdev->reset_wait to be signalled to know when the reset has completed.
  */
-void kbase_reset_gpu(kbase_device *kbdev)
+void kbase_reset_gpu(struct kbase_device *kbdev)
 {
        u32 timeout_ms;
 
@@ -1402,7 +1511,7 @@ void kbase_reset_gpu(kbase_device *kbdev)
 }
 KBASE_EXPORT_TEST_API(kbase_reset_gpu)
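
Putting the prepare/reset pair together, a caller that wants a reset and then needs to know it finished might look like the following sketch. Driver context is assumed, and the wait expression is an assumption based on the worker above waking kbdev->reset_wait once reset_gpu returns to KBASE_RESET_GPU_NOT_PENDING; it is not a verbatim driver path.

if (kbase_prepare_to_reset_gpu(kbdev)) {
        /* We won the race to schedule the reset; all slots have been
         * soft-stopped by prepare, so commit the reset now. */
        kbase_reset_gpu(kbdev);

        /* Assumed wait: the reset worker wakes reset_wait after setting
         * reset_gpu back to KBASE_RESET_GPU_NOT_PENDING. */
        wait_event(kbdev->reset_wait,
                   atomic_read(&kbdev->reset_gpu) ==
                   KBASE_RESET_GPU_NOT_PENDING);
}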
 
-void kbase_reset_gpu_locked(kbase_device *kbdev)
+void kbase_reset_gpu_locked(struct kbase_device *kbdev)
 {
        u32 timeout_ms;
 
@@ -1419,3 +1528,4 @@ void kbase_reset_gpu_locked(kbase_device *kbdev)
        /* Try resetting early */
        kbasep_try_reset_gpu_early_locked(kbdev);
 }
+#endif /* KBASE_GPU_RESET_EN */
index bd2b70df009b1a467528842405f0d8d8e6076894..d171833813113f4da7b78a8e388b98ec9f762245 100755 (executable)
  *
  */
 
-static INLINE int kbasep_jm_is_js_free(kbase_device *kbdev, int js, kbase_context *kctx)
+static INLINE int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js, struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(0 <= js && js < kbdev->gpu_props.num_job_slots);
 
-       return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), kctx);
+       return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), kctx);
 }
 
 /**
  * This checks that:
- * - there is enough space in the GPU's buffers (JSn_NEXT and JSn_HEAD registers) to accomodate the job.
+ * - there is enough space in the GPU's buffers (JS_NEXT and JS_HEAD registers) to accommodate the job.
  * - there is enough space to track the job in our Submit Slots. Note that we have to maintain space to
  *   requeue one job in case the next registers on the hardware need to be cleared.
  */
-static INLINE mali_bool kbasep_jm_is_submit_slots_free(kbase_device *kbdev, int js, kbase_context *kctx)
+static INLINE mali_bool kbasep_jm_is_submit_slots_free(struct kbase_device *kbdev, int js, struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(0 <= js && js < kbdev->gpu_props.num_job_slots);
@@ -76,7 +76,7 @@ static INLINE mali_bool kbasep_jm_is_submit_slots_free(kbase_device *kbdev, int
 /**
  * Initialize a submit slot
  */
-static INLINE void kbasep_jm_init_submit_slot(kbase_jm_slot *slot)
+static INLINE void kbasep_jm_init_submit_slot(struct kbase_jm_slot *slot)
 {
        slot->submitted_nr = 0;
        slot->submitted_head = 0;
@@ -85,10 +85,10 @@ static INLINE void kbasep_jm_init_submit_slot(kbase_jm_slot *slot)
 /**
  * Find the atom at the idx'th element in the queue without removing it, starting at the head with idx==0.
  */
-static INLINE kbase_jd_atom *kbasep_jm_peek_idx_submit_slot(kbase_jm_slot *slot, u8 idx)
+static INLINE struct kbase_jd_atom *kbasep_jm_peek_idx_submit_slot(struct kbase_jm_slot *slot, u8 idx)
 {
        u8 pos;
-       kbase_jd_atom *katom;
+       struct kbase_jd_atom *katom;
 
        KBASE_DEBUG_ASSERT(idx < BASE_JM_SUBMIT_SLOTS);
 
@@ -101,10 +101,10 @@ static INLINE kbase_jd_atom *kbasep_jm_peek_idx_submit_slot(kbase_jm_slot *slot,
 /**
  * Pop front of the submitted queue
  */
-static INLINE kbase_jd_atom *kbasep_jm_dequeue_submit_slot(kbase_jm_slot *slot)
+static INLINE struct kbase_jd_atom *kbasep_jm_dequeue_submit_slot(struct kbase_jm_slot *slot)
 {
        u8 pos;
-       kbase_jd_atom *katom;
+       struct kbase_jd_atom *katom;
 
        pos = slot->submitted_head & BASE_JM_SUBMIT_SLOTS_MASK;
        katom = slot->submitted[pos];
@@ -122,7 +122,7 @@ static INLINE kbase_jd_atom *kbasep_jm_dequeue_submit_slot(kbase_jm_slot *slot)
 
 /* Pop back of the submitted queue (unsubmit a job)
  */
-static INLINE kbase_jd_atom *kbasep_jm_dequeue_tail_submit_slot(kbase_jm_slot *slot)
+static INLINE struct kbase_jd_atom *kbasep_jm_dequeue_tail_submit_slot(struct kbase_jm_slot *slot)
 {
        u8 pos;
 
@@ -133,7 +133,7 @@ static INLINE kbase_jd_atom *kbasep_jm_dequeue_tail_submit_slot(kbase_jm_slot *s
        return slot->submitted[pos];
 }
 
-static INLINE u8 kbasep_jm_nr_jobs_submitted(kbase_jm_slot *slot)
+static INLINE u8 kbasep_jm_nr_jobs_submitted(struct kbase_jm_slot *slot)
 {
        return slot->submitted_nr;
 }
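
These helpers implement a small power-of-two ring buffer over the submitted[] array. A stand-alone model of the index arithmetic follows; the depth and mask values are invented stand-ins, and the enqueue/pop-back offsets are inferred from the visible dequeue path, so treat them as assumptions rather than the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define SLOTS      16u                   /* stand-in for BASE_JM_SUBMIT_SLOTS */
#define SLOTS_MASK (SLOTS - 1u)          /* stand-in for BASE_JM_SUBMIT_SLOTS_MASK */

struct model_slot {
        int submitted[SLOTS];
        uint8_t submitted_head;          /* index of the oldest entry (masked on use) */
        uint8_t submitted_nr;            /* number of queued entries */
};

static void enqueue(struct model_slot *s, int job)       /* push back */
{
        uint8_t pos = (uint8_t)(s->submitted_head + s->submitted_nr) & SLOTS_MASK;
        s->submitted[pos] = job;
        s->submitted_nr++;
}

static int dequeue_front(struct model_slot *s)           /* job completed */
{
        uint8_t pos = s->submitted_head & SLOTS_MASK;
        s->submitted_head++;
        s->submitted_nr--;
        return s->submitted[pos];
}

static int dequeue_tail(struct model_slot *s)            /* job evicted / unsubmitted */
{
        uint8_t pos = (uint8_t)(s->submitted_head + s->submitted_nr - 1u) & SLOTS_MASK;
        s->submitted_nr--;
        return s->submitted[pos];
}

int main(void)
{
        struct model_slot s = { {0}, 0, 0 };

        enqueue(&s, 1);
        enqueue(&s, 2);
        printf("front: %d\n", dequeue_front(&s));        /* 1 */
        printf("tail:  %d\n", dequeue_tail(&s));         /* 2 */
        return 0;
}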
@@ -141,7 +141,7 @@ static INLINE u8 kbasep_jm_nr_jobs_submitted(kbase_jm_slot *slot)
 /**
  * Push back of the submitted queue
  */
-static INLINE void kbasep_jm_enqueue_submit_slot(kbase_jm_slot *slot, kbase_jd_atom *katom)
+static INLINE void kbasep_jm_enqueue_submit_slot(struct kbase_jm_slot *slot, struct kbase_jd_atom *katom)
 {
        u8 nr;
        u8 pos;
@@ -170,7 +170,7 @@ static INLINE void kbasep_jm_enqueue_submit_slot(kbase_jm_slot *slot, kbase_jd_a
  *            attempt to use it.
  * @return    MALI_FALSE otherwise, and \a atom is safe to use.
  */
-static INLINE mali_bool kbasep_jm_is_dummy_workaround_job(kbase_device *kbdev, kbase_jd_atom *atom)
+static INLINE mali_bool kbasep_jm_is_dummy_workaround_job(struct kbase_device *kbdev, struct kbase_jd_atom *atom)
 {
        /* Query the set of workaround jobs here */
        /* none exists today */
@@ -185,12 +185,12 @@ static INLINE mali_bool kbasep_jm_is_dummy_workaround_job(kbase_device *kbdev, k
  * The following locking conditions are made on the caller:
  * - it must hold the kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_job_submit_nolock(kbase_device *kbdev, kbase_jd_atom *katom, int js);
+void kbase_job_submit_nolock(struct kbase_device *kbdev, struct kbase_jd_atom *katom, int js);
 
 /**
  * @brief Complete the head job on a particular job-slot
  */
-void kbase_job_done_slot(kbase_device *kbdev, int s, u32 completion_code, u64 job_tail, ktime_t *end_timestamp);
+void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code, u64 job_tail, ktime_t *end_timestamp);
 
          /** @} *//* end group kbase_jm */
          /** @} *//* end group base_kbase_api */
index aeeea6893afecee0bf9b6a290263227a3c71acfb..0e9071eaf4970ac9d952762fa559d975b043cc60 100755 (executable)
@@ -28,6 +28,7 @@
 
 #include "mali_kbase_jm.h"
 #include <mali_kbase_defs.h>
+#include <mali_kbase_config_defaults.h>
 
 /*
  * Private types
@@ -45,18 +46,18 @@ typedef u32 kbasep_js_release_result;
 /*
  * Private function prototypes
  */
-STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom);
+STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
 
-STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom);
+STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
 
-STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state);
+STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
 
 /** Helper for trace subcodes */
-#if KBASE_TRACE_ENABLE != 0
-STATIC int kbasep_js_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
+#if KBASE_TRACE_ENABLE
+STATIC int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        int as_nr;
        int refcnt = 0;
 
@@ -65,7 +66,8 @@ STATIC int kbasep_js_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
        spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
        as_nr = kctx->as_nr;
        if (as_nr != KBASEP_AS_NR_INVALID) {
-               kbasep_js_per_as_data *js_per_as_data;
+               struct kbasep_js_per_as_data *js_per_as_data;
+
                js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
 
                refcnt = js_per_as_data->as_busy_refcount;
@@ -74,14 +76,14 @@ STATIC int kbasep_js_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
 
        return refcnt;
 }
-#else                          /* KBASE_TRACE_ENABLE != 0 */
-STATIC int kbasep_js_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
+#else                          /* KBASE_TRACE_ENABLE  */
+STATIC int kbasep_js_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        CSTD_UNUSED(kbdev);
        CSTD_UNUSED(kctx);
        return 0;
 }
-#endif                         /* KBASE_TRACE_ENABLE != 0 */
+#endif                         /* KBASE_TRACE_ENABLE  */
 
 /*
  * Private types
@@ -113,7 +115,7 @@ enum {
  * This function does not sleep.
  */
 
-STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom)
+STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
@@ -128,16 +130,18 @@ STATIC INLINE void kbasep_js_deref_permon_check_and_disable_cycle_counter(kbase_
  *
  * No locks need to be held - locking is handled further down
  *
+ * The L2 Cache must be ON when this function is called
+ *
  * This function does not sleep.
  */
 
-STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(kbase_device *kbdev, kbase_jd_atom *katom)
+STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
 
        if (katom->core_req & BASE_JD_REQ_PERMON)
-               kbase_pm_request_gpu_cycle_counter(kbdev);
+               kbase_pm_request_gpu_cycle_counter_l2_is_on(kbdev);
 }
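
The ref/deref pair keeps a GPU cycle-counter request outstanding for every BASE_JD_REQ_PERMON atom while it is on the hardware. A hedged sketch of how the two helpers are expected to pair up (driver context assumed; the release-side call name is an assumption, since only the enable side appears in this hunk):

/* Submission side (the L2 cache is already powered, per the comment above): */
if (katom->core_req & BASE_JD_REQ_PERMON)
        kbase_pm_request_gpu_cycle_counter_l2_is_on(kbdev);

/* Completion side, in the deref helper: assumed counterpart that drops the
 * reference taken above. */
if (katom->core_req & BASE_JD_REQ_PERMON)
        kbase_pm_release_gpu_cycle_counter(kbdev);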
 
 /*
@@ -145,10 +149,11 @@ STATIC INLINE void kbasep_js_ref_permon_check_and_enable_cycle_counter(kbase_dev
  * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
  * - The caller must hold the kbasep_js_device_data::runpool_mutex
  */
-STATIC INLINE void runpool_inc_context_count(kbase_device *kbdev, kbase_context *kctx)
+STATIC INLINE void runpool_inc_context_count(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
+
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
 
@@ -174,10 +179,11 @@ STATIC INLINE void runpool_inc_context_count(kbase_device *kbdev, kbase_context
  * - The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
  * - The caller must hold the kbasep_js_device_data::runpool_mutex
  */
-STATIC INLINE void runpool_dec_context_count(kbase_device *kbdev, kbase_context *kctx)
+STATIC INLINE void runpool_dec_context_count(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
+
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
 
@@ -213,10 +219,11 @@ STATIC INLINE void runpool_dec_context_count(kbase_device *kbdev, kbase_context
  * - When kctx != NULL the caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
  * - When kctx == NULL, then the caller need not hold any jsctx_mutex locks (but it doesn't do any harm to do so).
  */
-STATIC mali_bool check_is_runpool_full(kbase_device *kbdev, kbase_context *kctx)
+STATIC mali_bool check_is_runpool_full(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool is_runpool_full;
+
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
        js_devdata = &kbdev->js_data;
@@ -244,19 +251,19 @@ STATIC base_jd_core_req core_reqs_from_jsn_features(u16 features) /* JS<n>_FEATU
 {
        base_jd_core_req core_req = 0u;
 
-       if ((features & JSn_FEATURE_SET_VALUE_JOB) != 0)
+       if ((features & JS_FEATURE_SET_VALUE_JOB) != 0)
                core_req |= BASE_JD_REQ_V;
 
-       if ((features & JSn_FEATURE_CACHE_FLUSH_JOB) != 0)
+       if ((features & JS_FEATURE_CACHE_FLUSH_JOB) != 0)
                core_req |= BASE_JD_REQ_CF;
 
-       if ((features & JSn_FEATURE_COMPUTE_JOB) != 0)
+       if ((features & JS_FEATURE_COMPUTE_JOB) != 0)
                core_req |= BASE_JD_REQ_CS;
 
-       if ((features & JSn_FEATURE_TILER_JOB) != 0)
+       if ((features & JS_FEATURE_TILER_JOB) != 0)
                core_req |= BASE_JD_REQ_T;
 
-       if ((features & JSn_FEATURE_FRAGMENT_JOB) != 0)
+       if ((features & JS_FEATURE_FRAGMENT_JOB) != 0)
                core_req |= BASE_JD_REQ_FS;
 
        return core_req;
@@ -273,13 +280,14 @@ STATIC base_jd_core_req core_reqs_from_jsn_features(u16 features) /* JS<n>_FEATU
  * The following locking conditions are made on the caller:
  * - it must hold kbasep_js_device_data::runpool_mutex
  *
- * @return a non-NULL pointer to a kbase_as that is not in use by any other context
+ * @return a non-NULL pointer to a struct kbase_as that is not in use by any other context
  */
-STATIC kbase_as *pick_free_addr_space(kbase_device *kbdev)
+STATIC struct kbase_as *pick_free_addr_space(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
-       kbase_as *current_as;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbase_as *current_as;
        long ffs_result;
+
        js_devdata = &kbdev->js_data;
 
        lockdep_assert_held(&js_devdata->runpool_mutex);
@@ -303,9 +311,9 @@ STATIC kbase_as *pick_free_addr_space(kbase_device *kbdev)
  * The following locking conditions are made on the caller:
  * - it must hold kbasep_js_device_data::runpool_mutex
  */
-STATIC INLINE void release_addr_space(kbase_device *kbdev, int kctx_as_nr)
+STATIC INLINE void release_addr_space(struct kbase_device *kbdev, int kctx_as_nr)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        u16 as_bit = (1u << kctx_as_nr);
 
        js_devdata = &kbdev->js_data;
@@ -331,10 +339,10 @@ STATIC INLINE void release_addr_space(kbase_device *kbdev, int kctx_as_nr)
  * - Caller must hold AS transaction mutex
  * - Caller must hold Runpool IRQ lock
  */
-STATIC void assign_and_activate_kctx_addr_space(kbase_device *kbdev, kbase_context *kctx, kbase_as *current_as)
+STATIC void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_as *current_as)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_per_as_data *js_per_as_data;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_per_as_data *js_per_as_data;
        int as_nr;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -371,12 +379,11 @@ STATIC void assign_and_activate_kctx_addr_space(kbase_device *kbdev, kbase_conte
 
        /* Lastly, add the context to the policy's runpool - this really allows it to run jobs */
        kbasep_js_policy_runpool_add_ctx(&js_devdata->policy, kctx);
-
 }
 
-void kbasep_js_try_run_next_job_nolock(kbase_device *kbdev)
+void kbasep_js_try_run_next_job_nolock(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        int js;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -395,12 +402,13 @@ void kbasep_js_try_run_next_job_nolock(kbase_device *kbdev)
 }
 
 /** Hold the kbasep_js_device_data::runpool_irq::lock for this */
-mali_bool kbasep_js_runpool_retain_ctx_nolock(kbase_device *kbdev, kbase_context *kctx)
+mali_bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_per_as_data *js_per_as_data;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_per_as_data *js_per_as_data;
        mali_bool result = MALI_FALSE;
        int as_nr;
+
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
        js_devdata = &kbdev->js_data;
@@ -425,9 +433,9 @@ mali_bool kbasep_js_runpool_retain_ctx_nolock(kbase_device *kbdev, kbase_context
 /*
  * Functions private to KBase ('Protected' functions)
  */
-void kbase_js_try_run_jobs(kbase_device *kbdev)
+void kbase_js_try_run_jobs(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        unsigned long flags;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -445,10 +453,10 @@ void kbase_js_try_run_jobs(kbase_device *kbdev)
        mutex_unlock(&js_devdata->runpool_mutex);
 }
 
-void kbase_js_try_run_jobs_on_slot(kbase_device *kbdev, int js)
+void kbase_js_try_run_jobs_on_slot(struct kbase_device *kbdev, int js)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        js_devdata = &kbdev->js_data;
@@ -465,9 +473,9 @@ void kbase_js_try_run_jobs_on_slot(kbase_device *kbdev, int js)
        mutex_unlock(&js_devdata->runpool_mutex);
 }
 
-mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
+mali_error kbasep_js_devdata_init(struct kbase_device * const kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        mali_error err;
        int i;
        u16 as_present;
@@ -483,7 +491,8 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
        kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
                mali_bool use_workaround_for_security;
-               use_workaround_for_security = (mali_bool) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE);
+
+               use_workaround_for_security = DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE;
                if (use_workaround_for_security != MALI_FALSE) {
                        dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
                        kbdev->nr_user_address_spaces = 1;
@@ -514,8 +523,8 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
        js_devdata->gpu_reset_ticks_cl = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL);
        js_devdata->gpu_reset_ticks_nss = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS);
        js_devdata->ctx_timeslice_ns = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS);
-       js_devdata->cfs_ctx_runtime_init_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES);
-       js_devdata->cfs_ctx_runtime_min_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES);
+       js_devdata->cfs_ctx_runtime_init_slices = DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES;
+       js_devdata->cfs_ctx_runtime_min_slices = DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES;
 
        dev_dbg(kbdev->dev, "JS Config Attribs: ");
        dev_dbg(kbdev->dev, "\tscheduling_tick_ns:%u", js_devdata->scheduling_tick_ns);
@@ -531,13 +540,13 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
        dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u", js_devdata->cfs_ctx_runtime_init_slices);
        dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u", js_devdata->cfs_ctx_runtime_min_slices);
 
-#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS
        dev_dbg(kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", js_devdata->soft_stop_ticks, js_devdata->scheduling_tick_ns);
 #endif
-#if KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
+#if KBASE_DISABLE_SCHEDULING_HARD_STOPS
        dev_dbg(kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%u at %uns per tick. Other hard-stops may still occur.", js_devdata->hard_stop_ticks_ss, js_devdata->hard_stop_ticks_nss, js_devdata->scheduling_tick_ns);
 #endif
-#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0 && KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
+#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS && KBASE_DISABLE_SCHEDULING_HARD_STOPS
        dev_dbg(kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
 #endif
 
@@ -545,6 +554,7 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
        {
                int irq_throttle_time_us = kbdev->gpu_props.irq_throttle_time_us;
                int irq_throttle_cycles = kbasep_js_convert_us_to_gpu_ticks_max_freq(kbdev, irq_throttle_time_us);
+
                atomic_set(&kbdev->irq_throttle_cycles, irq_throttle_cycles);
        }
 
@@ -575,14 +585,14 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
        return MALI_ERROR_NONE;
 }
 
-void kbasep_js_devdata_halt(kbase_device *kbdev)
+void kbasep_js_devdata_halt(struct kbase_device *kbdev)
 {
        CSTD_UNUSED(kbdev);
 }
 
-void kbasep_js_devdata_term(kbase_device *kbdev)
+void kbasep_js_devdata_term(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -601,10 +611,10 @@ void kbasep_js_devdata_term(kbase_device *kbdev)
        js_devdata->init_status = JS_DEVDATA_INIT_NONE;
 }
 
-mali_error kbasep_js_kctx_init(kbase_context * const kctx)
+mali_error kbasep_js_kctx_init(struct kbase_context * const kctx)
 {
-       kbase_device *kbdev;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbase_device *kbdev;
+       struct kbasep_js_kctx_info *js_kctx_info;
        mali_error err;
 
        KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -643,11 +653,11 @@ mali_error kbasep_js_kctx_init(kbase_context * const kctx)
        return MALI_ERROR_NONE;
 }
 
-void kbasep_js_kctx_term(kbase_context *kctx)
+void kbasep_js_kctx_term(struct kbase_context *kctx)
 {
-       kbase_device *kbdev;
-       kbasep_js_kctx_info *js_kctx_info;
-       kbasep_js_policy *js_policy;
+       struct kbase_device *kbdev;
+       struct kbasep_js_kctx_info *js_kctx_info;
+       union kbasep_js_policy *js_policy;
 
        KBASE_DEBUG_ASSERT(kctx != NULL);
 
@@ -675,11 +685,11 @@ void kbasep_js_kctx_term(kbase_context *kctx)
  * - kbasep_js_kctx_info::ctx::jsctx_mutex
  * - kbasep_js_device_data::runpool_mutex
  */
-STATIC void kbasep_js_runpool_evict_next_jobs(kbase_device *kbdev, kbase_context *kctx)
+STATIC void kbasep_js_runpool_evict_next_jobs(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        unsigned long flags;
        int js;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        js_devdata = &kbdev->js_data;
 
@@ -695,10 +705,10 @@ STATIC void kbasep_js_runpool_evict_next_jobs(kbase_device *kbdev, kbase_context
 
        /* Evict jobs from the NEXT registers */
        for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
-               kbase_jm_slot *slot;
-               kbase_jd_atom *tail;
+               struct kbase_jm_slot *slot;
+               struct kbase_jd_atom *tail;
 
-               if (!kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), NULL)) {
+               if (!kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), NULL)) {
                        /* No job in the NEXT register */
                        continue;
                }
@@ -708,17 +718,18 @@ STATIC void kbasep_js_runpool_evict_next_jobs(kbase_device *kbdev, kbase_context
 
                KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, 1);
                /* Clearing job from next registers */
-               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_COMMAND_NEXT), JSn_COMMAND_NOP, NULL);
+               kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), JS_COMMAND_NOP, NULL);
 
                /* Check to see if we did remove a job from the next registers */
-               if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), NULL) != 0) {
+               if (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL) != 0 || kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL) != 0) {
                        /* The job was successfully cleared from the next registers, requeue it */
-                       kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
+                       struct kbase_jd_atom *dequeued_katom = kbasep_jm_dequeue_tail_submit_slot(slot);
+
                        KBASE_DEBUG_ASSERT(dequeued_katom == tail);
 
                        /* Set the next registers to NULL */
-                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_LO), 0, NULL);
-                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JSn_HEAD_NEXT_HI), 0, NULL);
+                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), 0, NULL);
+                       kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), 0, NULL);
 
                        KBASE_TRACE_ADD_SLOT(kbdev, JM_SLOT_EVICT, dequeued_katom->kctx, dequeued_katom, dequeued_katom->jc, js);
 
@@ -750,15 +761,15 @@ STATIC void kbasep_js_runpool_evict_next_jobs(kbase_device *kbdev, kbase_context
  * - it must \em not hold kbasep_jd_device_data::queue_mutex (again, it's used
  * internally).
  */
-STATIC void kbasep_js_runpool_attempt_fast_start_ctx(kbase_device *kbdev, kbase_context *kctx_new)
+STATIC void kbasep_js_runpool_attempt_fast_start_ctx(struct kbase_device *kbdev, struct kbase_context *kctx_new)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_new;
-       kbasep_js_policy *js_policy;
-       kbasep_js_per_as_data *js_per_as_data;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_new;
+       union kbasep_js_policy *js_policy;
+       struct kbasep_js_per_as_data *js_per_as_data;
        int evict_as_nr;
-       kbasep_js_atom_retained_state katom_retained_state;
+       struct kbasep_js_atom_retained_state katom_retained_state;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -780,12 +791,14 @@ STATIC void kbasep_js_runpool_attempt_fast_start_ctx(kbase_device *kbdev, kbase_
 
        /* If the runpool is full and either there is no specified context or the specified context is not dying, then
           attempt to fast start the specified context or evict the first context with no running jobs. */
-       if (check_is_runpool_full(kbdev, kctx_new) && 
-            (!js_kctx_new || (js_kctx_new && !js_kctx_new->ctx.is_dying))) {
+       if (check_is_runpool_full(kbdev, kctx_new) &&
+                       (!js_kctx_new || (js_kctx_new &&
+                       !js_kctx_new->ctx.is_dying))) {
                /* No free address spaces - attempt to evict non-running lower priority context */
                spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
                for (evict_as_nr = 0; evict_as_nr < kbdev->nr_hw_address_spaces; evict_as_nr++) {
-                       kbase_context *kctx_evict;
+                       struct kbase_context *kctx_evict;
+
                        js_per_as_data = &js_devdata->runpool_irq.per_as_data[evict_as_nr];
                        kctx_evict = js_per_as_data->kctx;
 
@@ -798,6 +811,7 @@ STATIC void kbasep_js_runpool_attempt_fast_start_ctx(kbase_device *kbdev, kbase_
                                if ((kctx_new == NULL) || kbasep_js_policy_ctx_has_priority(js_policy, kctx_evict, kctx_new)) {
                                        mali_bool retain_result;
                                        kbasep_js_release_result release_result;
+
                                        KBASE_TRACE_ADD(kbdev, JS_FAST_START_EVICTS_CTX, kctx_evict, NULL, 0u, (uintptr_t)kctx_new);
 
                                        /* Retain the ctx to work on it - this shouldn't be able to fail */
@@ -846,13 +860,13 @@ STATIC void kbasep_js_runpool_attempt_fast_start_ctx(kbase_device *kbdev, kbase_
                mutex_unlock(&js_kctx_new->ctx.jsctx_mutex);
 }
 
-mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom)
+mali_bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom)
 {
        unsigned long flags;
-       kbasep_js_kctx_info *js_kctx_info;
-       kbase_device *kbdev;
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_policy *js_policy;
+       struct kbasep_js_kctx_info *js_kctx_info;
+       struct kbase_device *kbdev;
+       struct kbasep_js_device_data *js_devdata;
+       union kbasep_js_policy *js_policy;
 
        mali_bool policy_queue_updated = MALI_FALSE;
 
@@ -941,11 +955,11 @@ mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom)
        return policy_queue_updated;
 }
 
-void kbasep_js_remove_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *atom)
+void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom)
 {
-       kbasep_js_kctx_info *js_kctx_info;
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_policy *js_policy;
+       struct kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       union kbasep_js_policy *js_policy;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -965,11 +979,11 @@ void kbasep_js_remove_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_ato
        kbasep_js_policy_deregister_job(js_policy, kctx, atom);
 }
 
-void kbasep_js_remove_cancelled_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom)
+void kbasep_js_remove_cancelled_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
        unsigned long flags;
-       kbasep_js_atom_retained_state katom_retained_state;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_atom_retained_state katom_retained_state;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool attr_state_changed;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -1000,11 +1014,12 @@ void kbasep_js_remove_cancelled_job(kbase_device *kbdev, kbase_context *kctx, kb
        }
 }
 
-mali_bool kbasep_js_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx)
+mali_bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool result;
+
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        js_devdata = &kbdev->js_data;
 
@@ -1017,12 +1032,12 @@ mali_bool kbasep_js_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx)
        return result;
 }
 
-kbase_context *kbasep_js_runpool_lookup_ctx(kbase_device *kbdev, int as_nr)
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
-       kbase_context *found_kctx = NULL;
-       kbasep_js_per_as_data *js_per_as_data;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbase_context *found_kctx = NULL;
+       struct kbasep_js_per_as_data *js_per_as_data;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
@@ -1053,9 +1068,9 @@ kbase_context *kbasep_js_runpool_lookup_ctx(kbase_device *kbdev, int as_nr)
  * - Slots were previously blocked due to affinity restrictions
  * - Submission during IRQ handling failed
  */
-STATIC void kbasep_js_run_jobs_after_ctx_and_atom_release(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state, mali_bool runpool_ctx_attr_change)
+STATIC void kbasep_js_run_jobs_after_ctx_and_atom_release(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state, mali_bool runpool_ctx_attr_change)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -1115,18 +1130,18 @@ STATIC void kbasep_js_run_jobs_after_ctx_and_atom_release(kbase_device *kbdev, k
  * - Caller holds js_kctx_info->ctx.jsctx_mutex
  * - Caller holds js_devdata->runpool_mutex
  */
-STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state)
+STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_info;
-       kbasep_js_policy *js_policy;
-       kbasep_js_per_as_data *js_per_as_data;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
+       union kbasep_js_policy *js_policy;
+       struct kbasep_js_per_as_data *js_per_as_data;
 
        kbasep_js_release_result release_result = 0u;
        mali_bool runpool_ctx_attr_change = MALI_FALSE;
        int kctx_as_nr;
-       kbase_as *current_as;
+       struct kbase_as *current_as;
        int new_ref_count;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -1169,8 +1184,8 @@ STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_dev
 
        KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_RELEASE_CTX, kctx, NULL, 0u, new_ref_count);
 
-       if (new_ref_count == 1 && kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED
-               && !kbase_pm_is_suspending(kbdev) ) {
+       if (new_ref_count == 1 && kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED &&
+                       !kbase_pm_is_suspending(kbdev)) {
                /* Context is kept scheduled into an address space even when there are no jobs, in this case we have
                 * to handle the situation where all jobs have been evicted from the GPU and submission is disabled.
                 *
@@ -1254,11 +1269,11 @@ STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_dev
        return release_result;
 }
 
-void kbasep_js_runpool_requeue_or_kill_ctx(kbase_device *kbdev, kbase_context *kctx, mali_bool has_pm_ref)
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, mali_bool has_pm_ref)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_policy *js_policy;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       union kbasep_js_policy *js_policy;
+       struct kbasep_js_kctx_info *js_kctx_info;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -1295,10 +1310,10 @@ void kbasep_js_runpool_requeue_or_kill_ctx(kbase_device *kbdev, kbase_context *k
        }
 }
 
-void kbasep_js_runpool_release_ctx_and_katom_retained_state(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state)
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
        kbasep_js_release_result release_result;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -1328,9 +1343,9 @@ void kbasep_js_runpool_release_ctx_and_katom_retained_state(kbase_device *kbdev,
        }
 }
 
-void kbasep_js_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx)
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_atom_retained_state katom_retained_state;
+       struct kbasep_js_atom_retained_state katom_retained_state;
 
        kbasep_js_atom_retained_state_init_invalid(&katom_retained_state);
 
@@ -1339,13 +1354,13 @@ void kbasep_js_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx)
 
 /** Variant of kbasep_js_runpool_release_ctx() that doesn't call into
  * kbasep_js_try_schedule_head_ctx() */
-STATIC void kbasep_js_runpool_release_ctx_no_schedule(kbase_device *kbdev, kbase_context *kctx)
+STATIC void kbasep_js_runpool_release_ctx_no_schedule(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
        kbasep_js_release_result release_result;
-       kbasep_js_atom_retained_state katom_retained_state_struct;
-       kbasep_js_atom_retained_state *katom_retained_state = &katom_retained_state_struct;
+       struct kbasep_js_atom_retained_state katom_retained_state_struct;
+       struct kbasep_js_atom_retained_state *katom_retained_state = &katom_retained_state_struct;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -1374,7 +1389,7 @@ STATIC void kbasep_js_runpool_release_ctx_no_schedule(kbase_device *kbdev, kbase
  * @brief Handle retaining cores for power management and affinity management,
  * ensuring that cores are powered up and won't violate affinity restrictions.
  *
- * This function enters at the following @ref kbase_atom_coreref_state states:
+ * This function enters at the following @ref enum kbase_atom_coreref_state states:
  *
  * - NO_CORES_REQUESTED,
  * - WAITING_FOR_REQUESTED_CORES,
@@ -1395,7 +1410,7 @@ STATIC void kbasep_js_runpool_release_ctx_no_schedule(kbase_device *kbdev, kbase
  * violate affinity restrictions.
  *
  */
-STATIC mali_bool kbasep_js_job_check_ref_cores(kbase_device *kbdev, int js, kbase_jd_atom *katom)
+STATIC mali_bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev, int js, struct kbase_jd_atom *katom)
 {
        /* The most recently checked affinity. Having this at this scope allows us
         * to guarantee that we've checked the affinity in this function call. */
@@ -1418,7 +1433,7 @@ STATIC mali_bool kbasep_js_job_check_ref_cores(kbase_device *kbdev, int js, kbas
                                /* No cores are currently available */
                                /* *** BREAK OUT: No state transition *** */
                                break;
-                       }               
+                       }
 
                        chosen_affinity = MALI_TRUE;
 
@@ -1434,7 +1449,7 @@ STATIC mali_bool kbasep_js_job_check_ref_cores(kbase_device *kbdev, int js, kbas
 
                case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
                        {
-                               kbase_pm_cores_ready cores_ready;
+                               enum kbase_pm_cores_ready cores_ready;
                                KBASE_DEBUG_ASSERT(katom->affinity != 0 || (katom->core_req & BASE_JD_REQ_T));
 
                                cores_ready = kbase_pm_register_inuse_cores(kbdev, katom->core_req & BASE_JD_REQ_T, katom->affinity);
@@ -1470,13 +1485,13 @@ STATIC mali_bool kbasep_js_job_check_ref_cores(kbase_device *kbdev, int js, kbas
                                        KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_CORE_REF_REQUEST_ON_RECHECK_FAILED, katom->kctx, katom, katom->jc, js, (u32) recently_chosen_affinity);
                                        /* *** BREAK OUT: Transition to lower state *** */
                                        break;
-                               }               
+                               }
                                chosen_affinity = MALI_TRUE;
                        }
 
                        /* Now see if this requires a different set of cores */
                        if (recently_chosen_affinity != katom->affinity) {
-                               kbase_pm_cores_ready cores_ready;
+                               enum kbase_pm_cores_ready cores_ready;
 
                                kbase_pm_request_cores(kbdev, katom->core_req & BASE_JD_REQ_T, recently_chosen_affinity);
 
@@ -1541,7 +1556,7 @@ STATIC mali_bool kbasep_js_job_check_ref_cores(kbase_device *kbdev, int js, kbas
        return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY);
 }
 
-void kbasep_js_job_check_deref_cores(kbase_device *kbdev, struct kbase_jd_atom *katom)
+void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
@@ -1588,9 +1603,9 @@ void kbasep_js_job_check_deref_cores(kbase_device *kbdev, struct kbase_jd_atom *
 /*
  * Note: this function is quite similar to kbasep_js_try_run_next_job_on_slot()
  */
-mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int js, s8 *submit_count)
+mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(struct kbase_device *kbdev, int js, s8 *submit_count)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool cores_ready;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -1605,7 +1620,7 @@ mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int
                 * and there are jobs to get that match its requirements (see 'break'
                 * statement below) */
                while (*submit_count < KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ && kbasep_jm_is_submit_slots_free(kbdev, js, NULL) != MALI_FALSE) {
-                       kbase_jd_atom *dequeued_atom;
+                       struct kbase_jd_atom *dequeued_atom;
                        mali_bool has_job = MALI_FALSE;
 
                        /* Dequeue a job that matches the requirements */
@@ -1616,16 +1631,19 @@ mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int
                                 * all address spaces, any context whose busy refcount has reached
                                 * zero won't yet be scheduled out whilst we're trying to run jobs
                                 * from it */
-                               kbase_context *parent_ctx = dequeued_atom->kctx;
+                               struct kbase_context *parent_ctx = dequeued_atom->kctx;
                                mali_bool retain_success;
 
                                /* Retain/power up the cores it needs, check if cores are ready */
                                cores_ready = kbasep_js_job_check_ref_cores(kbdev, js, dequeued_atom);
 
-                               if (cores_ready != MALI_TRUE && dequeued_atom->event_code != BASE_JD_EVENT_PM_EVENT) {
-                                       /* The job can't be submitted until the cores are ready, requeue the job */
+                               if (dequeued_atom->event_code == BASE_JD_EVENT_PM_EVENT || cores_ready != MALI_TRUE) {
+                                       /* The job either can't be submitted until the cores are ready, or
+                                        * the job will fail due to the specified core group being unavailable.
+                                        * To avoid recursion this will be handled outside of IRQ context by
+                                        * kbasep_js_try_run_next_job_on_slot_nolock */
                                        kbasep_js_policy_enqueue_job(&kbdev->js_data.policy, dequeued_atom);
-                                       break;
+                                       return MALI_TRUE;
                                }
 
                                /* ASSERT that the Policy picked a job from an allowed context */
@@ -1643,16 +1661,10 @@ mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int
                                /* Check if this job needs the cycle counter enabled before submission */
                                kbasep_js_ref_permon_check_and_enable_cycle_counter(kbdev, dequeued_atom);
 
-                               if (dequeued_atom->event_code == BASE_JD_EVENT_PM_EVENT) {
-                                       dev_warn(kbdev->dev, "Rejecting atom due to BASE_JD_EVENT_PM_EVENT\n");
-                                       /* The job has failed due to the specified core group being unavailable */
-                                       kbase_jd_done(dequeued_atom, js, NULL, 0);
-                               } else {
-                                       /* Submit the job */
-                                       kbase_job_submit_nolock(kbdev, dequeued_atom, js);
+                               /* Submit the job */
+                               kbase_job_submit_nolock(kbdev, dequeued_atom, js);
 
-                                       ++(*submit_count);
-                               }
+                               ++(*submit_count);
                        } else {
                                /* No more jobs - stop submitting for this slot */
                                break;
@@ -1683,9 +1695,9 @@ mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int
        return (mali_bool) (*submit_count >= KBASE_JS_MAX_JOB_SUBMIT_PER_SLOT_PER_IRQ);
 }
 
-void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js)
+void kbasep_js_try_run_next_job_on_slot_nolock(struct kbase_device *kbdev, int js)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool has_job;
        mali_bool cores_ready;
 
@@ -1704,7 +1716,7 @@ void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js)
                 */
                if (kbase_js_can_run_job_on_slot_no_lock(kbdev, js)) {
                        do {
-                               kbase_jd_atom *dequeued_atom;
+                               struct kbase_jd_atom *dequeued_atom;
 
                                /* Dequeue a job that matches the requirements */
                                has_job = kbasep_js_policy_dequeue_job(kbdev, js, &dequeued_atom);
@@ -1714,7 +1726,7 @@ void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js)
                                         * all address spaces, any context whose busy refcount has reached
                                         * zero won't yet be scheduled out whilst we're trying to run jobs
                                         * from it */
-                                       kbase_context *parent_ctx = dequeued_atom->kctx;
+                                       struct kbase_context *parent_ctx = dequeued_atom->kctx;
                                        mali_bool retain_success;
 
                                        /* Retain/power up the cores it needs, check if cores are ready */
@@ -1755,14 +1767,14 @@ void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js)
        }
 }
 
-void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev)
+void kbasep_js_try_schedule_head_ctx(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool has_kctx;
-       kbase_context *head_kctx;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbase_context *head_kctx;
+       struct kbasep_js_kctx_info *js_kctx_info;
        mali_bool is_runpool_full;
-       kbase_as *new_address_space;
+       struct kbase_as *new_address_space;
        unsigned long flags;
        mali_bool head_kctx_suspended = MALI_FALSE;
        int pm_active_err;
@@ -1823,7 +1835,7 @@ void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev)
 
        KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_TRY_SCHEDULE_HEAD_CTX, head_kctx, NULL, 0u, kbasep_js_trace_get_refcnt(kbdev, head_kctx));
 
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
        if (js_devdata->nr_user_contexts_running == 0) {
                /* Only when there are no other contexts submitting jobs:
                 * Latch in run-time job scheduler timeouts that were set through js_timeouts sysfs file */
@@ -1890,6 +1902,7 @@ void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev)
        if (kbase_pm_is_suspending(kbdev)) {
                /* Cause it to leave at some later point */
                mali_bool retained;
+
                retained = kbasep_js_runpool_retain_ctx_nolock(kbdev, head_kctx);
                KBASE_DEBUG_ASSERT(retained);
                kbasep_js_clear_submit_allowed(js_devdata, head_kctx);
@@ -1914,13 +1927,12 @@ void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev)
                 * to prevent a risk of recursion back into this function */
                kbasep_js_runpool_release_ctx_no_schedule(kbdev, head_kctx);
        }
-       return;
 }
 
-void kbasep_js_schedule_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_kctx_info *js_kctx_info;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool is_scheduled;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -1973,13 +1985,13 @@ void kbasep_js_schedule_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
                /* Already scheduled in - We need to retain it to keep the corresponding address space */
                kbasep_js_runpool_retain_ctx(kbdev, kctx);
                mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-
        }
 }
 
-void kbasep_js_release_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_kctx_info *js_kctx_info;
+
        KBASE_DEBUG_ASSERT(kctx != NULL);
        js_kctx_info = &kctx->jctx.sched_info;
 
@@ -1994,17 +2006,16 @@ void kbasep_js_release_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
        kbasep_js_runpool_release_ctx(kbdev, kctx);
 }
 
-void kbasep_js_job_done_slot_irq(kbase_jd_atom *katom, int slot_nr,
-                                 ktime_t *end_timestamp,
-                                 kbasep_js_atom_done_code done_code)
+void kbasep_js_job_done_slot_irq(struct kbase_jd_atom *katom, int slot_nr,
+               ktime_t *end_timestamp, kbasep_js_atom_done_code done_code)
 {
-       kbase_device *kbdev;
-       kbasep_js_policy *js_policy;
-       kbasep_js_device_data *js_devdata;
+       struct kbase_device *kbdev;
+       union kbasep_js_policy *js_policy;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool submit_retry_needed = MALI_TRUE;      /* If we don't start jobs here, start them from the workqueue */
        ktime_t tick_diff;
        u64 microseconds_spent = 0u;
-       kbase_context *parent_ctx;
+       struct kbase_context *parent_ctx;
 
        KBASE_DEBUG_ASSERT(katom);
        parent_ctx = katom->kctx;
@@ -2075,10 +2086,10 @@ void kbasep_js_job_done_slot_irq(kbase_jd_atom *katom, int slot_nr,
        }
 }
 
-void kbasep_js_suspend(kbase_device *kbdev)
+void kbasep_js_suspend(struct kbase_device *kbdev)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        int i;
        u16 retained = 0u;
        int nr_privileged_ctx = 0;
@@ -2094,8 +2105,9 @@ void kbasep_js_suspend(kbase_device *kbdev)
        /* Retain each of the contexts, so we can cause it to leave even if it had
         * no refcount to begin with */
        for (i = BASE_MAX_NR_AS - 1; i >= 0; --i) {
-               kbasep_js_per_as_data *js_per_as_data = &js_devdata->runpool_irq.per_as_data[i];
-               kbase_context *kctx = js_per_as_data->kctx;
+               struct kbasep_js_per_as_data *js_per_as_data = &js_devdata->runpool_irq.per_as_data[i];
+               struct kbase_context *kctx = js_per_as_data->kctx;
+
                retained = retained << 1;
 
                if (kctx) {
@@ -2116,20 +2128,21 @@ void kbasep_js_suspend(kbase_device *kbdev)
        for (i = 0;
                 i < BASE_MAX_NR_AS;
                 ++i, retained = retained >> 1) {
-               kbasep_js_per_as_data *js_per_as_data = &js_devdata->runpool_irq.per_as_data[i];
-               kbase_context *kctx = js_per_as_data->kctx;
+               struct kbasep_js_per_as_data *js_per_as_data = &js_devdata->runpool_irq.per_as_data[i];
+               struct kbase_context *kctx = js_per_as_data->kctx;
 
                if (retained & 1u)
-                       kbasep_js_runpool_release_ctx(kbdev,kctx);
+                       kbasep_js_runpool_release_ctx(kbdev, kctx);
        }
 
        /* Caller must wait for all Power Manager active references to be dropped */
 }
 
-void kbasep_js_resume(kbase_device *kbdev)
+void kbasep_js_resume(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        int i;
+
        KBASE_DEBUG_ASSERT(kbdev);
        js_devdata = &kbdev->js_data;
 
@@ -2137,8 +2150,7 @@ void kbasep_js_resume(kbase_device *kbdev)
 
        /* Schedule in as many contexts as address spaces. This also starts atoms. */
        for (i = 0 ; i < kbdev->nr_hw_address_spaces; ++i)
-       {
                kbasep_js_try_schedule_head_ctx(kbdev);
-       }
+
        /* JS Resume complete */
 }
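 /*
  * Illustrative usage sketch (not part of this patch): the expected pairing of
  * the two functions above across a Power Management suspend/resume cycle.
  * How the intermediate wait is performed is left open here and is an
  * assumption of the sketch; this file only requires that all Power Manager
  * active references have been dropped before resuming.
  *
  *      kbasep_js_suspend(kbdev);
  *      // ... wait for all Power Manager active references to be dropped ...
  *      kbasep_js_resume(kbdev);
  */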
index 7e325c2a0b28cdbe0a8f8755d434dd3bd59b36eb..3eb01c38e0572e2e217b63f4b6d8ee9aace8f905 100755 (executable)
 /**
  * @brief Initialize the Job Scheduler
  *
- * The kbasep_js_device_data sub-structure of \a kbdev must be zero
+ * The struct kbasep_js_device_data sub-structure of \a kbdev must be zero
  * initialized before passing to the kbasep_js_devdata_init() function. This is
  * to give efficient error path code.
  */
-mali_error kbasep_js_devdata_init(kbase_device * const kbdev);
+mali_error kbasep_js_devdata_init(struct kbase_device * const kbdev);
 
 /**
  * @brief Halt the Job Scheduler.
@@ -68,7 +68,7 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev);
  * sub-structure was never initialized/failed initialization, to give efficient
  * error-path code.
  *
- * For this to work, the kbasep_js_device_data sub-structure of \a kbdev must
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
  * be zero initialized before passing to the kbasep_js_devdata_init()
  * function. This is to give efficient error path code.
  *
@@ -76,7 +76,7 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev);
  * structures registered with this scheduler.
  *
  */
-void kbasep_js_devdata_halt(kbase_device *kbdev);
+void kbasep_js_devdata_halt(struct kbase_device *kbdev);
 
 /**
  * @brief Terminate the Job Scheduler
@@ -85,44 +85,44 @@ void kbasep_js_devdata_halt(kbase_device *kbdev);
  * sub-structure was never initialized/failed initialization, to give efficient
  * error-path code.
  *
- * For this to work, the kbasep_js_device_data sub-structure of \a kbdev must
+ * For this to work, the struct kbasep_js_device_data sub-structure of \a kbdev must
  * be zero initialized before passing to the kbasep_js_devdata_init()
  * function. This is to give efficient error path code.
  *
  * It is a Programming Error to call this whilst there are still kbase_context
  * structures registered with this scheduler.
  */
-void kbasep_js_devdata_term(kbase_device *kbdev);
+void kbasep_js_devdata_term(struct kbase_device *kbdev);
 
 /**
- * @brief Initialize the Scheduling Component of a kbase_context on the Job Scheduler.
+ * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
  *
- * This effectively registers a kbase_context with a Job Scheduler.
+ * This effectively registers a struct kbase_context with a Job Scheduler.
  *
- * It does not register any jobs owned by the kbase_context with the scheduler.
+ * It does not register any jobs owned by the struct kbase_context with the scheduler.
  * Those must be separately registered by kbasep_js_add_job().
  *
- * The kbase_context must be zero initialized before passing to the
+ * The struct kbase_context must be zero initialized before passing to the
  * kbase_js_init() function. This is to give efficient error path code.
  */
-mali_error kbasep_js_kctx_init(kbase_context * const kctx);
+mali_error kbasep_js_kctx_init(struct kbase_context * const kctx);
 
 /**
- * @brief Terminate the Scheduling Component of a kbase_context on the Job Scheduler
+ * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
  *
- * This effectively de-registers a kbase_context from its Job Scheduler
+ * This effectively de-registers a struct kbase_context from its Job Scheduler
  *
- * It is safe to call this on a kbase_context that has never had or failed
+ * It is safe to call this on a struct kbase_context that has never had or failed
  * initialization of its jctx.sched_info member, to give efficient error-path
  * code.
  *
- * For this to work, the kbase_context must be zero initialized before passing
+ * For this to work, the struct kbase_context must be zero initialized before passing
  * to the kbase_js_init() function.
  *
  * It is a Programming Error to call this whilst there are still jobs
  * registered with this context.
  */
-void kbasep_js_kctx_term(kbase_context *kctx);
+void kbasep_js_kctx_term(struct kbase_context *kctx);
 
 /**
  * @brief Add a job chain to the Job Scheduler, and take necessary actions to
@@ -165,7 +165,7 @@ void kbasep_js_kctx_term(kbase_context *kctx);
  * so no further action is required from the caller. This is \b always returned
  * when the context is currently scheduled.
  */
-mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom);
+mali_bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);
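 /*
  * Illustrative usage sketch (not part of this patch): a hypothetical caller
  * of kbasep_js_add_job(). Interpreting a MALI_TRUE return as "the policy
  * queue changed, so kick the scheduler", and choosing
  * kbasep_js_try_schedule_head_ctx() as the follow-up, are assumptions based
  * on the return-value description above rather than code shown here.
  *
  *      if (kbasep_js_add_job(kctx, katom) != MALI_FALSE)
  *              kbasep_js_try_schedule_head_ctx(kbdev);
  */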
 
 /**
  * @brief Remove a job chain from the Job Scheduler, except for its 'retained state'.
@@ -195,7 +195,7 @@ mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom);
  * - it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
  *
  */
-void kbasep_js_remove_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *atom);
+void kbasep_js_remove_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *atom);
 
 /**
  * @brief Completely remove a job chain from the Job Scheduler, in the case
@@ -220,7 +220,7 @@ void kbasep_js_remove_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_ato
  * - it must \em not hold kbasep_js_device_data::runpool_mutex (as this could be
  * obtained internally)
  */
-void kbasep_js_remove_cancelled_job(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom);
+void kbasep_js_remove_cancelled_job(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
 
 /**
  * @brief Refcount a context as being busy, preventing it from being scheduled
@@ -235,7 +235,7 @@ void kbasep_js_remove_cancelled_job(kbase_device *kbdev, kbase_context *kctx, kb
  * @return value != MALI_FALSE if the retain succeeded, and the context will not be scheduled out.
  * @return MALI_FALSE if the retain failed (because the context is being/has been scheduled out).
  */
-mali_bool kbasep_js_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx);
+mali_bool kbasep_js_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
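 /*
  * Illustrative usage sketch (not part of this patch): a successful retain
  * pairs with kbasep_js_runpool_release_ctx(), declared later in this header,
  * which drops the refcount again. The work done while the context is pinned
  * is a placeholder assumption.
  *
  *      if (kbasep_js_runpool_retain_ctx(kbdev, kctx) != MALI_FALSE) {
  *              // ... context cannot be scheduled out here ...
  *              kbasep_js_runpool_release_ctx(kbdev, kctx);
  *      }
  */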
 
 /**
  * @brief Refcount a context as being busy, preventing it from being scheduled
@@ -249,7 +249,7 @@ mali_bool kbasep_js_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx)
  * @return value != MALI_FALSE if the retain succeeded, and the context will not be scheduled out.
  * @return MALI_FALSE if the retain failed (because the context is being/has been scheduled out).
  */
-mali_bool kbasep_js_runpool_retain_ctx_nolock(kbase_device *kbdev, kbase_context *kctx);
+mali_bool kbasep_js_runpool_retain_ctx_nolock(struct kbase_device *kbdev, struct kbase_context *kctx);
 
 /**
  * @brief Lookup a context in the Run Pool based upon its current address space
@@ -265,10 +265,10 @@ mali_bool kbasep_js_runpool_retain_ctx_nolock(kbase_device *kbdev, kbase_context
  * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
  * it will be used internally.
  *
- * @return a valid kbase_context on success, which has been refcounted as being busy.
+ * @return a valid struct kbase_context on success, which has been refcounted as being busy.
  * @return NULL on failure, indicating that no context was found in \a as_nr
  */
-kbase_context *kbasep_js_runpool_lookup_ctx(kbase_device *kbdev, int as_nr);
+struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev, int as_nr);
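 /*
  * Illustrative usage sketch (not part of this patch): because a successful
  * lookup returns a context already refcounted as busy, the sketch assumes
  * the caller drops that reference with kbasep_js_runpool_release_ctx() once
  * it has finished with the context.
  *
  *      struct kbase_context *kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_nr);
  *
  *      if (kctx != NULL) {
  *              // ... use kctx while it is pinned in address space as_nr ...
  *              kbasep_js_runpool_release_ctx(kbdev, kctx);
  *      }
  */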
 
 /**
  * @brief Handling the requeuing/killing of a context that was evicted from the
@@ -294,7 +294,7 @@ kbase_context *kbasep_js_runpool_lookup_ctx(kbase_device *kbdev, int as_nr);
  * - it must \em not hold kbasep_jd_device_data::queue_mutex (as this will be
  * obtained internally)
  */
-void kbasep_js_runpool_requeue_or_kill_ctx(kbase_device *kbdev, kbase_context *kctx, mali_bool has_pm_ref);
+void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, mali_bool has_pm_ref);
 
 /**
  * @brief Release a refcount of a context being busy, allowing it to be
@@ -345,7 +345,7 @@ void kbasep_js_runpool_requeue_or_kill_ctx(kbase_device *kbdev, kbase_context *k
  * obtained internally)
  *
  */
-void kbasep_js_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx);
+void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
 
 /**
  * @brief Variant of kbasep_js_runpool_release_ctx() that handles additional
@@ -363,7 +363,7 @@ void kbasep_js_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx);
  * The locking conditions of this function are the same as those for
  * kbasep_js_runpool_release_ctx()
  */
-void kbasep_js_runpool_release_ctx_and_katom_retained_state(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state);
+void kbasep_js_runpool_release_ctx_and_katom_retained_state(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
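 /*
  * Illustrative usage sketch (not part of this patch): a hypothetical sequence
  * built from the helpers declared further down in this header. The atom's
  * state is snapshotted before the atom is completed, so the release can still
  * act on its requirements afterwards; where exactly the completion happens is
  * an assumption of the sketch.
  *
  *      struct kbasep_js_atom_retained_state katom_retained_state;
  *
  *      kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
  *      // ... complete the atom ...
  *      kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
  *                      &katom_retained_state);
  */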
 
 /**
  * @brief Try to submit the next job on a \b particular slot whilst in IRQ
@@ -387,7 +387,7 @@ void kbasep_js_runpool_release_ctx_and_katom_retained_state(kbase_device *kbdev,
  * full of jobs in the HEAD and NEXT registers, or we were able to get enough
  * jobs from the Run Pool to fill the GPU's HEAD and NEXT registers.
  */
-mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int js, s8 *submit_count);
+mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(struct kbase_device *kbdev, int js, s8 *submit_count);
 
 /**
  * @brief Try to submit the next job on a particular slot, outside of IRQ context
@@ -415,7 +415,7 @@ mali_bool kbasep_js_try_run_next_job_on_slot_irq_nolock(kbase_device *kbdev, int
  * kbasep_js_kctx_info::ctx::jsctx_mutex locks.
  *
  */
-void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js);
+void kbasep_js_try_run_next_job_on_slot_nolock(struct kbase_device *kbdev, int js);
 
 /**
  * @brief Try to submit the next job for each slot in the system, outside of IRQ context
@@ -431,7 +431,7 @@ void kbasep_js_try_run_next_job_on_slot_nolock(kbase_device *kbdev, int js);
  * kbasep_js_kctx_info::ctx::jsctx_mutex locks.
  *
  */
-void kbasep_js_try_run_next_job_nolock(kbase_device *kbdev);
+void kbasep_js_try_run_next_job_nolock(struct kbase_device *kbdev);
 
 /**
  * @brief Try to schedule the next context onto the Run Pool
@@ -474,7 +474,7 @@ void kbasep_js_try_run_next_job_nolock(kbase_device *kbdev);
  * be used internally.
  *
  */
-void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev);
+void kbasep_js_try_schedule_head_ctx(struct kbase_device *kbdev);
 
 /**
  * @brief Schedule in a privileged context
@@ -496,7 +496,7 @@ void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev);
  * be used internally.
  *
  */
-void kbasep_js_schedule_privileged_ctx(kbase_device *kbdev, kbase_context *kctx);
+void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
 
 /**
  * @brief Release a privileged context, allowing it to be scheduled out.
@@ -512,7 +512,7 @@ void kbasep_js_schedule_privileged_ctx(kbase_device *kbdev, kbase_context *kctx)
  * - it must \em not hold the kbase_device::as[n].transaction_mutex (as this will be obtained internally)
  *
  */
-void kbasep_js_release_privileged_ctx(kbase_device *kbdev, kbase_context *kctx);
+void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
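 /*
  * Illustrative usage sketch (not part of this patch): the schedule/release
  * pairing for a privileged context, for an operation that needs the context
  * to remain scheduled in an address space for its whole duration. The work
  * done in between is a placeholder assumption.
  *
  *      kbasep_js_schedule_privileged_ctx(kbdev, kctx);
  *      // ... operation requiring the context to stay scheduled ...
  *      kbasep_js_release_privileged_ctx(kbdev, kctx);
  */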
 
 /**
  * @brief Handle the Job Scheduler component for the IRQ of a job finishing
@@ -531,7 +531,7 @@ void kbasep_js_release_privileged_ctx(kbase_device *kbdev, kbase_context *kctx);
  * Normally, the time calculated from end_timestamp is rounded up to the
  * minimum time precision. Therefore, to ensure the job is recorded as not
  * spending any time, set end_timestamp to NULL. For example, this is necessary when
- * evicting jobs from JSn_HEAD_NEXT (because they didn't actually run).
+ * evicting jobs from JS_HEAD_NEXT (because they didn't actually run).
  *
  * NOTE: It's possible to move the steps (2) and (3) (inc calculating job's time
  * used) into the worker (outside of IRQ context), but this may allow a context
@@ -542,7 +542,7 @@ void kbasep_js_release_privileged_ctx(kbase_device *kbdev, kbase_context *kctx);
  * The following locking conditions are made on the caller:
  * - it must hold kbasep_js_device_data::runpool_irq::lock
  */
-void kbasep_js_job_done_slot_irq(kbase_jd_atom *katom, int slot_nr,
+void kbasep_js_job_done_slot_irq(struct kbase_jd_atom *katom, int slot_nr,
                                  ktime_t *end_timestamp,
                                  kbasep_js_atom_done_code done_code);
 
@@ -553,7 +553,7 @@ void kbasep_js_job_done_slot_irq(kbase_jd_atom *katom, int slot_nr,
  * - kbasep_js_device_data::runpool_mutex
  * - kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_js_try_run_jobs(kbase_device *kbdev);
+void kbase_js_try_run_jobs(struct kbase_device *kbdev);
 
 /**
  * @brief Try to submit the next job on a specific slot
@@ -566,7 +566,7 @@ void kbase_js_try_run_jobs(kbase_device *kbdev);
  * will be obtained internally)
  *
  */
-void kbase_js_try_run_jobs_on_slot(kbase_device *kbdev, int js);
+void kbase_js_try_run_jobs_on_slot(struct kbase_device *kbdev, int js);
 
 /**
  * @brief Handle releasing cores for power management and affinity management,
@@ -575,7 +575,7 @@ void kbase_js_try_run_jobs_on_slot(kbase_device *kbdev, int js);
  * This must only be called on an atom that is not currently running, and has
  * not been re-queued onto the context (and so does not need locking)
  *
- * This function enters at the following @ref kbase_atom_coreref_state states:
+ * This function enters at the following @ref enum kbase_atom_coreref_state states:
  * - NO_CORES_REQUESTED
  * - WAITING_FOR_REQUESTED_CORES
  * - RECHECK_AFFINITY
@@ -591,7 +591,7 @@ void kbase_js_try_run_jobs_on_slot(kbase_device *kbdev, int js);
  * @note The corresponding kbasep_js_job_check_ref_cores() is private to the
  * Job Scheduler, and is called automatically when running the next job.
  */
-void kbasep_js_job_check_deref_cores(kbase_device *kbdev, struct kbase_jd_atom *katom);
+void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
 
 /**
  * @brief Suspend the job scheduler during a Power Management Suspend event.
@@ -614,7 +614,7 @@ void kbasep_js_job_check_deref_cores(kbase_device *kbdev, struct kbase_jd_atom *
  * function is guaranteed to complete in a finite time whenever the Job
  * Scheduling Policy implements Job Timeouts (such as those done by CFS).
  */
-void kbasep_js_suspend(kbase_device *kbdev);
+void kbasep_js_suspend(struct kbase_device *kbdev);
 
 /**
  * @brief Resume the Job Scheduler after a Power Management Resume event.
@@ -623,7 +623,7 @@ void kbasep_js_suspend(kbase_device *kbdev);
  * - Schedules contexts back into the runpool
  * - Resumes running atoms on the GPU
  */
-void kbasep_js_resume(kbase_device *kbdev);
+void kbasep_js_resume(struct kbase_device *kbdev);
 
 
 /*
@@ -640,7 +640,7 @@ void kbasep_js_resume(kbase_device *kbdev);
  *
  * The caller must hold kbasep_js_device_data::runpool_irq::lock.
  */
-static INLINE mali_bool kbasep_js_is_submit_allowed(kbasep_js_device_data *js_devdata, kbase_context *kctx)
+static INLINE mali_bool kbasep_js_is_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
 {
        u16 test_bit;
 
@@ -661,7 +661,7 @@ static INLINE mali_bool kbasep_js_is_submit_allowed(kbasep_js_device_data *js_de
  *
  * The caller must hold kbasep_js_device_data::runpool_irq::lock.
  */
-static INLINE void kbasep_js_set_submit_allowed(kbasep_js_device_data *js_devdata, kbase_context *kctx)
+static INLINE void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
 {
        u16 set_bit;
 
@@ -684,7 +684,7 @@ static INLINE void kbasep_js_set_submit_allowed(kbasep_js_device_data *js_devdat
  *
  * The caller must hold kbasep_js_device_data::runpool_irq::lock.
  */
-static INLINE void kbasep_js_clear_submit_allowed(kbasep_js_device_data *js_devdata, kbase_context *kctx)
+static INLINE void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *js_devdata, struct kbase_context *kctx)
 {
        u16 clear_bit;
        u16 clear_mask;
@@ -704,7 +704,7 @@ static INLINE void kbasep_js_clear_submit_allowed(kbasep_js_device_data *js_devd
 /**
  * @brief Manage the 'retry_submit_on_slot' part of a kbase_jd_atom
  */
-static INLINE void kbasep_js_clear_job_retry_submit(kbase_jd_atom *atom)
+static INLINE void kbasep_js_clear_job_retry_submit(struct kbase_jd_atom *atom)
 {
        atom->retry_submit_on_slot = KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID;
 }
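 /*
  * Illustrative usage sketch (not part of this patch): per the description of
  * kbasep_js_set_job_retry_submit_slot() just below, an atom already marked
  * for retry on a different slot must be cleared first, otherwise the ASSERT
  * in the setter fires. The slot number js is a placeholder assumption.
  *
  *      kbasep_js_clear_job_retry_submit(atom);
  *      kbasep_js_set_job_retry_submit_slot(atom, js);
  */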
@@ -722,7 +722,7 @@ static INLINE void kbasep_js_clear_job_retry_submit(kbase_jd_atom *atom)
  * submitted on some other slot, then call kbasep_js_clear_job_retry_submit()
  * first to silence the ASSERT.
  */
-static INLINE void kbasep_js_set_job_retry_submit_slot(kbase_jd_atom *atom, int js)
+static INLINE void kbasep_js_set_job_retry_submit_slot(struct kbase_jd_atom *atom, int js)
 {
        KBASE_DEBUG_ASSERT(0 <= js && js <= BASE_JM_MAX_NR_SLOTS);
        KBASE_DEBUG_ASSERT(atom->retry_submit_on_slot == KBASEP_JS_RETRY_SUBMIT_SLOT_INVALID
@@ -736,7 +736,7 @@ static INLINE void kbasep_js_set_job_retry_submit_slot(kbase_jd_atom *atom, int
  * atom-related work to be done on releasing with
  * kbasep_js_runpool_release_ctx_and_katom_retained_state()
  */
-static INLINE void kbasep_js_atom_retained_state_init_invalid(kbasep_js_atom_retained_state *retained_state)
+static INLINE void kbasep_js_atom_retained_state_init_invalid(struct kbasep_js_atom_retained_state *retained_state)
 {
        retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
        retained_state->core_req = KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
@@ -747,7 +747,7 @@ static INLINE void kbasep_js_atom_retained_state_init_invalid(kbasep_js_atom_ret
  * Copy atom state that can be made available after jd_done_nolock() is called
  * on that atom.
  */
-static INLINE void kbasep_js_atom_retained_state_copy(kbasep_js_atom_retained_state *retained_state, const kbase_jd_atom *katom)
+static INLINE void kbasep_js_atom_retained_state_copy(struct kbasep_js_atom_retained_state *retained_state, const struct kbase_jd_atom *katom)
 {
        retained_state->event_code = katom->event_code;
        retained_state->core_req = katom->core_req;
@@ -766,27 +766,27 @@ static INLINE void kbasep_js_atom_retained_state_copy(kbasep_js_atom_retained_st
  * @return    MALI_FALSE if the atom has not finished
  * @return    !=MALI_FALSE if the atom has finished
  */
-static INLINE mali_bool kbasep_js_has_atom_finished(const kbasep_js_atom_retained_state *katom_retained_state)
+static INLINE mali_bool kbasep_js_has_atom_finished(const struct kbasep_js_atom_retained_state *katom_retained_state)
 {
        return (mali_bool) (katom_retained_state->event_code != BASE_JD_EVENT_STOPPED && katom_retained_state->event_code != BASE_JD_EVENT_REMOVED_FROM_NEXT);
 }
 
 /**
- * @brief Determine whether a kbasep_js_atom_retained_state is valid
+ * @brief Determine whether a struct kbasep_js_atom_retained_state is valid
  *
- * An invalid kbasep_js_atom_retained_state is allowed, and indicates that the
+ * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates that the
  * code should just ignore it.
  *
  * @param[in] katom_retained_state the atom's retained state to check
  * @return    MALI_FALSE if the retained state is invalid, and can be ignored
  * @return    !=MALI_FALSE if the retained state is valid
  */
-static INLINE mali_bool kbasep_js_atom_retained_state_is_valid(const kbasep_js_atom_retained_state *katom_retained_state)
+static INLINE mali_bool kbasep_js_atom_retained_state_is_valid(const struct kbasep_js_atom_retained_state *katom_retained_state)
 {
        return (mali_bool) (katom_retained_state->core_req != KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
 }
 
-static INLINE mali_bool kbasep_js_get_atom_retry_submit_slot(const kbasep_js_atom_retained_state *katom_retained_state, int *res)
+static INLINE mali_bool kbasep_js_get_atom_retry_submit_slot(const struct kbasep_js_atom_retained_state *katom_retained_state, int *res)
 {
        int js = katom_retained_state->retry_submit_on_slot;
        *res = js;
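
Together, the three helpers above support the pattern of snapshotting an atom's state before the atom can be freed and interrogating the copy afterwards. A hedged usage sketch (the point at which the atom becomes unsafe to touch is indicated by a comment rather than a concrete call):

        /* Sketch: copy what is needed while the atom is still valid, then use the copy. */
        struct kbasep_js_atom_retained_state retained;
        int js;

        kbasep_js_atom_retained_state_copy(&retained, katom);
        /* ... jd_done_nolock() or similar runs here; katom may be freed afterwards ... */

        if (kbasep_js_atom_retained_state_is_valid(&retained) &&
            kbasep_js_has_atom_finished(&retained)) {
                if (kbasep_js_get_atom_retry_submit_slot(&retained, &js)) {
                        /* resubmission on slot 'js' would be attempted here */
                }
        }
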
@@ -803,10 +803,10 @@ static INLINE mali_bool kbasep_js_get_atom_retry_submit_slot(const kbasep_js_ato
  * @return current refcount of the context if it is scheduled in. The refcount
  * is not guaranteed to be kept constant.
  */
-static INLINE int kbasep_js_debug_check_ctx_refcount(kbase_device *kbdev, kbase_context *kctx)
+static INLINE int kbasep_js_debug_check_ctx_refcount(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        int result = -1;
        int as_nr;
 
@@ -837,16 +837,16 @@ static INLINE int kbasep_js_debug_check_ctx_refcount(kbase_device *kbdev, kbase_
  * - it must \em not hold the kbasep_js_device_data::runpool_irq::lock, because
  * it will be used internally.
  *
- * @return a valid kbase_context on success, with a refcount that is guarenteed
+ * @return a valid struct kbase_context on success, with a refcount that is guaranteed
  * to be non-zero and unmodified by this function.
  * @return NULL on failure, indicating that no context was found in \a as_nr
  */
-static INLINE kbase_context *kbasep_js_runpool_lookup_ctx_noretain(kbase_device *kbdev, int as_nr)
+static INLINE struct kbase_context *kbasep_js_runpool_lookup_ctx_noretain(struct kbase_device *kbdev, int as_nr)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
-       kbase_context *found_kctx;
-       kbasep_js_per_as_data *js_per_as_data;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbase_context *found_kctx;
+       struct kbasep_js_per_as_data *js_per_as_data;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(0 <= as_nr && as_nr < BASE_MAX_NR_AS);
@@ -871,7 +871,7 @@ static INLINE kbase_context *kbasep_js_runpool_lookup_ctx_noretain(kbase_device
  * e.g.: when you need the number of cycles to guarantee you won't wait for
  * longer than 'us' time (you might have a shorter wait).
  */
-static INLINE u32 kbasep_js_convert_us_to_gpu_ticks_min_freq(kbase_device *kbdev, u32 us)
+static INLINE u32 kbasep_js_convert_us_to_gpu_ticks_min_freq(struct kbase_device *kbdev, u32 us)
 {
        u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_min;
        KBASE_DEBUG_ASSERT(0 != gpu_freq);
@@ -886,7 +886,7 @@ static INLINE u32 kbasep_js_convert_us_to_gpu_ticks_min_freq(kbase_device *kbdev
  * e.g.: When you need the number of cycles to guarantee you'll wait at least
  * 'us' amount of time (but you might wait longer).
  */
-static INLINE u32 kbasep_js_convert_us_to_gpu_ticks_max_freq(kbase_device *kbdev, u32 us)
+static INLINE u32 kbasep_js_convert_us_to_gpu_ticks_max_freq(struct kbase_device *kbdev, u32 us)
 {
        u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_max;
        KBASE_DEBUG_ASSERT(0 != gpu_freq);
@@ -902,7 +902,7 @@ static INLINE u32 kbasep_js_convert_us_to_gpu_ticks_max_freq(kbase_device *kbdev
  * take (you guarantee that you won't wait any longer than this, but it may
  * be shorter).
  */
-static INLINE u32 kbasep_js_convert_gpu_ticks_to_us_min_freq(kbase_device *kbdev, u32 ticks)
+static INLINE u32 kbasep_js_convert_gpu_ticks_to_us_min_freq(struct kbase_device *kbdev, u32 ticks)
 {
        u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_min;
        KBASE_DEBUG_ASSERT(0 != gpu_freq);
@@ -917,7 +917,7 @@ static INLINE u32 kbasep_js_convert_gpu_ticks_to_us_min_freq(kbase_device *kbdev
  * e.g.: When you need to know the best-case wait for 'tick' cycles (you
  * guarantee to be waiting for at least this long, but it may be longer).
  */
-static INLINE u32 kbasep_js_convert_gpu_ticks_to_us_max_freq(kbase_device *kbdev, u32 ticks)
+static INLINE u32 kbasep_js_convert_gpu_ticks_to_us_max_freq(struct kbase_device *kbdev, u32 ticks)
 {
        u32 gpu_freq = kbdev->gpu_props.props.core_props.gpu_freq_khz_max;
        KBASE_DEBUG_ASSERT(0 != gpu_freq);
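
The hunks above cut off the arithmetic, but since gpu_freq_khz_* is a frequency in kHz (i.e. freq_khz / 1000 cycles per microsecond), the four converters reduce to simple scaling. A sketch of that scaling, offered as an assumption for illustration only:

        /* Sketch: freq_khz / 1000 == GPU cycles per microsecond. */
        static u32 sketch_us_to_ticks(u32 gpu_freq_khz, u32 us)
        {
                return us * (gpu_freq_khz / 1000);      /* 500 us at 600000 kHz -> 300000 ticks */
        }

        static u32 sketch_ticks_to_us(u32 gpu_freq_khz, u32 ticks)
        {
                return ticks / (gpu_freq_khz / 1000);   /* 300000 ticks at 600000 kHz -> 500 us */
        }
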
index 6ded87dfe85dc8b31e9b12bc21240fd15e5fc53a..7a4cae3be870bfad6261fc84fb14d6e853af4df0 100755 (executable)
 #include "mali_kbase_js_affinity.h"
 
 
-STATIC INLINE mali_bool affinity_job_uses_high_cores(kbase_device *kbdev, kbase_jd_atom *katom)
+STATIC INLINE mali_bool affinity_job_uses_high_cores(struct kbase_device *kbdev, struct kbase_jd_atom *katom)
 {
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
-               kbase_context *kctx;
+               struct kbase_context *kctx;
                kbase_context_flags ctx_flags;
 
                kctx = katom->kctx;
@@ -52,7 +52,7 @@ STATIC INLINE mali_bool affinity_job_uses_high_cores(kbase_device *kbdev, kbase_
  * @return MALI_FALSE if a core split is not required
  * @return != MALI_FALSE if a core split is required.
  */
-STATIC INLINE mali_bool kbase_affinity_requires_split(kbase_device *kbdev)
+STATIC INLINE mali_bool kbase_affinity_requires_split(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
@@ -76,7 +76,7 @@ STATIC INLINE mali_bool kbase_affinity_requires_split(kbase_device *kbdev)
        return MALI_FALSE;
 }
 
-mali_bool kbase_js_can_run_job_on_slot_no_lock(kbase_device *kbdev, int js)
+mali_bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, int js)
 {
        /*
         * Here are the reasons for using job slot 2:
@@ -130,7 +130,7 @@ mali_bool kbase_js_can_run_job_on_slot_no_lock(kbase_device *kbdev, int js)
  *   (see notes in loops), but as the functionality will likely
  *   be modified, optimization has not been addressed.
 */
-mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kbase_jd_atom *katom, int js)
+mali_bool kbase_js_choose_affinity(u64 * const affinity, struct kbase_device *kbdev, struct kbase_jd_atom *katom, int js)
 {
        base_jd_core_req core_req = katom->core_req;
        unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
@@ -145,8 +145,7 @@ mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kb
         * If no cores are currently available (core availability policy is
         * transitioning) then fail.
         */
-       if (0 == core_availability_mask)
-       {
+       if (0 == core_availability_mask) {
                spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
                *affinity = 0;
                return MALI_FALSE;
@@ -154,8 +153,7 @@ mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kb
 
        KBASE_DEBUG_ASSERT(js >= 0);
 
-       if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) == BASE_JD_REQ_T)
-       {
+       if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) == BASE_JD_REQ_T) {
                spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
                /* Tiler only job, bit 0 needed to enable tiler but no shader cores required */
                *affinity = 1;
@@ -173,6 +171,7 @@ mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kb
                        } else {
                                /* js[1], js[2] use core groups 0, 1 for dual-core-group systems */
                                u32 core_group_idx = ((u32) js) - 1;
+
                                KBASE_DEBUG_ASSERT(core_group_idx < num_core_groups);
                                *affinity = kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask & core_availability_mask;
 
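
A concrete example of the dual-core-group path above may help: with js == 2 the job is steered to core group 1, and the proposed affinity is that group's core mask gated by the cores currently powered. The mask values below are hypothetical, chosen only to show the shape of the calculation:

        u32 core_group_idx = 2 - 1;                             /* js == 2 -> core group 1 */
        u64 group_mask = 0xF0;                                  /* hypothetical: group 1 owns cores 4-7 */
        u64 core_availability_mask = 0x70;                      /* hypothetical: cores 4-6 powered */
        u64 affinity = group_mask & core_availability_mask;     /* -> 0x70 */
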
@@ -223,7 +222,7 @@ mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kb
        return MALI_TRUE;
 }
 
-STATIC INLINE mali_bool kbase_js_affinity_is_violating(kbase_device *kbdev, u64 *affinities)
+STATIC INLINE mali_bool kbase_js_affinity_is_violating(struct kbase_device *kbdev, u64 *affinities)
 {
        /* This implementation checks whether the two slots involved in Generic thread creation
         * have intersecting affinity. This is due to micro-architectural issues where a job in
@@ -237,6 +236,7 @@ STATIC INLINE mali_bool kbase_js_affinity_is_violating(kbase_device *kbdev, u64
        u64 affinity_set_left;
        u64 affinity_set_right;
        u64 intersection;
+
        KBASE_DEBUG_ASSERT(affinities != NULL);
 
        affinity_set_left = affinities[1];
@@ -255,9 +255,9 @@ STATIC INLINE mali_bool kbase_js_affinity_is_violating(kbase_device *kbdev, u64
        return (mali_bool) (intersection != (u64) 0u);
 }
 
-mali_bool kbase_js_affinity_would_violate(kbase_device *kbdev, int js, u64 affinity)
+mali_bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js, u64 affinity)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        u64 new_affinities[BASE_JM_MAX_NR_SLOTS];
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
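
kbase_js_affinity_would_violate() therefore builds a trial state: copy the tracked per-slot affinities, OR the proposed mask into the candidate slot, and ask whether slots 1 and 2 now intersect. A small worked sketch with made-up masks:

        u64 slot_affinities[3] = { 0x0, 0x3, 0x0 };     /* hypothetical tracked per-slot state */
        u64 proposed = 0x4;                             /* candidate affinity for slot 2 */
        u64 intersection;

        slot_affinities[2] |= proposed;
        intersection = slot_affinities[1] & slot_affinities[2]; /* 0x3 & 0x4 == 0 */
        /* zero intersection -> no violation, the proposed submission is safe */
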
@@ -271,9 +271,9 @@ mali_bool kbase_js_affinity_would_violate(kbase_device *kbdev, int js, u64 affin
        return kbase_js_affinity_is_violating(kbdev, new_affinities);
 }
 
-void kbase_js_affinity_retain_slot_cores(kbase_device *kbdev, int js, u64 affinity)
+void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js, u64 affinity)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        u64 cores;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -297,12 +297,11 @@ void kbase_js_affinity_retain_slot_cores(kbase_device *kbdev, int js, u64 affini
 
                cores &= ~bit;
        }
-
 }
 
-void kbase_js_affinity_release_slot_cores(kbase_device *kbdev, int js, u64 affinity)
+void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js, u64 affinity)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        u64 cores;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -327,9 +326,9 @@ void kbase_js_affinity_release_slot_cores(kbase_device *kbdev, int js, u64 affin
 
 }
 
-void kbase_js_affinity_slot_blocked_an_atom(kbase_device *kbdev, int js)
+void kbase_js_affinity_slot_blocked_an_atom(struct kbase_device *kbdev, int js)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
@@ -338,9 +337,9 @@ void kbase_js_affinity_slot_blocked_an_atom(kbase_device *kbdev, int js)
        js_devdata->runpool_irq.slots_blocked_on_affinity |= 1u << js;
 }
 
-void kbase_js_affinity_submit_to_blocked_slots(kbase_device *kbdev)
+void kbase_js_affinity_submit_to_blocked_slots(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        u16 slots;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -353,6 +352,7 @@ void kbase_js_affinity_submit_to_blocked_slots(kbase_device *kbdev)
 
        while (slots) {
                int bitnum = fls(slots) - 1;
+
                u16 bit = 1u << bitnum;
                slots &= ~bit;
 
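
The loop above walks the set bits of the blocked-slot mask from the most significant downwards, clearing each bit as it is handled. The same idiom in isolation (fls() is from <linux/bitops.h>; blocked_slots_snapshot and the loop body are placeholders):

        u16 slots = blocked_slots_snapshot;     /* hypothetical local copy of the mask */

        while (slots) {
                int bitnum = fls(slots) - 1;    /* index of the highest set bit */
                u16 bit = 1u << bitnum;

                /* ... per-slot work for slot 'bitnum' goes here ... */
                slots &= ~bit;                  /* this slot is done */
        }
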
@@ -367,10 +367,10 @@ void kbase_js_affinity_submit_to_blocked_slots(kbase_device *kbdev)
        }
 }
 
-#if KBASE_TRACE_ENABLE != 0
-void kbase_js_debug_log_current_affinities(kbase_device *kbdev)
+#if KBASE_TRACE_ENABLE
+void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        int slot_nr;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -379,4 +379,4 @@ void kbase_js_debug_log_current_affinities(kbase_device *kbdev)
        for (slot_nr = 0; slot_nr < 3; ++slot_nr)
                KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_AFFINITY_CURRENT, NULL, NULL, 0u, slot_nr, (u32) js_devdata->runpool_irq.slot_affinities[slot_nr]);
 }
-#endif                         /* KBASE_TRACE_ENABLE != 0 */
+#endif                         /* KBASE_TRACE_ENABLE  */
index 38de8b31a5c38a937598b12bf8a4b172bb75a4df..83da812802dc8eaa66f2cf10281bb215f2077ac7 100755 (executable)
@@ -56,7 +56,7 @@
  * @param kbdev The kbase device structure of the device
  * @param js    Job slot number to check for allowance
  */
-mali_bool kbase_js_can_run_job_on_slot_no_lock(kbase_device *kbdev, int js);
+mali_bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, int js);
 
 /**
  * @brief Compute affinity for a given job.
@@ -73,7 +73,7 @@ mali_bool kbase_js_can_run_job_on_slot_no_lock(kbase_device *kbdev, int js);
  * @param js    Slot the job chain is being submitted to
 
  */
-mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kbase_jd_atom *katom, int js);
+mali_bool kbase_js_choose_affinity(u64 * const affinity, struct kbase_device *kbdev, struct kbase_jd_atom *katom, int js);
 
 /**
  * @brief Determine whether a proposed \a affinity on job slot \a js would
@@ -82,7 +82,7 @@ mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kb
  * The following locks must be held by the caller:
  * - kbasep_js_device_data::runpool_irq::lock
  */
-mali_bool kbase_js_affinity_would_violate(kbase_device *kbdev, int js, u64 affinity);
+mali_bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js, u64 affinity);
 
 /**
  * @brief Affinity tracking: retain cores used by a slot
@@ -90,7 +90,7 @@ mali_bool kbase_js_affinity_would_violate(kbase_device *kbdev, int js, u64 affin
  * The following locks must be held by the caller:
  * - kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_js_affinity_retain_slot_cores(kbase_device *kbdev, int js, u64 affinity);
+void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js, u64 affinity);
 
 /**
  * @brief Affinity tracking: release cores used by a slot
@@ -103,7 +103,7 @@ void kbase_js_affinity_retain_slot_cores(kbase_device *kbdev, int js, u64 affini
  * The following locks must be held by the caller:
  * - kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_js_affinity_release_slot_cores(kbase_device *kbdev, int js, u64 affinity);
+void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js, u64 affinity);
 
 /**
  * @brief Register a slot as blocking atoms due to affinity violations
@@ -117,7 +117,7 @@ void kbase_js_affinity_release_slot_cores(kbase_device *kbdev, int js, u64 affin
  * The following locks must be held by the caller:
  * - kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_js_affinity_slot_blocked_an_atom(kbase_device *kbdev, int js);
+void kbase_js_affinity_slot_blocked_an_atom(struct kbase_device *kbdev, int js);
 
 /**
  * @brief Submit to job slots that have registered that an atom was blocked on
@@ -136,18 +136,18 @@ void kbase_js_affinity_slot_blocked_an_atom(kbase_device *kbdev, int js);
  * - it must hold kbasep_js_device_data::runpool_mutex
  * - it must hold kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_js_affinity_submit_to_blocked_slots(kbase_device *kbdev);
+void kbase_js_affinity_submit_to_blocked_slots(struct kbase_device *kbdev);
 
 /**
  * @brief Output to the Trace log the current tracked affinities on all slots
  */
-#if KBASE_TRACE_ENABLE != 0
-void kbase_js_debug_log_current_affinities(kbase_device *kbdev);
-#else                          /*  KBASE_TRACE_ENABLE != 0 */
-static INLINE void kbase_js_debug_log_current_affinities(kbase_device *kbdev)
+#if KBASE_TRACE_ENABLE
+void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev);
+#else                          /*  KBASE_TRACE_ENABLE  */
+static INLINE void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
 {
 }
-#endif                         /*  KBASE_TRACE_ENABLE != 0 */
+#endif                         /*  KBASE_TRACE_ENABLE  */
 
          /** @} *//* end group kbase_js_affinity */
          /** @} *//* end group base_kbase_api */
index 4da69020165cc759d6e6250b65569134c798c471..aa13bdb7fda3e0092341bd9b7f054308dc39cb3b 100755 (executable)
@@ -17,6 +17,7 @@
 
 
 #include <mali_kbase.h>
+#include <mali_kbase_config.h>
 
 /*
  * Private functions follow
  * or similar is called sometime later.
  * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
  */
-STATIC mali_bool kbasep_js_ctx_attr_runpool_retain_attr(kbase_device *kbdev, kbase_context *kctx, kbasep_js_ctx_attr attribute)
+STATIC mali_bool kbasep_js_ctx_attr_runpool_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
        mali_bool runpool_state_changed = MALI_FALSE;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -83,10 +84,10 @@ STATIC mali_bool kbasep_js_ctx_attr_runpool_retain_attr(kbase_device *kbdev, kba
  * or similar is called sometime later.
  * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
  */
-STATIC mali_bool kbasep_js_ctx_attr_runpool_release_attr(kbase_device *kbdev, kbase_context *kctx, kbasep_js_ctx_attr attribute)
+STATIC mali_bool kbasep_js_ctx_attr_runpool_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_kctx_info *js_kctx_info;
        mali_bool runpool_state_changed = MALI_FALSE;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -125,9 +126,9 @@ STATIC mali_bool kbasep_js_ctx_attr_runpool_release_attr(kbase_device *kbdev, kb
  * This may allow the scheduler to submit more jobs than previously.
  * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
  */
-STATIC mali_bool kbasep_js_ctx_attr_ctx_retain_attr(kbase_device *kbdev, kbase_context *kctx, kbasep_js_ctx_attr attribute)
+STATIC mali_bool kbasep_js_ctx_attr_ctx_retain_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
 {
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_kctx_info *js_kctx_info;
        mali_bool runpool_state_changed = MALI_FALSE;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -162,9 +163,9 @@ STATIC mali_bool kbasep_js_ctx_attr_ctx_retain_attr(kbase_device *kbdev, kbase_c
  * This may allow the scheduler to submit more jobs than previously.
  * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
  */
-STATIC mali_bool kbasep_js_ctx_attr_ctx_release_attr(kbase_device *kbdev, kbase_context *kctx, kbasep_js_ctx_attr attribute)
+STATIC mali_bool kbasep_js_ctx_attr_ctx_release_attr(struct kbase_device *kbdev, struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
 {
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_kctx_info *js_kctx_info;
        mali_bool runpool_state_changed = MALI_FALSE;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -192,9 +193,9 @@ STATIC mali_bool kbasep_js_ctx_attr_ctx_release_attr(kbase_device *kbdev, kbase_
  * More commonly used public functions
  */
 
-void kbasep_js_ctx_attr_set_initial_attrs(kbase_device *kbdev, kbase_context *kctx)
+void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_kctx_info *js_kctx_info;
        mali_bool runpool_state_changed = MALI_FALSE;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -224,16 +225,16 @@ void kbasep_js_ctx_attr_set_initial_attrs(kbase_device *kbdev, kbase_context *kc
        CSTD_UNUSED(runpool_state_changed);
 }
 
-void kbasep_js_ctx_attr_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx)
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        mali_bool runpool_state_changed;
        int i;
 
        /* Retain any existing attributes */
        for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
-               if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (kbasep_js_ctx_attr) i) != MALI_FALSE) {
+               if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != MALI_FALSE) {
                        /* The context is being scheduled in, so update the runpool with the new attributes */
-                       runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (kbasep_js_ctx_attr) i);
+                       runpool_state_changed = kbasep_js_ctx_attr_runpool_retain_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
 
                        /* We don't need to know about state changed, because retaining a
                         * context occurs on scheduling it, and that itself will also try
@@ -243,23 +244,23 @@ void kbasep_js_ctx_attr_runpool_retain_ctx(kbase_device *kbdev, kbase_context *k
        }
 }
 
-mali_bool kbasep_js_ctx_attr_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx)
+mali_bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        mali_bool runpool_state_changed = MALI_FALSE;
        int i;
 
        /* Release any existing attributes */
        for (i = 0; i < KBASEP_JS_CTX_ATTR_COUNT; ++i) {
-               if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (kbasep_js_ctx_attr) i) != MALI_FALSE) {
+               if (kbasep_js_ctx_attr_is_attr_on_ctx(kctx, (enum kbasep_js_ctx_attr) i) != MALI_FALSE) {
                        /* The context is being scheduled out, so update the runpool on the removed attributes */
-                       runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (kbasep_js_ctx_attr) i);
+                       runpool_state_changed |= kbasep_js_ctx_attr_runpool_release_attr(kbdev, kctx, (enum kbasep_js_ctx_attr) i);
                }
        }
 
        return runpool_state_changed;
 }
 
-void kbasep_js_ctx_attr_ctx_retain_atom(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom)
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
        mali_bool runpool_state_changed = MALI_FALSE;
        base_jd_core_req core_req;
@@ -283,7 +284,7 @@ void kbasep_js_ctx_attr_ctx_retain_atom(kbase_device *kbdev, kbase_context *kctx
        CSTD_UNUSED(runpool_state_changed);
 }
 
-mali_bool kbasep_js_ctx_attr_ctx_release_atom(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state)
+mali_bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state)
 {
        mali_bool runpool_state_changed = MALI_FALSE;
        base_jd_core_req core_req;
@@ -296,6 +297,7 @@ mali_bool kbasep_js_ctx_attr_ctx_release_atom(kbase_device *kbdev, kbase_context
                return MALI_FALSE;
 
        if (core_req & BASE_JD_REQ_ONLY_COMPUTE) {
+#if KBASE_PM_EN
                unsigned long flags;
                int device_nr = (core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) ? katom_retained_state->device_nr : 0;
                KBASE_DEBUG_ASSERT(device_nr < 2);
@@ -304,16 +306,17 @@ mali_bool kbasep_js_ctx_attr_ctx_release_atom(kbase_device *kbdev, kbase_context
                kbasep_pm_record_job_status(kbdev);
                kbdev->pm.metrics.active_cl_ctx[device_nr]--;
                spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
-
+#endif
                runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_COMPUTE);
        } else {
+#if KBASE_PM_EN
                unsigned long flags;
 
                spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
                kbasep_pm_record_job_status(kbdev);
                kbdev->pm.metrics.active_gl_ctx--;
                spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
-
+#endif
                runpool_state_changed |= kbasep_js_ctx_attr_ctx_release_attr(kbdev, kctx, KBASEP_JS_CTX_ATTR_NON_COMPUTE);
        }
 
index 6e722297bb81e3b92be706661174b06cab082a7c..6a57538298d0b3f0f47380e03c51b6063b99939b 100755 (executable)
@@ -46,7 +46,7 @@
  * Requires:
  * - Hold the jsctx_mutex
  */
-void kbasep_js_ctx_attr_set_initial_attrs(kbase_device *kbdev, kbase_context *kctx);
+void kbasep_js_ctx_attr_set_initial_attrs(struct kbase_device *kbdev, struct kbase_context *kctx);
 
 /**
  * Retain all attributes of a context
@@ -59,7 +59,7 @@ void kbasep_js_ctx_attr_set_initial_attrs(kbase_device *kbdev, kbase_context *kc
  * - runpool_irq spinlock
  * - ctx->is_scheduled is true
  */
-void kbasep_js_ctx_attr_runpool_retain_ctx(kbase_device *kbdev, kbase_context *kctx);
+void kbasep_js_ctx_attr_runpool_retain_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
 
 /**
  * Release all attributes of a context
@@ -78,7 +78,7 @@ void kbasep_js_ctx_attr_runpool_retain_ctx(kbase_device *kbdev, kbase_context *k
  * or similar is called sometime later.
  * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
  */
-mali_bool kbasep_js_ctx_attr_runpool_release_ctx(kbase_device *kbdev, kbase_context *kctx);
+mali_bool kbasep_js_ctx_attr_runpool_release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
 
 /**
  * Retain all attributes of an atom
@@ -89,7 +89,7 @@ mali_bool kbasep_js_ctx_attr_runpool_release_ctx(kbase_device *kbdev, kbase_cont
  * - jsctx mutex
  * - If the context is scheduled, then runpool_irq spinlock must also be held
  */
-void kbasep_js_ctx_attr_ctx_retain_atom(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom);
+void kbasep_js_ctx_attr_ctx_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
 
 /**
  * Release all attributes of an atom, given its retained state.
@@ -108,15 +108,15 @@ void kbasep_js_ctx_attr_ctx_retain_atom(kbase_device *kbdev, kbase_context *kctx
  * or similar is called sometime later.
  * @return MALI_FALSE indicates no change in ctx attributes state of the runpool.
  */
-mali_bool kbasep_js_ctx_attr_ctx_release_atom(kbase_device *kbdev, kbase_context *kctx, kbasep_js_atom_retained_state *katom_retained_state);
+mali_bool kbasep_js_ctx_attr_ctx_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbasep_js_atom_retained_state *katom_retained_state);
 
 /**
  * Requires:
  * - runpool_irq spinlock
  */
-static INLINE s8 kbasep_js_ctx_attr_count_on_runpool(kbase_device *kbdev, kbasep_js_ctx_attr attribute)
+static INLINE s8 kbasep_js_ctx_attr_count_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
@@ -129,7 +129,7 @@ static INLINE s8 kbasep_js_ctx_attr_count_on_runpool(kbase_device *kbdev, kbasep
  * Requires:
  * - runpool_irq spinlock
  */
-static INLINE mali_bool kbasep_js_ctx_attr_is_attr_on_runpool(kbase_device *kbdev, kbasep_js_ctx_attr attribute)
+static INLINE mali_bool kbasep_js_ctx_attr_is_attr_on_runpool(struct kbase_device *kbdev, enum kbasep_js_ctx_attr attribute)
 {
        /* In general, attributes are 'on' when they have a non-zero refcount (note: the refcount will never be < 0) */
        return (mali_bool) kbasep_js_ctx_attr_count_on_runpool(kbdev, attribute);
@@ -139,9 +139,9 @@ static INLINE mali_bool kbasep_js_ctx_attr_is_attr_on_runpool(kbase_device *kbde
  * Requires:
  * - jsctx mutex
  */
-static INLINE mali_bool kbasep_js_ctx_attr_is_attr_on_ctx(kbase_context *kctx, kbasep_js_ctx_attr attribute)
+static INLINE mali_bool kbasep_js_ctx_attr_is_attr_on_ctx(struct kbase_context *kctx, enum kbasep_js_ctx_attr attribute)
 {
-       kbasep_js_kctx_info *js_kctx_info;
+       struct kbasep_js_kctx_info *js_kctx_info;
 
        KBASE_DEBUG_ASSERT(kctx != NULL);
        KBASE_DEBUG_ASSERT(attribute < KBASEP_JS_CTX_ATTR_COUNT);
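
As the comments above note, an attribute is "on" exactly while its refcount is non-zero, so the boolean helpers are thin wrappers over the counters. A short usage sketch built only from the helpers declared in this header:

        s8 nr_compute_ctxs;

        if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE)) {
                /* at least one scheduled context currently retains the COMPUTE attribute */
        }

        nr_compute_ctxs = kbasep_js_ctx_attr_count_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE);
        /* number of scheduled contexts retaining the attribute (never negative) */
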
index 981bb1d1ef12978107815365306fa876f04a7230..e9572ba754a9bc637a6492f6a9affe882e2baac1 100755 (executable)
@@ -60,43 +60,43 @@ enum {
 
 typedef u32 kbase_context_flags;
 
-typedef struct kbasep_atom_req {
+struct kbasep_atom_req {
        base_jd_core_req core_req;
        kbase_context_flags ctx_req;
        u32 device_nr;
-} kbasep_atom_req;
+};
 
 #include "mali_kbase_js_policy_cfs.h"
 
 /* Wrapper Interface - doxygen is elsewhere */
-typedef union kbasep_js_policy {
+union kbasep_js_policy {
 #ifdef KBASE_JS_POLICY_AVAILABLE_FCFS
-       kbasep_js_policy_fcfs fcfs;
+       struct kbasep_js_policy_fcfs fcfs;
 #endif
 #ifdef KBASE_JS_POLICY_AVAILABLE_CFS
-       kbasep_js_policy_cfs cfs;
+       struct kbasep_js_policy_cfs cfs;
 #endif
-} kbasep_js_policy;
+};
 
 /* Wrapper Interface - doxygen is elsewhere */
-typedef union kbasep_js_policy_ctx_info {
+union kbasep_js_policy_ctx_info {
 #ifdef KBASE_JS_POLICY_AVAILABLE_FCFS
-       kbasep_js_policy_fcfs_ctx fcfs;
+       struct kbasep_js_policy_fcfs_ctx fcfs;
 #endif
 #ifdef KBASE_JS_POLICY_AVAILABLE_CFS
-       kbasep_js_policy_cfs_ctx cfs;
+       struct kbasep_js_policy_cfs_ctx cfs;
 #endif
-} kbasep_js_policy_ctx_info;
+};
 
 /* Wrapper Interface - doxygen is elsewhere */
-typedef union kbasep_js_policy_job_info {
+union kbasep_js_policy_job_info {
 #ifdef KBASE_JS_POLICY_AVAILABLE_FCFS
-       kbasep_js_policy_fcfs_job fcfs;
+       struct kbasep_js_policy_fcfs_job fcfs;
 #endif
 #ifdef KBASE_JS_POLICY_AVAILABLE_CFS
-       kbasep_js_policy_cfs_job cfs;
+       struct kbasep_js_policy_cfs_job cfs;
 #endif
-} kbasep_js_policy_job_info;
+};
 
 
 /** Callback function run on all of a context's jobs registered with the Job
@@ -146,7 +146,7 @@ typedef void (*kbasep_js_policy_ctx_job_cb)(struct kbase_device *kbdev, struct k
  * - Finding out when there are a mix of @ref BASE_CONTEXT_HINT_ONLY_COMPUTE
  * and ! @ref BASE_CONTEXT_HINT_ONLY_COMPUTE contexts in the runpool
  */
-typedef enum {
+enum kbasep_js_ctx_attr {
        /** Attribute indicating a context that contains Compute jobs. That is,
         * @ref BASE_CONTEXT_HINT_ONLY_COMPUTE is \b set and/or the context has jobs of type
         * @ref BASE_JD_REQ_ONLY_COMPUTE
@@ -193,12 +193,12 @@ typedef enum {
 
        /** Must be the last in the enum */
        KBASEP_JS_CTX_ATTR_COUNT
-} kbasep_js_ctx_attr;
+};
 
 enum {
        /** Bit indicating that new atom should be started because this atom completed */
        KBASE_JS_ATOM_DONE_START_NEW_ATOMS = (1u << 0),
-       /** Bit indicating that the atom was evicted from the JSn_NEXT registers */
+       /** Bit indicating that the atom was evicted from the JS_NEXT registers */
        KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT = (1u << 1)
 };
 
@@ -212,7 +212,7 @@ typedef u32 kbasep_js_atom_done_code;
  * must be held whilst accessing this data (including reads and atomic
  * decisions based on the read).
  */
-typedef struct kbasep_js_per_as_data {
+struct kbasep_js_per_as_data {
        /**
         * Ref count of whether this AS is busy, and must not be scheduled out
         *
@@ -223,15 +223,15 @@ typedef struct kbasep_js_per_as_data {
        int as_busy_refcount;
 
        /** Pointer to the current context on this address space, or NULL for no context */
-       kbase_context *kctx;
-} kbasep_js_per_as_data;
+       struct kbase_context *kctx;
+};
 
 /**
  * @brief KBase Device Data Job Scheduler sub-structure
  *
  * This encapsulates the current context of the Job Scheduler on a particular
  * device. This context is global to the device, and is not tied to any
- * particular kbase_context running on the device.
+ * particular struct kbase_context running on the device.
  *
  * nr_contexts_running and as_free are optimized for packing together (by making
  * them smaller types than u32). The operations on them should rarely involve
@@ -240,7 +240,7 @@ typedef struct kbasep_js_per_as_data {
  * the Total License model, it is free to make optimizations based on that (i.e.
  * to remove masking).
  */
-typedef struct kbasep_js_device_data {
+struct kbasep_js_device_data {
        /** Sub-structure to collect together Job Scheduling data used in IRQ context */
        struct runpool_irq {
                /**
@@ -267,7 +267,7 @@ typedef struct kbasep_js_device_data {
                 * 'N' (per_as_data[N].kctx) is allowed to submit jobs.
                 *
                 * It is placed here because it's much more memory efficient than having a mali_bool8 in
-                * kbasep_js_per_as_data to store this flag  */
+                * struct kbasep_js_per_as_data to store this flag  */
                u16 submit_allowed;
 
                /** Context Attributes:
@@ -287,7 +287,7 @@ typedef struct kbasep_js_device_data {
                s8 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
 
                /** Data that is unique for each AS */
-               kbasep_js_per_as_data per_as_data[BASE_MAX_NR_AS];
+               struct kbasep_js_per_as_data per_as_data[BASE_MAX_NR_AS];
 
                /*
                 * Affinity management and tracking
@@ -339,7 +339,7 @@ typedef struct kbasep_js_device_data {
         * Refer to the structure defined by the current policy to determine which
         * locks must be held when accessing this.
         */
-       kbasep_js_policy policy;
+       union kbasep_js_policy policy;
 
        /** Core Requirements to match up with base_js_atom's core_req member
         * @note This is a write-once member, and so no locking is required to read */
@@ -355,8 +355,8 @@ typedef struct kbasep_js_device_data {
        u32 gpu_reset_ticks_cl;          /**< Value for KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL */
        u32 gpu_reset_ticks_nss;         /**< Value for KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS */
        u32 ctx_timeslice_ns;            /**< Value for KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS */
-       u32 cfs_ctx_runtime_init_slices; /**< Value for KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES */
-       u32 cfs_ctx_runtime_min_slices;  /**< Value for  KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES */
+       u32 cfs_ctx_runtime_init_slices; /**< Value for DEFAULT_JS_CFS_CTX_RUNTIME_INIT_SLICES */
+       u32 cfs_ctx_runtime_min_slices;  /**< Value for  DEFAULT_JS_CFS_CTX_RUNTIME_MIN_SLICES */
 
        /** List of suspended soft jobs */
        struct list_head suspended_soft_jobs_list;
@@ -369,15 +369,15 @@ typedef struct kbasep_js_device_data {
         * only be using this during init/term paths).
         * @note This is a write-once member, and so no locking is required to read */
        int init_status;
-} kbasep_js_device_data;
+};
 
 /**
  * @brief KBase Context Job Scheduling information structure
  *
- * This is a substructure in the kbase_context that encapsulates all the
+ * This is a substructure in the struct kbase_context that encapsulates all the
  * scheduling information.
  */
-typedef struct kbasep_js_kctx_info {
+struct kbasep_js_kctx_info {
        /**
         * Runpool substructure. This must only be accessed whilst the Run Pool
         * mutex ( kbasep_js_device_data::runpool_mutex ) is held.
@@ -385,11 +385,11 @@ typedef struct kbasep_js_kctx_info {
         * In addition, the kbasep_js_device_data::runpool_irq::lock may need to be
         * held for certain sub-members.
         *
-        * @note some of the members could be moved into kbasep_js_device_data for
+        * @note some of the members could be moved into struct kbasep_js_device_data for
         * improved d-cache/tlb efficiency.
         */
        struct {
-               kbasep_js_policy_ctx_info policy_ctx;   /**< Policy-specific context */
+               union kbasep_js_policy_ctx_info policy_ctx;     /**< Policy-specific context */
        } runpool;
 
        /**
@@ -436,14 +436,14 @@ typedef struct kbasep_js_kctx_info {
        /* The initialized-flag is placed at the end, to avoid cache-pollution (we should
         * only be using this during init/term paths) */
        int init_status;
-} kbasep_js_kctx_info;
+};
 
 /** Subset of atom state that can be available after jd_done_nolock() is called
  * on that atom. A copy must be taken via kbasep_js_atom_retained_state_copy(),
  * because the original atom could disappear. */
-typedef struct kbasep_js_atom_retained_state {
+struct kbasep_js_atom_retained_state {
        /** Event code - to determine whether the atom has finished */
-       base_jd_event_code event_code;
+       enum base_jd_event_code event_code;
        /** core requirements */
        base_jd_core_req core_req;
        /** Job Slot to retry submitting to if submission from IRQ handler failed */
@@ -451,7 +451,7 @@ typedef struct kbasep_js_atom_retained_state {
        /* Core group atom was executed on */
        u32 device_nr;
 
-} kbasep_js_atom_retained_state;
+};
 
 /**
  * Value signifying 'no retry on a slot required' for:
index f746f1d9146f75f2b0255f661f83ff6df70ca127..6c777a92000dfaa396a4ea4340ac68231a0a8be1 100755 (executable)
  * @{
  *
  * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
- * the Job Scheduler Policy and its use from the Job Scheduler Core.</b>
+ * the Job Scheduler Policy and its use from the Job Scheduler Core</b>.
  */
 
 /**
@@ -391,31 +391,31 @@ union kbasep_js_policy;
 /**
  * @brief Initialize the Job Scheduler Policy
  */
-mali_error kbasep_js_policy_init(kbase_device *kbdev);
+mali_error kbasep_js_policy_init(struct kbase_device *kbdev);
 
 /**
  * @brief Terminate the Job Scheduler Policy
  */
-void kbasep_js_policy_term(kbasep_js_policy *js_policy);
+void kbasep_js_policy_term(union kbasep_js_policy *js_policy);
 
 /**
  * @addtogroup kbase_js_policy_ctx Job Scheduler Policy, Context Management API
  * @{
  *
  * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
- * the Job Scheduler Policy and its use from the Job Scheduler Core.</b>
+ * the Job Scheduler Policy and its use from the Job Scheduler Core</b>.
  */
 
 /**
  * @brief Job Scheduler Policy Ctx Info structure
  *
- * This structure is embedded in the kbase_context structure. It is used to:
+ * This structure is embedded in the struct kbase_context structure. It is used to:
  * - track information needed for the policy to schedule the context (e.g. time
  * used, OS priority etc.)
- * - link together kbase_contexts into a queue, so that a kbase_context can be
+ * - link together kbase_contexts into a queue, so that a struct kbase_context can be
  * obtained as the container of the policy ctx info. This allows the API to
  * return what "the next context" should be.
- * - obtain other information already stored in the kbase_context for
+ * - obtain other information already stored in the struct kbase_context for
  * scheduling purposes (e.g process ID to get the priority of the originating
  * process)
  */
@@ -424,16 +424,16 @@ union kbasep_js_policy_ctx_info;
 /**
  * @brief Initialize a ctx for use with the Job Scheduler Policy
  *
- * This effectively initializes the kbasep_js_policy_ctx_info structure within
- * the kbase_context (itself located within the kctx->jctx.sched_info structure).
+ * This effectively initializes the union kbasep_js_policy_ctx_info structure within
+ * the struct kbase_context (itself located within the kctx->jctx.sched_info structure).
  */
-mali_error kbasep_js_policy_init_ctx(kbase_device *kbdev, kbase_context *kctx);
+mali_error kbasep_js_policy_init_ctx(struct kbase_device *kbdev, struct kbase_context *kctx);
 
 /**
  * @brief Terminate resources associated with using a ctx in the Job Scheduler
  * Policy.
  */
-void kbasep_js_policy_term_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+void kbasep_js_policy_term_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
 
 /**
  * @brief Enqueue a context onto the Job Scheduler Policy Queue
@@ -449,7 +449,7 @@ void kbasep_js_policy_term_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
  * The caller will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
  * The caller will be holding kbasep_js_device_data::queue_mutex.
  */
-void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+void kbasep_js_policy_enqueue_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
 
 /**
  * @brief Dequeue a context from the Head of the Job Scheduler Policy Queue
@@ -460,7 +460,7 @@ void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kc
  * the kctx dequeued.
  * @return MALI_FALSE if no contexts were available.
  */
-mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_context ** const kctx_ptr);
+mali_bool kbasep_js_policy_dequeue_head_ctx(union kbasep_js_policy *js_policy, struct kbase_context ** const kctx_ptr);
 
 /**
  * @brief Evict a context from the Job Scheduler Policy Queue
@@ -482,7 +482,7 @@ mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_c
  * @return MALI_TRUE if the context was evicted from the Policy Queue
  * @return MALI_FALSE if the context was not found in the Policy Queue
  */
-mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+mali_bool kbasep_js_policy_try_evict_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
 
 /**
  * @brief Call a function on all jobs belonging to a non-queued, non-running
@@ -509,7 +509,7 @@ mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_cont
  * The locking conditions on the caller are as follows:
  * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
  */
-void kbasep_js_policy_foreach_ctx_job(kbasep_js_policy *js_policy, kbase_context *kctx,
+void kbasep_js_policy_foreach_ctx_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx,
        kbasep_js_policy_ctx_job_cb callback, mali_bool detach_jobs);
 
 /**
@@ -537,7 +537,7 @@ void kbasep_js_policy_foreach_ctx_job(kbasep_js_policy *js_policy, kbase_context
  *
  * Due to a spinlock being held, this function must not call any APIs that sleep.
  */
-void kbasep_js_policy_runpool_add_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+void kbasep_js_policy_runpool_add_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
 
 /**
  * @brief Remove a context from the Job Scheduler Policy's Run Pool
@@ -554,7 +554,7 @@ void kbasep_js_policy_runpool_add_ctx(kbasep_js_policy *js_policy, kbase_context
  *
  * Due to a spinlock being held, this function must not call any APIs that sleep.
  */
-void kbasep_js_policy_runpool_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+void kbasep_js_policy_runpool_remove_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
 
 /**
  * @brief Indicate whether a context should be removed from the Run Pool
@@ -564,7 +564,7 @@ void kbasep_js_policy_runpool_remove_ctx(kbasep_js_policy *js_policy, kbase_cont
  *
  * @note This API is called from IRQ context.
  */
-mali_bool kbasep_js_policy_should_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx);
+mali_bool kbasep_js_policy_should_remove_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx);
 
 /**
  * @brief Synchronize with any timers acting upon the runpool
@@ -580,7 +580,7 @@ mali_bool kbasep_js_policy_should_remove_ctx(kbasep_js_policy *js_policy, kbase_
  * - it will be holding kbasep_js_kctx_info::ctx::jsctx_mutex.
  * - it will be holding kbasep_js_device_data::runpool_mutex.
  */
-void kbasep_js_policy_runpool_timers_sync(kbasep_js_policy *js_policy);
+void kbasep_js_policy_runpool_timers_sync(union kbasep_js_policy *js_policy);
 
 
 /**
@@ -598,7 +598,7 @@ void kbasep_js_policy_runpool_timers_sync(kbasep_js_policy *js_policy);
  * cannot be held). Therefore, this function should only be seen as a heuristic
  * guide as to whether \a new_ctx is higher priority than \a current_ctx
  */
-mali_bool kbasep_js_policy_ctx_has_priority(kbasep_js_policy *js_policy, kbase_context *current_ctx, kbase_context *new_ctx);
+mali_bool kbasep_js_policy_ctx_has_priority(union kbasep_js_policy *js_policy, struct kbase_context *current_ctx, struct kbase_context *new_ctx);
 
          /** @} *//* end group kbase_js_policy_ctx */
 
@@ -607,19 +607,19 @@ mali_bool kbasep_js_policy_ctx_has_priority(kbasep_js_policy *js_policy, kbase_c
  * @{
  *
  * <b>Refer to @ref page_kbase_js_policy for an overview and detailed operation of
- * the Job Scheduler Policy and its use from the Job Scheduler Core.</b>
+ * the Job Scheduler Policy and its use from the Job Scheduler Core</b>.
  */
 
 /**
  * @brief Job Scheduler Policy Job Info structure
  *
- * This structure is embedded in the kbase_jd_atom structure. It is used to:
+ * This structure is embedded in the struct kbase_jd_atom structure. It is used to:
  * - track information needed for the policy to schedule the job (e.g. time
  * used, OS priority etc.)
- * - link together jobs into a queue/buffer, so that a kbase_jd_atom can be
+ * - link together jobs into a queue/buffer, so that a struct kbase_jd_atom can be
  * obtained as the container of the policy job info. This allows the API to
  * return what "the next job" should be.
- * - obtain other information already stored in the kbase_context for
+ * - obtain other information already stored in the struct kbase_context for
  * scheduling purposes (e.g user-side relative priority)
  */
 union kbasep_js_policy_job_info;
@@ -627,7 +627,7 @@ union kbasep_js_policy_job_info;
 /**
  * @brief Initialize a job for use with the Job Scheduler Policy
  *
- * This function initializes the kbasep_js_policy_job_info structure within the
+ * This function initializes the union kbasep_js_policy_job_info structure within the
  * kbase_jd_atom. It will only initialize/allocate resources that are specific
  * to the job.
  *
@@ -647,7 +647,7 @@ union kbasep_js_policy_job_info;
  *
  * @return MALI_ERROR_NONE if initialization was correct.
  */
-mali_error kbasep_js_policy_init_job(const kbasep_js_policy *js_policy, const kbase_context *kctx, kbase_jd_atom *katom);
+mali_error kbasep_js_policy_init_job(const union kbasep_js_policy *js_policy, const struct kbase_context *kctx, struct kbase_jd_atom *katom);
 
 /**
  * @brief Register context/policy-wide information for a job on the Job Scheduler Policy.
@@ -668,7 +668,7 @@ mali_error kbasep_js_policy_init_job(const kbasep_js_policy *js_policy, const kb
  * The caller has the following conditions on locking:
  * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
  */
-void kbasep_js_policy_register_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom);
+void kbasep_js_policy_register_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, struct kbase_jd_atom *katom);
 
 /**
  * @brief De-register context/policy-wide information for a job on the Job Scheduler Policy.
@@ -680,7 +680,7 @@ void kbasep_js_policy_register_job(kbasep_js_policy *js_policy, kbase_context *k
  * The caller has the following conditions on locking:
  * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
  */
-void kbasep_js_policy_deregister_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom);
+void kbasep_js_policy_deregister_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, struct kbase_jd_atom *katom);
 
 /**
  * @brief Dequeue a Job for a job slot from the Job Scheduler Policy Run Pool
@@ -709,7 +709,7 @@ void kbasep_js_policy_deregister_job(kbasep_js_policy *js_policy, kbase_context
  * - kbasep_js_device_data::runpool_mutex will be held.
  * - kbasep_js_kctx_info::ctx::jsctx_mutex. will be held
  */
-mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev, int job_slot_idx, kbase_jd_atom ** const katom_ptr);
+mali_bool kbasep_js_policy_dequeue_job(struct kbase_device *kbdev, int job_slot_idx, struct kbase_jd_atom ** const katom_ptr);
 
 /**
  * @brief Requeue a Job back into the Job Scheduler Policy Run Pool
@@ -723,13 +723,13 @@ mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev, int job_slot_idx, kb
  * - kbasep_js_device_data::runpool_mutex will be held.
  * - kbasep_js_kctx_info::ctx::jsctx_mutex will be held.
  */
-void kbasep_js_policy_enqueue_job(kbasep_js_policy *js_policy, kbase_jd_atom *katom);
+void kbasep_js_policy_enqueue_job(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom);
 
 /**
  * @brief Log the result of a job: the time spent on a job/context, and whether
  * the job failed or not.
  *
- * Since a kbase_jd_atom contains a pointer to the kbase_context owning it,
+ * Since a struct kbase_jd_atom contains a pointer to the struct kbase_context owning it,
  * then this can also be used to log time on either/both the job and the
  * containing context.
  *
@@ -756,7 +756,7 @@ void kbasep_js_policy_enqueue_job(kbasep_js_policy *js_policy, kbase_jd_atom *ka
  * @param katom         job dispatch atom
  * @param time_spent_us the time spent by the job, in microseconds (10^-6 seconds).
  */
-void kbasep_js_policy_log_job_result(kbasep_js_policy *js_policy, kbase_jd_atom *katom, u64 time_spent_us);
+void kbasep_js_policy_log_job_result(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom, u64 time_spent_us);
 
          /** @} *//* end group kbase_js_policy_job */
 
index 80919b730d5b3ebc24909b27fc33e55d9c3d3a6e..78ec5f1fb5519ad543250a7817345760b11c23f3 100755 (executable)
@@ -72,7 +72,7 @@
  * algorithm in cached_variant_idx_init picks the least restrictive variant for
  * each job . Note that coherent_group requirement is added to all CS variants as the
  * selection of job-slot does not depend on the coherency requirement. */
-static const kbasep_atom_req core_req_variants[] = {
+static const struct kbasep_atom_req core_req_variants[] = {
        {
         /* 0: Fragment variant */
         (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_FS_AFBC |
@@ -122,7 +122,7 @@ static const kbasep_atom_req core_req_variants[] = {
         0},
 };
 
-static const kbasep_atom_req core_req_variants_8987[] = {
+static const struct kbasep_atom_req core_req_variants_8987[] = {
        {
         /* 0: Fragment variant */
         (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_COHERENT_GROUP),
@@ -294,10 +294,11 @@ static const int weight_of_priority[] = {
  * function should only be seen as a heuristic guide as to the priority weight
  * of the context.
  */
-STATIC u64 priority_weight(kbasep_js_policy_cfs_ctx *ctx_info, u64 time_us)
+STATIC u64 priority_weight(struct kbasep_js_policy_cfs_ctx *ctx_info, u64 time_us)
 {
        u64 time_delta_us;
        int priority;
+
        priority = ctx_info->process_priority + ctx_info->bag_priority;
 
        /* Adjust runtime_us using priority weight if required */
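
The weighting itself is not visible in this hunk, so the sketch below only illustrates the shape of the calculation: the measured runtime is scaled by a per-priority weight so that lower-priority contexts appear to have consumed more time. The table values and the fixed-point base are assumptions, not the driver's weight_of_priority[] contents:

        /* Sketch: hypothetical fixed-point weights, 1024 == neutral. */
        static u64 sketch_priority_weight(unsigned int priority, u64 time_us)
        {
                static const u64 weight[] = { 512, 1024, 2048 };

                return (time_us * weight[priority]) >> 10;      /* 1000 us at weight 2048 -> 2000 us */
        }
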
@@ -326,10 +327,10 @@ STATIC u64 priority_weight(kbasep_js_policy_cfs_ctx *ctx_info, u64 time_us)
        return time_delta_us;
 }
 
-#if KBASE_TRACE_ENABLE != 0
-STATIC int kbasep_js_policy_trace_get_refcnt_nolock(kbase_device *kbdev, kbase_context *kctx)
+#if KBASE_TRACE_ENABLE
+STATIC int kbasep_js_policy_trace_get_refcnt_nolock(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        int as_nr;
        int refcnt = 0;
 
@@ -337,7 +338,7 @@ STATIC int kbasep_js_policy_trace_get_refcnt_nolock(kbase_device *kbdev, kbase_c
 
        as_nr = kctx->as_nr;
        if (as_nr != KBASEP_AS_NR_INVALID) {
-               kbasep_js_per_as_data *js_per_as_data;
+               struct kbasep_js_per_as_data *js_per_as_data;
                js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
 
                refcnt = js_per_as_data->as_busy_refcount;
@@ -346,10 +347,10 @@ STATIC int kbasep_js_policy_trace_get_refcnt_nolock(kbase_device *kbdev, kbase_c
        return refcnt;
 }
 
-STATIC INLINE int kbasep_js_policy_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
+STATIC INLINE int kbasep_js_policy_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        unsigned long flags;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        int refcnt = 0;
 
        js_devdata = &kbdev->js_data;
@@ -360,24 +361,24 @@ STATIC INLINE int kbasep_js_policy_trace_get_refcnt(kbase_device *kbdev, kbase_c
 
        return refcnt;
 }
-#else                          /* KBASE_TRACE_ENABLE != 0 */
-STATIC int kbasep_js_policy_trace_get_refcnt_nolock(kbase_device *kbdev, kbase_context *kctx)
+#else                          /* KBASE_TRACE_ENABLE  */
+STATIC int kbasep_js_policy_trace_get_refcnt_nolock(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        CSTD_UNUSED(kbdev);
        CSTD_UNUSED(kctx);
        return 0;
 }
 
-STATIC INLINE int kbasep_js_policy_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
+STATIC INLINE int kbasep_js_policy_trace_get_refcnt(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
        CSTD_UNUSED(kbdev);
        CSTD_UNUSED(kctx);
        return 0;
 }
-#endif                         /* KBASE_TRACE_ENABLE != 0 */
+#endif                         /* KBASE_TRACE_ENABLE  */
 
 #ifdef CONFIG_MALI_DEBUG
-STATIC void kbasep_js_debug_check(kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag)
+STATIC void kbasep_js_debug_check(struct kbasep_js_policy_cfs *policy_info, struct kbase_context *kctx, kbasep_js_check check_flag)
 {
        /* This function uses the ternary operator and non-explicit comparisons,
         * because it makes for much shorter, easier to read code */
@@ -385,7 +386,11 @@ STATIC void kbasep_js_debug_check(kbasep_js_policy_cfs *policy_info, kbase_conte
        if (check_flag & KBASEP_JS_CHECKFLAG_QUEUED) {
                mali_bool is_queued;
                mali_bool expect_queued;
-               is_queued = (kbasep_list_member_of(&policy_info->ctx_queue_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;
+
+               is_queued = (kbasep_list_member_of(
+                               &policy_info->ctx_queue_head,
+                               &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ?
+                               MALI_TRUE : MALI_FALSE;
 
                if (!is_queued)
                        is_queued = (kbasep_list_member_of(&policy_info->ctx_rt_queue_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;
@@ -399,21 +404,23 @@ STATIC void kbasep_js_debug_check(kbasep_js_policy_cfs *policy_info, kbase_conte
        if (check_flag & KBASEP_JS_CHECKFLAG_SCHEDULED) {
                mali_bool is_scheduled;
                mali_bool expect_scheduled;
-               is_scheduled = (kbasep_list_member_of(&policy_info->scheduled_ctxs_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;
+
+               is_scheduled = (kbasep_list_member_of(
+                       &policy_info->scheduled_ctxs_head,
+                       &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ?
+                       MALI_TRUE : MALI_FALSE;
 
                expect_scheduled = (check_flag & KBASEP_JS_CHECKFLAG_IS_SCHEDULED) ? MALI_TRUE : MALI_FALSE;
                KBASE_DEBUG_ASSERT_MSG(expect_scheduled == is_scheduled, "Expected context %p to be %s but it was %s\n", kctx, (expect_scheduled) ? "scheduled" : "not scheduled", (is_scheduled) ? "scheduled" : "not scheduled");
-
        }
 
 }
 #else                          /* CONFIG_MALI_DEBUG */
-STATIC void kbasep_js_debug_check(kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag)
+STATIC void kbasep_js_debug_check(struct kbasep_js_policy_cfs *policy_info, struct kbase_context *kctx, kbasep_js_check check_flag)
 {
        CSTD_UNUSED(policy_info);
        CSTD_UNUSED(kctx);
        CSTD_UNUSED(check_flag);
-       return;
 }
 #endif                         /* CONFIG_MALI_DEBUG */
 
@@ -453,9 +460,9 @@ STATIC INLINE u32 get_slot_to_variant_lookup(u32 *bit_array, u32 slot_idx)
  * every context requirement is covered (because some are intentionally not
  * supported, such as KBASE_CTX_FLAG_SUBMIT_DISABLED) */
 #ifdef CONFIG_MALI_DEBUG
-STATIC void debug_check_core_req_variants(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
+STATIC void debug_check_core_req_variants(struct kbase_device *kbdev, struct kbasep_js_policy_cfs *policy_info)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        u32 i;
        int j;
 
@@ -468,6 +475,7 @@ STATIC void debug_check_core_req_variants(kbase_device *kbdev, kbasep_js_policy_
                job_core_req = js_devdata->js_reqs[j];
                for (i = 0; i < policy_info->num_core_req_variants; ++i) {
                        base_jd_core_req var_core_req;
+
                        var_core_req = policy_info->core_req_variants[i].core_req;
 
                        if ((var_core_req & job_core_req) == job_core_req) {
@@ -482,7 +490,7 @@ STATIC void debug_check_core_req_variants(kbase_device *kbdev, kbasep_js_policy_
 }
 #endif
 
-STATIC void build_core_req_variants(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
+STATIC void build_core_req_variants(struct kbase_device *kbdev, struct kbasep_js_policy_cfs *policy_info)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(policy_info != NULL);
@@ -507,7 +515,7 @@ STATIC void build_core_req_variants(kbase_device *kbdev, kbasep_js_policy_cfs *p
        KBASE_DEBUG_CODE(debug_check_core_req_variants(kbdev, policy_info));
 }
 
-STATIC void build_slot_lookups(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
+STATIC void build_slot_lookups(struct kbase_device *kbdev, struct kbasep_js_policy_cfs *policy_info)
 {
        u8 i;
        const u32 *variants_supported_ss_for_this_hw = variants_supported_ss_state;
@@ -531,21 +539,21 @@ STATIC void build_slot_lookups(kbase_device *kbdev, kbasep_js_policy_cfs *policy
 
 }
 
-STATIC mali_error cached_variant_idx_init(const kbasep_js_policy_cfs *policy_info, const kbase_context *kctx, kbase_jd_atom *atom)
+STATIC mali_error cached_variant_idx_init(const struct kbasep_js_policy_cfs *policy_info, const struct kbase_context *kctx, struct kbase_jd_atom *atom)
 {
-       kbasep_js_policy_cfs_job *job_info;
+       struct kbasep_js_policy_cfs_job *job_info;
        u32 i;
        base_jd_core_req job_core_req;
        u32 job_device_nr;
        kbase_context_flags ctx_flags;
-       const kbasep_js_kctx_info *js_kctx_info;
-       const kbase_device *kbdev;
+       const struct kbasep_js_kctx_info *js_kctx_info;
+       const struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(policy_info != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
        KBASE_DEBUG_ASSERT(atom != NULL);
 
-       kbdev = container_of(policy_info, const kbase_device, js_data.policy.cfs);
+       kbdev = container_of(policy_info, const struct kbase_device, js_data.policy.cfs);
        job_info = &atom->sched_info.cfs;
        job_core_req = atom->core_req;
        job_device_nr = atom->device_nr;
@@ -555,7 +563,7 @@ STATIC mali_error cached_variant_idx_init(const kbasep_js_policy_cfs *policy_inf
        /* Initial check for atoms targeting a specific coregroup */
        if ((job_core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) != MALI_FALSE && job_device_nr >= kbdev->gpu_props.num_core_groups) {
                /* device_nr exceeds the number of coregroups - not allowed by
-                * @ref base_jd_atom API contract */
+                * @ref struct base_jd_atom_v2 API contract */
                return MALI_ERROR_FUNCTION_FAILED;
        }
 
@@ -580,15 +588,15 @@ STATIC mali_error cached_variant_idx_init(const kbasep_js_policy_cfs *policy_inf
        return MALI_ERROR_FUNCTION_FAILED;
 }
 
-STATIC mali_bool dequeue_job(kbase_device *kbdev,
-                            kbase_context *kctx,
+STATIC mali_bool dequeue_job(struct kbase_device *kbdev,
+                            struct kbase_context *kctx,
                             u32 variants_supported,
-                            kbase_jd_atom ** const katom_ptr,
+                            struct kbase_jd_atom ** const katom_ptr,
                             int job_slot_idx)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_policy_cfs *policy_info;
-       kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(katom_ptr != NULL);
@@ -604,16 +612,19 @@ STATIC mali_bool dequeue_job(kbase_device *kbdev,
                while (variants_supported != 0) {
                        long variant_idx;
                        struct list_head *job_list;
+
                        variant_idx = ffs(variants_supported) - 1;
                        job_list = &ctx_info->job_list_head[variant_idx];
 
                        if (!list_empty(job_list)) {
                                /* Found a context with a matching job */
                                {
-                                       kbase_jd_atom *front_atom = list_entry(job_list->next, kbase_jd_atom, sched_info.cfs.list);
+                                       struct kbase_jd_atom *front_atom =
+                                                       list_entry(job_list->next, struct kbase_jd_atom, sched_info.cfs.list);
+
                                        KBASE_TRACE_ADD_SLOT(kbdev, JS_POLICY_DEQUEUE_JOB, front_atom->kctx, front_atom, front_atom->jc, job_slot_idx);
                                }
-                               *katom_ptr = list_entry(job_list->next, kbase_jd_atom, sched_info.cfs.list);
+                               *katom_ptr = list_entry(job_list->next, struct kbase_jd_atom, sched_info.cfs.list);
                                list_del(job_list->next);
 
                                (*katom_ptr)->sched_info.cfs.ticks = 0;
@@ -638,9 +649,9 @@ STATIC mali_bool dequeue_job(kbase_device *kbdev,
 /**
  * Hold the runpool_irq spinlock for this
  */
-STATIC INLINE mali_bool timer_callback_should_run(kbase_device *kbdev)
+STATIC INLINE mali_bool timer_callback_should_run(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_device_data *js_devdata;
        s8 nr_running_ctxs;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -683,23 +694,23 @@ STATIC INLINE mali_bool timer_callback_should_run(kbase_device *kbdev)
 static enum hrtimer_restart timer_callback(struct hrtimer *timer)
 {
        unsigned long flags;
-       kbase_device *kbdev;
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_policy_cfs *policy_info;
+       struct kbase_device *kbdev;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_policy_cfs *policy_info;
        int s;
        mali_bool reset_needed = MALI_FALSE;
 
        KBASE_DEBUG_ASSERT(timer != NULL);
 
-       policy_info = container_of(timer, kbasep_js_policy_cfs, scheduling_timer);
-       kbdev = container_of(policy_info, kbase_device, js_data.policy.cfs);
+       policy_info = container_of(timer, struct kbasep_js_policy_cfs, scheduling_timer);
+       kbdev = container_of(policy_info, struct kbase_device, js_data.policy.cfs);
        js_devdata = &kbdev->js_data;
 
        /* Loop through the slots */
        spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
        for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
-               kbase_jm_slot *slot = &kbdev->jm_slots[s];
-               kbase_jd_atom *atom = NULL;
+               struct kbase_jm_slot *slot = &kbdev->jm_slots[s];
+               struct kbase_jd_atom *atom = NULL;
 
                if (kbasep_jm_nr_jobs_submitted(slot) > 0) {
                        atom = kbasep_jm_peek_idx_submit_slot(slot, 0);
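
/*
 * Illustrative user-space sketch (not driver code) of the container_of idiom
 * used by timer_callback above: the callback is handed a pointer to the
 * embedded hrtimer and walks back out to the enclosing policy and device
 * structures.  All struct names below are stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer { int armed; };

struct fake_policy {
        int num_ctxs;
        struct fake_timer scheduling_timer;   /* embedded, like the hrtimer */
};

struct fake_device {
        struct fake_policy cfs;               /* embedded policy state */
};

static void fake_callback(struct fake_timer *timer)
{
        /* Walk back out: timer -> policy -> device, as the real callback does */
        struct fake_policy *policy = container_of(timer, struct fake_policy, scheduling_timer);
        struct fake_device *dev = container_of(policy, struct fake_device, cfs);

        printf("policy has %d contexts (device %p)\n", policy->num_ctxs, (void *)dev);
}

int main(void)
{
        struct fake_device dev = { .cfs = { .num_ctxs = 2 } };

        fake_callback(&dev.cfs.scheduling_timer);
        return 0;
}
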
@@ -730,19 +741,36 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
 
                                /* Job is Soft-Stoppable */
                                if (ticks == soft_stop_ticks) {
+                                       int disjoint_threshold =
+                                                       KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
+                                       u32 softstop_flags = 0u;
                                        /* Job has been scheduled for at least js_devdata->soft_stop_ticks ticks.
                                         * Soft stop the slot so we can run other jobs.
                                         */
                                        dev_dbg(kbdev->dev, "Soft-stop");
 
-#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
-                                       kbase_job_slot_softstop(kbdev, s, atom);
+#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+                                       /* nr_user_contexts_running is updated with the runpool_mutex,
+                                        * but we can't take that here.
+                                        *
+                                        * However, if it's about to be increased then the new context
+                                        * can't run any jobs until they take the runpool_irq lock, so
+                                        * it's OK to observe the older value.
+                                        *
+                                        * Similarly, if it's about to be decreased, the last job from
+                                        * another context has already finished, so it's not too bad
+                                        * that we observe the older value and register a disjoint
+                                        * event when we try soft-stopping */
+                                       if (js_devdata->nr_user_contexts_running >= disjoint_threshold)
+                                               softstop_flags |= JS_COMMAND_SW_CAUSES_DISJOINT;
+                                       kbase_job_slot_softstop_swflags(kbdev,
+                                                       s, atom, softstop_flags);
 #endif
                                } else if (ticks == hard_stop_ticks) {
                                        /* Job has been scheduled for at least js_devdata->hard_stop_ticks_ss ticks.
                                         * It should have been soft-stopped by now. Hard stop the slot.
                                         */
-#if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
                                        dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", (unsigned long)ticks, (unsigned long)(js_devdata->scheduling_tick_ns / 1000000u));
                                        kbase_job_slot_hardstop(atom->kctx, s, atom);
 #endif
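
/*
 * Illustrative sketch only (not driver code): the hunk above decides whether
 * a soft-stop should also raise a "disjoint" flag by comparing a possibly
 * stale context count against a threshold, and the comment explains why the
 * stale read is tolerable.  The flag value and function below are
 * hypothetical stand-ins for JS_COMMAND_SW_CAUSES_DISJOINT and the in-line
 * logic.
 */
#include <stdio.h>

#define SW_CAUSES_DISJOINT (1u << 3)   /* placeholder bit, not the real encoding */

static unsigned int softstop_flags_for(int nr_user_contexts_running,
                                       int disjoint_threshold)
{
        unsigned int flags = 0u;

        /* A stale read is acceptable: a context about to start cannot run
         * jobs before taking the IRQ lock, and a context that just finished
         * only causes one spurious disjoint event. */
        if (nr_user_contexts_running >= disjoint_threshold)
                flags |= SW_CAUSES_DISJOINT;

        return flags;
}

int main(void)
{
        printf("flags=0x%x\n", softstop_flags_for(3, 2));  /* 0x8: disjoint */
        printf("flags=0x%x\n", softstop_flags_for(1, 2));  /* 0x0 */
        return 0;
}
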
@@ -752,7 +780,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
                                         */
                                        reset_needed = MALI_TRUE;
                                }
-#else                          /* !CINSTR_DUMPING_ENABLED */
+#else                          /* !CINSTR_DUMPING_ENABLED */
                                /* NOTE: During CINSTR_DUMPING_ENABLED, we use the alternate timeouts, which
                                 * makes the hard-stop and GPU reset timeout much longer. We also ensure that
                                 * we don't soft-stop at all. */
@@ -765,7 +793,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
                                        /* Job has been scheduled for at least js_devdata->hard_stop_ticks_nss ticks.
                                         * Hard stop the slot.
                                         */
-#if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
+#if !KBASE_DISABLE_SCHEDULING_HARD_STOPS
                                        dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", (unsigned long)ticks, (unsigned long)(js_devdata->scheduling_tick_ns / 1000000u));
                                        kbase_job_slot_hardstop(atom->kctx, s, atom);
 #endif
@@ -779,14 +807,14 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
                        }
                }
        }
-
+#if KBASE_GPU_RESET_EN
        if (reset_needed) {
                dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS/NSS timeout hit). Issuing GPU soft-reset to resolve.");
 
                if (kbase_prepare_to_reset_gpu_locked(kbdev))
                        kbase_reset_gpu_locked(kbdev);
        }
-
+#endif /* KBASE_GPU_RESET_EN */
        /* the timer is re-issued if there are contexts in the run-pool */
 
        if (timer_callback_should_run(kbdev) != MALI_FALSE) {
@@ -805,10 +833,10 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
  * Non-private functions
  */
 
-mali_error kbasep_js_policy_init(kbase_device *kbdev)
+mali_error kbasep_js_policy_init(struct kbase_device *kbdev)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_policy_cfs *policy_info;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        js_devdata = &kbdev->js_data;
@@ -835,9 +863,10 @@ mali_error kbasep_js_policy_init(kbase_device *kbdev)
        return MALI_ERROR_NONE;
 }
 
-void kbasep_js_policy_term(kbasep_js_policy *js_policy)
+void kbasep_js_policy_term(union kbasep_js_policy *js_policy)
 {
-       kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_policy_cfs *policy_info;
+
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        policy_info = &js_policy->cfs;
 
@@ -855,11 +884,11 @@ void kbasep_js_policy_term(kbasep_js_policy *js_policy)
        hrtimer_cancel(&policy_info->scheduling_timer);
 }
 
-mali_error kbasep_js_policy_init_ctx(kbase_device *kbdev, kbase_context *kctx)
+mali_error kbasep_js_policy_init_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_policy_cfs_ctx *ctx_info;
-       kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbasep_js_policy_cfs *policy_info;
        u32 i;
        int policy;
 
@@ -904,10 +933,10 @@ mali_error kbasep_js_policy_init_ctx(kbase_device *kbdev, kbase_context *kctx)
        return MALI_ERROR_NONE;
 }
 
-void kbasep_js_policy_term_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+void kbasep_js_policy_term_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx)
 {
-       kbasep_js_policy_cfs_ctx *ctx_info;
-       kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbasep_js_policy_cfs *policy_info;
        u32 i;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
@@ -917,7 +946,7 @@ void kbasep_js_policy_term_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
 
        {
-               kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);
+               struct kbase_device *kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
                KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_TERM_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
        }
 
@@ -932,16 +961,16 @@ void kbasep_js_policy_term_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
  * Context Management
  */
 
-void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+void kbasep_js_policy_enqueue_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx)
 {
-       kbasep_js_policy_cfs *policy_info;
-       kbasep_js_policy_cfs_ctx *ctx_info;
-       kbase_context *head_ctx;
-       kbase_context *list_kctx = NULL;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbase_context *head_ctx;
+       struct kbase_context *list_kctx = NULL;
+       struct kbasep_js_device_data *js_devdata;
        struct list_head *queue_head;
        struct list_head *pos;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        atomic64_t *least_runtime_us;
        u64 head_runtime;
 
@@ -950,7 +979,7 @@ void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kc
 
        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
-       kbdev = container_of(js_policy, kbase_device, js_data.policy);
+       kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
        js_devdata = &kbdev->js_data;
 
        KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_ENQUEUE_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
@@ -980,9 +1009,9 @@ void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kc
                list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, queue_head);
        } else {
                list_for_each(pos, queue_head) {
-                       kbasep_js_policy_cfs_ctx *list_ctx_info;
+                       struct kbasep_js_policy_cfs_ctx *list_ctx_info;
 
-                       list_kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+                       list_kctx = list_entry(pos, struct kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
                        list_ctx_info = &list_kctx->jctx.sched_info.runpool.policy_ctx.cfs;
 
                        if ((kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0)
@@ -997,24 +1026,24 @@ void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kc
        }
 
        /* Ensure least_runtime_us is up to date*/
-       head_ctx = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+       head_ctx = list_entry(queue_head->next, struct kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
        head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
        atomic64_set(least_runtime_us, head_runtime);
 }
 
-mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_context ** const kctx_ptr)
+mali_bool kbasep_js_policy_dequeue_head_ctx(union kbasep_js_policy *js_policy, struct kbase_context ** const kctx_ptr)
 {
-       kbasep_js_policy_cfs *policy_info;
-       kbase_context *head_ctx;
+       struct kbasep_js_policy_cfs *policy_info;
+       struct kbase_context *head_ctx;
        struct list_head *queue_head;
        atomic64_t *least_runtime_us;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx_ptr != NULL);
 
        policy_info = &js_policy->cfs;
-       kbdev = container_of(js_policy, kbase_device, js_data.policy);
+       kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
 
        /* attempt to dequeue from the 'realtime' queue first */
        if (list_empty(&policy_info->ctx_rt_queue_head)) {
@@ -1031,7 +1060,7 @@ mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_c
        }
 
        /* Contexts are dequeued from the front of the queue */
-       *kctx_ptr = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+       *kctx_ptr = list_entry(queue_head->next, struct kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
        /* If dequeuing will empty the list, then set least_runtime_us prior to deletion */
        if (queue_head->next->next == queue_head)
                atomic64_set(least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
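
/*
 * Sketch (not driver code) of the bookkeeping done around the dequeue above:
 * the policy keeps the head context's accumulated runtime cached in an atomic
 * so that later checks (e.g. kbasep_js_policy_should_remove_ctx() further
 * down in this file) can read it without walking the queue.  A plain array
 * and a sentinel stand in for the list and KBASEP_JS_RUNTIME_EMPTY.
 */
#include <stdint.h>
#include <stdio.h>

#define RUNTIME_EMPTY UINT64_MAX   /* stand-in sentinel */

struct rt_queue {
        uint64_t runtime_us[8];    /* contexts, ordered by accumulated runtime */
        int count;
        uint64_t least_runtime_us; /* cached copy of runtime_us[0] */
};

static int dequeue_head(struct rt_queue *q, uint64_t *out)
{
        int i;

        if (q->count == 0)
                return 0;

        *out = q->runtime_us[0];
        for (i = 1; i < q->count; i++)
                q->runtime_us[i - 1] = q->runtime_us[i];
        q->count--;

        /* Keep the cached least runtime in step with the new head */
        q->least_runtime_us = q->count ? q->runtime_us[0] : RUNTIME_EMPTY;
        return 1;
}

int main(void)
{
        struct rt_queue q = { .runtime_us = { 100, 250, 900 }, .count = 3,
                              .least_runtime_us = 100 };
        uint64_t head;

        while (dequeue_head(&q, &head))
                printf("dequeued %llu, least now %llu\n",
                       (unsigned long long)head,
                       (unsigned long long)q.least_runtime_us);
        return 0;
}
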
@@ -1043,7 +1072,7 @@ mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_c
        if (!list_empty(queue_head)) {
                u64 head_runtime;
 
-               head_ctx = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+               head_ctx = list_entry(queue_head->next, struct kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
 
                /* No need to hold the runpool_irq.lock here for reading - the
                 * context is definitely not being updated in the runpool at this
@@ -1059,22 +1088,22 @@ mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_c
        return MALI_TRUE;
 }
 
-mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+mali_bool kbasep_js_policy_try_evict_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx)
 {
-       kbasep_js_policy_cfs_ctx *ctx_info;
-       kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbasep_js_policy_cfs *policy_info;
        mali_bool is_present;
        struct list_head *queue_head;
        atomic64_t *least_runtime_us;
        struct list_head *qhead;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
 
        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
-       kbdev = container_of(js_policy, kbase_device, js_data.policy);
+       kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
 
        if (ctx_info->process_rt_policy) {
                queue_head = &policy_info->ctx_rt_queue_head;
@@ -1091,7 +1120,8 @@ mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_cont
        KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, JS_POLICY_TRY_EVICT_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx), is_present);
 
        if (is_present != MALI_FALSE) {
-               kbase_context *head_ctx;
+               struct kbase_context *head_ctx;
+
                qhead = queue_head;
 
                /* If dequeuing will empty the list, then set least_runtime_us prior to deletion */
@@ -1106,7 +1136,7 @@ mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_cont
                if (!list_empty(qhead)) {
                        u64 head_runtime;
 
-                       head_ctx = list_entry(qhead->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+                       head_ctx = list_entry(qhead->next, struct kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
 
                        /* No need to hold the runpool_irq.lock here for reading - the
                         * context is definitely not being updated in the runpool at this
@@ -1123,18 +1153,18 @@ mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_cont
        return is_present;
 }
 
-void kbasep_js_policy_foreach_ctx_job(kbasep_js_policy *js_policy, kbase_context *kctx,
+void kbasep_js_policy_foreach_ctx_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx,
        kbasep_js_policy_ctx_job_cb callback, mali_bool detach_jobs)
 {
-       kbasep_js_policy_cfs *policy_info;
-       kbasep_js_policy_cfs_ctx *ctx_info;
-       kbase_device *kbdev;
+       struct kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbase_device *kbdev;
        u32 i;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
 
-       kbdev = container_of(js_policy, kbase_device, js_data.policy);
+       kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
 
@@ -1157,17 +1187,17 @@ void kbasep_js_policy_foreach_ctx_job(kbasep_js_policy *js_policy, kbase_context
 
 }
 
-void kbasep_js_policy_runpool_add_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+void kbasep_js_policy_runpool_add_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx)
 {
-       kbasep_js_policy_cfs *policy_info;
-       kbasep_js_device_data *js_devdata;
-       kbase_device *kbdev;
+       struct kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbase_device *kbdev;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
 
        policy_info = &js_policy->cfs;
-       js_devdata = container_of(js_policy, kbasep_js_device_data, policy);
+       js_devdata = container_of(js_policy, struct kbasep_js_device_data, policy);
 
        kbdev = kctx->kbdev;
 
@@ -1189,9 +1219,9 @@ void kbasep_js_policy_runpool_add_ctx(kbasep_js_policy *js_policy, kbase_context
        }
 }
 
-void kbasep_js_policy_runpool_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+void kbasep_js_policy_runpool_remove_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx)
 {
-       kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_policy_cfs *policy_info;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -1199,7 +1229,8 @@ void kbasep_js_policy_runpool_remove_ctx(kbasep_js_policy *js_policy, kbase_cont
        policy_info = &js_policy->cfs;
 
        {
-               kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);
+               struct kbase_device *kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
+
                KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_RUNPOOL_REMOVE_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx));
        }
 
@@ -1208,14 +1239,13 @@ void kbasep_js_policy_runpool_remove_ctx(kbasep_js_policy *js_policy, kbase_cont
 
        /* No searching or significant list maintenance required to remove this context */
        list_del(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
-
 }
 
-mali_bool kbasep_js_policy_should_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
+mali_bool kbasep_js_policy_should_remove_ctx(union kbasep_js_policy *js_policy, struct kbase_context *kctx)
 {
-       kbasep_js_policy_cfs_ctx *ctx_info;
-       kbasep_js_policy_cfs *policy_info;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbasep_js_policy_cfs *policy_info;
+       struct kbasep_js_device_data *js_devdata;
        u64 least_runtime_us;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
@@ -1223,7 +1253,7 @@ mali_bool kbasep_js_policy_should_remove_ctx(kbasep_js_policy *js_policy, kbase_
 
        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
-       js_devdata = container_of(js_policy, kbasep_js_device_data, policy);
+       js_devdata = container_of(js_policy, struct kbasep_js_device_data, policy);
 
        if (ctx_info->process_rt_policy)
                least_runtime_us = atomic64_read(&policy_info->rt_least_runtime_us);
@@ -1247,16 +1277,16 @@ mali_bool kbasep_js_policy_should_remove_ctx(kbasep_js_policy *js_policy, kbase_
        return MALI_FALSE;
 }
 
-void kbasep_js_policy_runpool_timers_sync(kbasep_js_policy *js_policy)
+void kbasep_js_policy_runpool_timers_sync(union kbasep_js_policy *js_policy)
 {
-       kbasep_js_policy_cfs *policy_info;
-       kbase_device *kbdev;
-       kbasep_js_device_data *js_devdata;
+       struct kbasep_js_policy_cfs *policy_info;
+       struct kbase_device *kbdev;
+       struct kbasep_js_device_data *js_devdata;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
 
        policy_info = &js_policy->cfs;
-       kbdev = container_of(js_policy, kbase_device, js_data.policy);
+       kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
        js_devdata = &kbdev->js_data;
 
        if (!timer_callback_should_run(kbdev)) {
@@ -1282,9 +1312,9 @@ void kbasep_js_policy_runpool_timers_sync(kbasep_js_policy *js_policy)
  * Job Chain Management
  */
 
-mali_error kbasep_js_policy_init_job(const kbasep_js_policy *js_policy, const kbase_context *kctx, kbase_jd_atom *katom)
+mali_error kbasep_js_policy_init_job(const union kbasep_js_policy *js_policy, const struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
-       const kbasep_js_policy_cfs *policy_info;
+       const struct kbasep_js_policy_cfs *policy_info;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
@@ -1297,9 +1327,9 @@ mali_error kbasep_js_policy_init_job(const kbasep_js_policy *js_policy, const kb
        return cached_variant_idx_init(policy_info, kctx, katom);
 }
 
-void kbasep_js_policy_register_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom)
+void kbasep_js_policy_register_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
-       kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
@@ -1316,9 +1346,9 @@ void kbasep_js_policy_register_job(kbasep_js_policy *js_policy, kbase_context *k
                ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
 }
 
-void kbasep_js_policy_deregister_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom)
+void kbasep_js_policy_deregister_job(union kbasep_js_policy *js_policy, struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
-       kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        CSTD_UNUSED(js_policy);
@@ -1339,13 +1369,13 @@ void kbasep_js_policy_deregister_job(kbasep_js_policy *js_policy, kbase_context
 }
 KBASE_EXPORT_TEST_API(kbasep_js_policy_deregister_job)
 
-mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev,
+mali_bool kbasep_js_policy_dequeue_job(struct kbase_device *kbdev,
                                       int job_slot_idx,
-                                      kbase_jd_atom ** const katom_ptr)
+                                      struct kbase_jd_atom ** const katom_ptr)
 {
-       kbasep_js_device_data *js_devdata;
-       kbasep_js_policy_cfs *policy_info;
-       kbase_context *kctx;
+       struct kbasep_js_device_data *js_devdata;
+       struct kbasep_js_policy_cfs *policy_info;
+       struct kbase_context *kctx;
        u32 variants_supported;
        struct list_head *pos;
 
@@ -1367,7 +1397,7 @@ mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev,
 
        /* First pass through the runpool we consider the realtime priority jobs */
        list_for_each(pos, &policy_info->scheduled_ctxs_head) {
-               kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+               kctx = list_entry(pos, struct kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
                if (kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy) {
                        if (dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx)) {
                                /* Realtime policy job matched */
@@ -1378,7 +1408,7 @@ mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev,
 
        /* Second pass through the runpool we consider the non-realtime priority jobs */
        list_for_each(pos, &policy_info->scheduled_ctxs_head) {
-               kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
+               kctx = list_entry(pos, struct kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
                if (kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy == MALI_FALSE) {
                        if (dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx)) {
                                /* Non-realtime policy job matched */
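
/*
 * Sketch of the two-pass selection above: realtime-priority contexts in the
 * run-pool are offered the slot first, and only if none of them has a
 * matching job does the policy fall back to the remaining contexts.  The
 * types and has_job flag are illustrative stand-ins, not driver structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx { const char *name; bool rt; bool has_job; };

static const struct ctx *pick_ctx(const struct ctx *pool, int n)
{
        int i;

        for (i = 0; i < n; i++)           /* pass 1: realtime contexts */
                if (pool[i].rt && pool[i].has_job)
                        return &pool[i];

        for (i = 0; i < n; i++)           /* pass 2: everything else */
                if (!pool[i].rt && pool[i].has_job)
                        return &pool[i];

        return NULL;                      /* nothing runnable on this slot */
}

int main(void)
{
        struct ctx pool[] = {
                { "compositor", false, true },
                { "video",      true,  true },
        };
        const struct ctx *c = pick_ctx(pool, 2);

        printf("picked: %s\n", c ? c->name : "none");   /* "video" */
        return 0;
}
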
@@ -1391,11 +1421,11 @@ mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev,
        return MALI_FALSE;
 }
 
-void kbasep_js_policy_enqueue_job(kbasep_js_policy *js_policy, kbase_jd_atom *katom)
+void kbasep_js_policy_enqueue_job(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom)
 {
-       kbasep_js_policy_cfs_job *job_info;
-       kbasep_js_policy_cfs_ctx *ctx_info;
-       kbase_context *parent_ctx;
+       struct kbasep_js_policy_cfs_job *job_info;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbase_context *parent_ctx;
 
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
@@ -1406,16 +1436,16 @@ void kbasep_js_policy_enqueue_job(kbasep_js_policy *js_policy, kbase_jd_atom *ka
        ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
 
        {
-               kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);
+               struct kbase_device *kbdev = container_of(js_policy, struct kbase_device, js_data.policy);
                KBASE_TRACE_ADD(kbdev, JS_POLICY_ENQUEUE_JOB, katom->kctx, katom, katom->jc, 0);
        }
        list_add_tail(&katom->sched_info.cfs.list, &ctx_info->job_list_head[job_info->cached_variant_idx]);
 }
 
-void kbasep_js_policy_log_job_result(kbasep_js_policy *js_policy, kbase_jd_atom *katom, u64 time_spent_us)
+void kbasep_js_policy_log_job_result(union kbasep_js_policy *js_policy, struct kbase_jd_atom *katom, u64 time_spent_us)
 {
-       kbasep_js_policy_cfs_ctx *ctx_info;
-       kbase_context *parent_ctx;
+       struct kbasep_js_policy_cfs_ctx *ctx_info;
+       struct kbase_context *parent_ctx;
        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
        CSTD_UNUSED(js_policy);
@@ -1426,12 +1456,14 @@ void kbasep_js_policy_log_job_result(kbasep_js_policy *js_policy, kbase_jd_atom
        ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
 
        ctx_info->runtime_us += priority_weight(ctx_info, time_spent_us);
+
+       katom->time_spent_us += time_spent_us;
 }
 
-mali_bool kbasep_js_policy_ctx_has_priority(kbasep_js_policy *js_policy, kbase_context *current_ctx, kbase_context *new_ctx)
+mali_bool kbasep_js_policy_ctx_has_priority(union kbasep_js_policy *js_policy, struct kbase_context *current_ctx, struct kbase_context *new_ctx)
 {
-       kbasep_js_policy_cfs_ctx *current_ctx_info;
-       kbasep_js_policy_cfs_ctx *new_ctx_info;
+       struct kbasep_js_policy_cfs_ctx *current_ctx_info;
+       struct kbasep_js_policy_cfs_ctx *new_ctx_info;
 
        KBASE_DEBUG_ASSERT(current_ctx != NULL);
        KBASE_DEBUG_ASSERT(new_ctx != NULL);
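
/*
 * Sketch of the accounting in kbasep_js_policy_log_job_result() above: each
 * completed job charges its context with a priority-weighted share of the
 * time it spent on the GPU, and the raw time is also accumulated on the atom.
 * The weight function below is purely an assumption for illustration - the
 * real priority_weight() is not shown in this diff.
 */
#include <stdint.h>
#include <stdio.h>

struct cfs_ctx { int bag_priority; uint64_t runtime_us; };
struct atom    { uint64_t time_spent_us; };

/* hypothetical weight: a less-favoured (higher) bag_priority is charged more
 * per real microsecond, so it falls behind in the runtime-ordered queue;
 * assumes bag_priority in roughly the -20..19 nice-like range */
static uint64_t weighted(const struct cfs_ctx *ctx, uint64_t us)
{
        return us + (uint64_t)(ctx->bag_priority + 20) * us / 40;
}

static void log_job_result(struct cfs_ctx *ctx, struct atom *katom, uint64_t us)
{
        ctx->runtime_us += weighted(ctx, us);   /* drives queue ordering */
        katom->time_spent_us += us;             /* raw per-atom bookkeeping */
}

int main(void)
{
        struct cfs_ctx ctx = { .bag_priority = 0 };
        struct atom katom = { 0 };

        log_job_result(&ctx, &katom, 1000);
        printf("ctx charged %llu us, atom ran %llu us\n",
               (unsigned long long)ctx.runtime_us,
               (unsigned long long)katom.time_spent_us);
        return 0;
}
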
index 9c4f3c66bb60a48dc8c9fa560cfc9c86c61d4c2f..099f25799b4b8daa152337900cd1a30b3a8cdedc 100755 (executable)
@@ -68,7 +68,7 @@ typedef struct kbasep_js_policy_cfs {
        u32 num_core_req_variants;
 
        /** Variants of the core requirements */
-       kbasep_atom_req core_req_variants[KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS];
+       struct kbasep_atom_req core_req_variants[KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS];
 
        /* Lookups per job slot against which core_req_variants match it */
        u32 slot_to_variant_lookup_ss_state[KBASEP_JS_VARIANT_LOOKUP_WORDS_NEEDED];
index f1f44769c9b9ebf7e9d9fe133b27fdb4d5d11139..2774d836b65c7952cbf6131cca30256aa13c6270 100755 (executable)
@@ -73,44 +73,19 @@ static void kbase_region_tracker_insert(struct kbase_context *kctx, struct kbase
        rb_insert_color(&(new_reg->rblink), &(kctx->reg_rbtree));
 }
 
-/* Find allocated region enclosing range. */
-struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range(kbase_context *kctx, u64 start_pfn, size_t nr_pages)
-{
-       struct rb_node *rbnode;
-       struct kbase_va_region *reg;
-       u64 end_pfn = start_pfn + nr_pages;
-
-       rbnode = kctx->reg_rbtree.rb_node;
-
-       while (rbnode) {
-               u64 tmp_start_pfn, tmp_end_pfn;
-               reg = rb_entry(rbnode, struct kbase_va_region, rblink);
-               tmp_start_pfn = reg->start_pfn;
-               tmp_end_pfn = reg->start_pfn + kbase_reg_current_backed_size(reg);
-
-               /* If start is lower than this, go left. */
-               if (start_pfn < tmp_start_pfn)
-                       rbnode = rbnode->rb_left;
-               /* If end is higher than this, then go right. */
-               else if (end_pfn > tmp_end_pfn)
-                       rbnode = rbnode->rb_right;
-               else    /* Enclosing */
-                       return reg;
-       }
-
-       return NULL;
-}
-
 /* Find allocated region enclosing free range. */
-struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free(kbase_context *kctx, u64 start_pfn, size_t nr_pages)
+static struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free(
+               struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)
 {
        struct rb_node *rbnode;
        struct kbase_va_region *reg;
+
        u64 end_pfn = start_pfn + nr_pages;
 
        rbnode = kctx->reg_rbtree.rb_node;
        while (rbnode) {
                u64 tmp_start_pfn, tmp_end_pfn;
+
                reg = rb_entry(rbnode, struct kbase_va_region, rblink);
                tmp_start_pfn = reg->start_pfn;
                tmp_end_pfn = reg->start_pfn + reg->nr_pages;
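
/*
 * Sketch of the lookup logic in
 * kbase_region_tracker_find_region_enclosing_range_free() above: the region
 * tree is keyed by start_pfn, regions are disjoint, and a query range is
 * enclosed by a node iff it starts no earlier and ends no later than that
 * node.  A plain binary search tree stands in for the kernel rbtree here.
 */
#include <stddef.h>
#include <stdio.h>

struct region {
        unsigned long long start_pfn;
        size_t nr_pages;
        struct region *left, *right;   /* children ordered by start_pfn */
};

static const struct region *find_enclosing(const struct region *node,
                unsigned long long start_pfn, size_t nr_pages)
{
        unsigned long long end_pfn = start_pfn + nr_pages;

        while (node) {
                unsigned long long node_end = node->start_pfn + node->nr_pages;

                if (start_pfn < node->start_pfn)
                        node = node->left;          /* query starts before node */
                else if (end_pfn > node_end)
                        node = node->right;         /* query ends after node */
                else
                        return node;                /* node encloses the query */
        }
        return NULL;
}

int main(void)
{
        struct region low  = { .start_pfn = 0x1000, .nr_pages = 0x100 };
        struct region high = { .start_pfn = 0x9000, .nr_pages = 0x400 };
        struct region root = { .start_pfn = 0x4000, .nr_pages = 0x1000,
                               .left = &low, .right = &high };

        printf("%p\n", (const void *)find_enclosing(&root, 0x4800, 0x80)); /* &root */
        printf("%p\n", (const void *)find_enclosing(&root, 0x0800, 0x10)); /* NULL */
        return 0;
}
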
@@ -129,7 +104,7 @@ struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free(kb
 }
 
 /* Find region enclosing given address. */
-kbase_va_region *kbase_region_tracker_find_region_enclosing_address(kbase_context *kctx, mali_addr64 gpu_addr)
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, mali_addr64 gpu_addr)
 {
        struct rb_node *rbnode;
        struct kbase_va_region *reg;
@@ -142,6 +117,7 @@ kbase_va_region *kbase_region_tracker_find_region_enclosing_address(kbase_contex
        rbnode = kctx->reg_rbtree.rb_node;
        while (rbnode) {
                u64 tmp_start_pfn, tmp_end_pfn;
+
                reg = rb_entry(rbnode, struct kbase_va_region, rblink);
                tmp_start_pfn = reg->start_pfn;
                tmp_end_pfn = reg->start_pfn + reg->nr_pages;
@@ -162,7 +138,7 @@ kbase_va_region *kbase_region_tracker_find_region_enclosing_address(kbase_contex
 KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address)
 
 /* Find region with given base address */
-kbase_va_region *kbase_region_tracker_find_region_base_address(kbase_context *kctx, mali_addr64 gpu_addr)
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, mali_addr64 gpu_addr)
 {
        u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
        struct rb_node *rbnode;
@@ -191,7 +167,7 @@ kbase_va_region *kbase_region_tracker_find_region_base_address(kbase_context *kc
 KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address)
 
 /* Find region meeting given requirements */
-static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align)
+static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align)
 {
        struct rb_node *rbnode;
        struct kbase_va_region *reg;
@@ -201,11 +177,15 @@ static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(kba
        rbnode = rb_first(&(kctx->reg_rbtree));
        while (rbnode) {
                reg = rb_entry(rbnode, struct kbase_va_region, rblink);
-               if ((reg->nr_pages >= nr_pages) && (reg->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(reg, reg_reqs)) {
-
+               if ((reg->nr_pages >= nr_pages) &&
+                               (reg->flags & KBASE_REG_FREE) &&
+                               kbase_region_tracker_match_zone(reg, reg_reqs)) {
                        /* Check alignment */
                        u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1);
-                       if ((start_pfn >= reg->start_pfn) && (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) && ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1)))
+
+                       if ((start_pfn >= reg->start_pfn) &&
+                                       (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
+                                       ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1)))
                                return reg;
                }
                rbnode = rb_next(rbnode);
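
/*
 * Quick worked example of the alignment rounding used above when probing a
 * free region: for a power-of-two align, (pfn + align - 1) & ~(align - 1)
 * rounds pfn up to the next multiple of align.
 */
#include <stdio.h>

static unsigned long long align_up(unsigned long long pfn, unsigned long long align)
{
        return (pfn + align - 1) & ~(align - 1);   /* align must be a power of two */
}

int main(void)
{
        printf("0x%llx\n", align_up(0x1001, 0x100)); /* 0x1100 */
        printf("0x%llx\n", align_up(0x1000, 0x100)); /* 0x1000: already aligned */
        return 0;
}
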
@@ -222,7 +202,7 @@ static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(kba
  * region lock held. The associated memory is not released (see
  * kbase_free_alloced_region). Internal use only.
  */
-STATIC mali_error kbase_remove_va_region(kbase_context *kctx, struct kbase_va_region *reg)
+STATIC mali_error kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_region *reg)
 {
        struct rb_node *rbprev;
        struct kbase_va_region *prev = NULL;
@@ -290,7 +270,7 @@ KBASE_EXPORT_TEST_API(kbase_remove_va_region)
 /**
  * @brief Insert a VA region to the list, replacing the current at_reg.
  */
-static mali_error kbase_insert_va_region_nolock(kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
+static mali_error kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
 {
        mali_error err = MALI_ERROR_NONE;
 
@@ -325,7 +305,13 @@ static mali_error kbase_insert_va_region_nolock(kbase_context *kctx, struct kbas
        }
        /* New region splits the old one, so insert and create new */
        else {
-               struct kbase_va_region *new_front_reg = kbase_alloc_free_region(kctx, at_reg->start_pfn, start_pfn - at_reg->start_pfn, at_reg->flags & KBASE_REG_ZONE_MASK);
+               struct kbase_va_region *new_front_reg;
+
+               new_front_reg = kbase_alloc_free_region(kctx,
+                               at_reg->start_pfn,
+                               start_pfn - at_reg->start_pfn,
+                               at_reg->flags & KBASE_REG_ZONE_MASK);
+
                if (new_front_reg) {
                        at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
                        at_reg->start_pfn = start_pfn + nr_pages;
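
/*
 * Sketch of the split arithmetic above: mapping nr_pages at start_pfn in the
 * middle of a free region leaves a front remainder (the new region) and
 * shrinks the original region to the tail remainder.  Plain structs stand in
 * for kbase_va_region.
 */
#include <stdio.h>

struct span { unsigned long long start_pfn; unsigned long long nr_pages; };

static void split_free_region(struct span *at, struct span *front,
                unsigned long long start_pfn, unsigned long long nr_pages)
{
        front->start_pfn = at->start_pfn;
        front->nr_pages  = start_pfn - at->start_pfn;     /* pages before the mapping */

        at->nr_pages -= nr_pages + front->nr_pages;       /* pages after the mapping */
        at->start_pfn = start_pfn + nr_pages;
}

int main(void)
{
        struct span at = { .start_pfn = 0x100, .nr_pages = 0x80 };
        struct span front;

        split_free_region(&at, &front, 0x120, 0x10);
        /* front: 0x100 +0x20, mapped: 0x120 +0x10, tail: 0x130 +0x50 */
        printf("front %llx+%llx, tail %llx+%llx\n",
               front.start_pfn, front.nr_pages, at.start_pfn, at.nr_pages);
        return 0;
}
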
@@ -343,7 +329,9 @@ static mali_error kbase_insert_va_region_nolock(kbase_context *kctx, struct kbas
 /**
  * @brief Add a VA region to the list.
  */
-mali_error kbase_add_va_region(kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align)
+mali_error kbase_add_va_region(struct kbase_context *kctx,
+               struct kbase_va_region *reg, mali_addr64 addr,
+               size_t nr_pages, size_t align)
 {
        struct kbase_va_region *tmp;
        u64 gpu_pfn = addr >> PAGE_SHIFT;
@@ -364,6 +352,7 @@ mali_error kbase_add_va_region(kbase_context *kctx, struct kbase_va_region *reg,
        /* Path 1: Map a specific address. Find the enclosing region, which *must* be free. */
        if (gpu_pfn) {
                struct device *dev = kctx->kbdev->dev;
+
                KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
 
                tmp = kbase_region_tracker_find_region_enclosing_range_free(kctx, gpu_pfn, nr_pages);
@@ -373,7 +362,8 @@ mali_error kbase_add_va_region(kbase_context *kctx, struct kbase_va_region *reg,
                        goto exit;
                }
 
-               if ((!kbase_region_tracker_match_zone(tmp, reg)) || (!(tmp->flags & KBASE_REG_FREE))) {
+               if ((!kbase_region_tracker_match_zone(tmp, reg)) ||
+                               (!(tmp->flags & KBASE_REG_FREE))) {
                        dev_warn(dev, "Zone mismatch: %lu != %lu", tmp->flags & KBASE_REG_ZONE_MASK, reg->flags & KBASE_REG_ZONE_MASK);
                        dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n", tmp->start_pfn, tmp->flags, tmp->nr_pages, gpu_pfn, nr_pages);
                        dev_warn(dev, "in function %s (%p, %p, 0x%llx, 0x%zx, 0x%zx)\n", __func__, kctx, reg, addr, nr_pages, align);
@@ -394,6 +384,7 @@ mali_error kbase_add_va_region(kbase_context *kctx, struct kbase_va_region *reg,
        /* Path 2: Map any free address which meets the requirements.  */
        {
                u64 start_pfn;
+
                tmp = kbase_region_tracker_find_region_meeting_reqs(kctx, reg, nr_pages, align);
                if (!tmp) {
                        err = MALI_ERROR_OUT_OF_GPU_MEMORY;
@@ -412,7 +403,7 @@ KBASE_EXPORT_TEST_API(kbase_add_va_region)
 /**
  * @brief Initialize the internal region tracker data structure.
  */
-static void kbase_region_tracker_ds_init(kbase_context *kctx, struct kbase_va_region *same_va_reg, struct kbase_va_region *exec_reg, struct kbase_va_region *custom_va_reg)
+static void kbase_region_tracker_ds_init(struct kbase_context *kctx, struct kbase_va_region *same_va_reg, struct kbase_va_region *exec_reg, struct kbase_va_region *custom_va_reg)
 {
        kctx->reg_rbtree = RB_ROOT;
        kbase_region_tracker_insert(kctx, same_va_reg);
@@ -424,10 +415,11 @@ static void kbase_region_tracker_ds_init(kbase_context *kctx, struct kbase_va_re
        }
 }
 
-void kbase_region_tracker_term(kbase_context *kctx)
+void kbase_region_tracker_term(struct kbase_context *kctx)
 {
        struct rb_node *rbnode;
        struct kbase_va_region *reg;
+
        do {
                rbnode = rb_first(&(kctx->reg_rbtree));
                if (rbnode) {
@@ -441,7 +433,7 @@ void kbase_region_tracker_term(kbase_context *kctx)
 /**
  * Initialize the region tracker data structure.
  */
-mali_error kbase_region_tracker_init(kbase_context *kctx)
+mali_error kbase_region_tracker_init(struct kbase_context *kctx)
 {
        struct kbase_va_region *same_va_reg;
        struct kbase_va_region *exec_reg = NULL;
@@ -461,13 +453,18 @@ mali_error kbase_region_tracker_init(kbase_context *kctx)
 #ifdef CONFIG_64BIT
        if (is_compat_task())
                same_va_bits = 32;
+       else if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
+               same_va_bits = 33;
 #endif
 
        if (kctx->kbdev->gpu_props.mmu.va_bits < same_va_bits)
                return MALI_ERROR_FUNCTION_FAILED;
 
        /* all have SAME_VA */
-       same_va_reg = kbase_alloc_free_region(kctx, 1, (1ULL << (same_va_bits - PAGE_SHIFT)) - 2, KBASE_REG_ZONE_SAME_VA);
+       same_va_reg = kbase_alloc_free_region(kctx, 1,
+                       (1ULL << (same_va_bits - PAGE_SHIFT)) - 2,
+                       KBASE_REG_ZONE_SAME_VA);
+
        if (!same_va_reg)
                return MALI_ERROR_OUT_OF_MEMORY;
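
/*
 * Worked example of the SAME_VA zone sizing above, assuming PAGE_SHIFT == 12
 * (4 KiB pages): the zone starts at PFN 1 and spans
 * (1 << (same_va_bits - PAGE_SHIFT)) - 2 pages, i.e. everything addressable
 * with same_va_bits except the first and last page.  33-bit VA is the case
 * newly enabled by the BASE_HW_FEATURE_33BIT_VA branch.
 */
#include <stdio.h>

int main(void)
{
        const unsigned page_shift = 12;            /* assumed 4 KiB pages */
        unsigned bits[] = { 32, 33, 48 };
        unsigned i;

        for (i = 0; i < 3; i++) {
                unsigned long long pages = (1ULL << (bits[i] - page_shift)) - 2;

                printf("same_va_bits=%u -> %llu pages (~%llu MiB)\n",
                       bits[i], pages, (pages << page_shift) >> 20);
        }
        return 0;
}
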
 
@@ -479,20 +476,27 @@ mali_error kbase_region_tracker_init(kbase_context *kctx)
                        kbase_free_alloced_region(same_va_reg);
                        return MALI_ERROR_FUNCTION_FAILED;
                }
-               /* If the current size of TMEM is out of range of the 
+               /* If the current size of TMEM is out of range of the
                 * virtual address space addressable by the MMU then
                 * we should shrink it to fit
                 */
-               if( (KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit )
+               if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit)
                        custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE;
 
-               exec_reg = kbase_alloc_free_region(kctx, KBASE_REG_ZONE_EXEC_BASE, KBASE_REG_ZONE_EXEC_SIZE, KBASE_REG_ZONE_EXEC);
+               exec_reg = kbase_alloc_free_region(kctx,
+                               KBASE_REG_ZONE_EXEC_BASE,
+                               KBASE_REG_ZONE_EXEC_SIZE,
+                               KBASE_REG_ZONE_EXEC);
+
                if (!exec_reg) {
                        kbase_free_alloced_region(same_va_reg);
                        return MALI_ERROR_OUT_OF_MEMORY;
                }
 
-               custom_va_reg = kbase_alloc_free_region(kctx, KBASE_REG_ZONE_CUSTOM_VA_BASE, custom_va_size, KBASE_REG_ZONE_CUSTOM_VA);
+               custom_va_reg = kbase_alloc_free_region(kctx,
+                               KBASE_REG_ZONE_CUSTOM_VA_BASE,
+                               custom_va_size, KBASE_REG_ZONE_CUSTOM_VA);
+
                if (!custom_va_reg) {
                        kbase_free_alloced_region(same_va_reg);
                        kbase_free_alloced_region(exec_reg);
@@ -509,7 +513,8 @@ mali_error kbase_region_tracker_init(kbase_context *kctx)
 
 mali_error kbase_mem_init(struct kbase_device *kbdev)
 {
-       kbasep_mem_device *memdev;
+       struct kbasep_mem_device *memdev;
+
        KBASE_DEBUG_ASSERT(kbdev);
 
        memdev = &kbdev->memdev;
@@ -517,18 +522,18 @@ mali_error kbase_mem_init(struct kbase_device *kbdev)
        /* Initialize memory usage */
        atomic_set(&memdev->used_pages, 0);
 
-       /* nothing to do, zero-inited when kbase_device was created */
+       /* nothing to do, zero-inited when struct kbase_device was created */
        return MALI_ERROR_NONE;
 }
 
-void kbase_mem_halt(kbase_device *kbdev)
+void kbase_mem_halt(struct kbase_device *kbdev)
 {
        CSTD_UNUSED(kbdev);
 }
 
-void kbase_mem_term(kbase_device *kbdev)
+void kbase_mem_term(struct kbase_device *kbdev)
 {
-       kbasep_mem_device *memdev;
+       struct kbasep_mem_device *memdev;
        int pages;
 
        KBASE_DEBUG_ASSERT(kbdev);
@@ -549,14 +554,17 @@ KBASE_EXPORT_TEST_API(kbase_mem_term)
  * @note If GPU resets occur then the counters are reset to zero, the delay may not be as expected.
  */
 #ifndef CONFIG_MALI_NO_MALI
-void kbase_wait_write_flush(kbase_context *kctx)
+void kbase_wait_write_flush(struct kbase_context *kctx)
 {
        u32 base_count = 0;
+
        /* A suspend won't happen here, because we're in a syscall from a userspace thread */
        kbase_pm_context_active(kctx->kbdev);
        kbase_pm_request_gpu_cycle_counter(kctx->kbdev);
+
        while (MALI_TRUE) {
                u32 new_count;
+
                new_count = kbase_reg_read(kctx->kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
                /* First time around, just store the count. */
                if (base_count == 0) {
@@ -568,6 +576,7 @@ void kbase_wait_write_flush(kbase_context *kctx)
                if ((new_count - base_count) > 1000)
                        break;
        }
+
        kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
        kbase_pm_context_idle(kctx->kbdev);
 }
@@ -584,7 +593,7 @@ void kbase_wait_write_flush(kbase_context *kctx)
  * zone is KBASE_REG_ZONE_CUSTOM_VA, KBASE_REG_ZONE_SAME_VA, or KBASE_REG_ZONE_EXEC
  *
  */
-struct kbase_va_region *kbase_alloc_free_region(kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)
+struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)
 {
        struct kbase_va_region *new_reg;
 
@@ -609,7 +618,7 @@ struct kbase_va_region *kbase_alloc_free_region(kbase_context *kctx, u64 start_p
        new_reg->flags |= KBASE_REG_GROWABLE;
 
        /* Set up default MEMATTR usage */
-       new_reg->flags |= KBASE_REG_MEMATTR_INDEX(ASn_MEMATTR_INDEX_DEFAULT);
+       new_reg->flags |= KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
 
        new_reg->start_pfn = start_pfn;
        new_reg->nr_pages = nr_pages;
@@ -625,7 +634,7 @@ KBASE_EXPORT_TEST_API(kbase_alloc_free_region)
  * The described region must be freed of any mapping.
  *
  * If the region is not flagged as KBASE_REG_FREE, the region's
- * alloc object will be released. 
+ * alloc object will be released.
  * It is a bug if no alloc object exists for non-free regions.
  *
  */
@@ -634,59 +643,51 @@ void kbase_free_alloced_region(struct kbase_va_region *reg)
        KBASE_DEBUG_ASSERT(NULL != reg);
        if (!(reg->flags & KBASE_REG_FREE)) {
                kbase_mem_phy_alloc_put(reg->alloc);
-               KBASE_DEBUG_CODE(
-                                       /* To detect use-after-free in debug builds */
-                                       reg->flags |= KBASE_REG_FREE);
+               /* To detect use-after-free in debug builds */
+               KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
        }
        kfree(reg);
 }
 
 KBASE_EXPORT_TEST_API(kbase_free_alloced_region)
 
-void kbase_mmu_update(kbase_context *kctx)
+void kbase_mmu_update(struct kbase_context *kctx)
 {
-       /* Use GPU implementation-defined caching policy. */
-       u64 mem_attrs;
-       u32 pgd_high;
+       struct kbase_device *kbdev;
+       struct kbase_as *as;
+       struct kbase_mmu_setup *current_setup;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
-       mem_attrs = kctx->mem_attrs;
+
        /* ASSERT that the context has a valid as_nr, which is only the case
         * when it's scheduled in.
         *
         * as_nr won't change because the caller has the runpool_irq lock */
        KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
 
-       pgd_high = sizeof(kctx->pgd) > 4 ? (kctx->pgd >> 32) : 0;
-
-       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_TRANSTAB_LO),
-                       (kctx->pgd & ASn_TRANSTAB_ADDR_SPACE_MASK) |
-                       ASn_TRANSTAB_READ_INNER | ASn_TRANSTAB_ADRMODE_TABLE,
-                       kctx);
+       kbdev = kctx->kbdev;
+       as = &kbdev->as[kctx->as_nr];
+       current_setup = &as->current_setup;
 
-       /* Need to use a conditional expression to avoid
-        * "right shift count >= width of type" error when using an if statement
-        * - although the size_of condition is evaluated at compile time the
-        * unused branch is not removed until after it is type-checked and the
-        * error produced.
-        */
-       pgd_high = sizeof(kctx->pgd) > 4 ? (kctx->pgd >> 32) : 0;
+       /* Use GPU implementation-defined caching policy. */
+       current_setup->memattr = kctx->mem_attrs;
 
-       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_TRANSTAB_HI),
-                       pgd_high, kctx);
+       current_setup->transtab = (u64) kctx->pgd & ((0xFFFFFFFFULL << 32) | AS_TRANSTAB_ADDR_SPACE_MASK);
+       current_setup->transtab |= AS_TRANSTAB_READ_INNER |
+                                  AS_TRANSTAB_ADRMODE_TABLE;
 
-       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_MEMATTR_LO),
-                       mem_attrs        & 0xFFFFFFFFUL, kctx);
-       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_MEMATTR_HI),
-                       (mem_attrs >> 32) & 0xFFFFFFFFUL, kctx);
-       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND),
-                       ASn_COMMAND_UPDATE, kctx);
+       /* Apply the address space setting */
+       kbase_mmu_hw_configure(kbdev, as, kctx);
 }
 
 KBASE_EXPORT_TEST_API(kbase_mmu_update)
 
-void kbase_mmu_disable(kbase_context *kctx)
+void kbase_mmu_disable(struct kbase_context *kctx)
 {
+       struct kbase_device *kbdev;
+       struct kbase_as *as;
+       struct kbase_mmu_setup *current_setup;
+
        KBASE_DEBUG_ASSERT(NULL != kctx);
        /* ASSERT that the context has a valid as_nr, which is only the case
         * when it's scheduled in.
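
/*
 * Sketch of the refactor above: instead of poking the TRANSTAB/MEMATTR
 * registers one by one, kbase_mmu_update() and kbase_mmu_disable() now fill
 * in a per-address-space "current_setup" and hand it to
 * kbase_mmu_hw_configure() in one go.  The bit values below are placeholders,
 * not the real AS_TRANSTAB_* encodings.
 */
#include <stdint.h>
#include <stdio.h>

#define TRANSTAB_ADDR_SPACE_MASK 0xFFFFF000ULL   /* placeholder mask */
#define TRANSTAB_READ_INNER      (1ULL << 2)     /* placeholder flag */
#define TRANSTAB_ADRMODE_TABLE   (3ULL << 0)     /* placeholder flag */

struct mmu_setup { uint64_t transtab; uint64_t memattr; };

static void hw_configure(const struct mmu_setup *s)
{
        /* In the driver this is where the AS registers would be written and
         * the UPDATE command issued. */
        printf("transtab=%#llx memattr=%#llx\n",
               (unsigned long long)s->transtab, (unsigned long long)s->memattr);
}

static void mmu_update(struct mmu_setup *s, uint64_t pgd, uint64_t mem_attrs)
{
        s->memattr  = mem_attrs;
        s->transtab = (pgd & ((0xFFFFFFFFULL << 32) | TRANSTAB_ADDR_SPACE_MASK))
                        | TRANSTAB_READ_INNER | TRANSTAB_ADRMODE_TABLE;
        hw_configure(s);
}

static void mmu_disable(struct mmu_setup *s)
{
        s->transtab = 0ULL;          /* a zero translation table base disables the AS */
        hw_configure(s);
}

int main(void)
{
        struct mmu_setup setup = { 0 };

        mmu_update(&setup, 0x12345000ULL, 0x8888888888888888ULL);
        mmu_disable(&setup);
        return 0;
}
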
@@ -694,17 +695,23 @@ void kbase_mmu_disable(kbase_context *kctx)
         * as_nr won't change because the caller has the runpool_irq lock */
        KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
 
-       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_TRANSTAB_LO), 0, kctx);
-       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_TRANSTAB_HI), 0, kctx);
-       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UPDATE, kctx);
+       kbdev = kctx->kbdev;
+       as = &kbdev->as[kctx->as_nr];
+       current_setup = &as->current_setup;
+
+       current_setup->transtab = 0ULL;
+
+       /* Apply the address space setting */
+       kbase_mmu_hw_configure(kbdev, as, kctx);
 }
 
 KBASE_EXPORT_TEST_API(kbase_mmu_disable)
 
-mali_error kbase_gpu_mmap(kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align)
+mali_error kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align)
 {
        mali_error err;
        size_t i = 0;
+
        KBASE_DEBUG_ASSERT(NULL != kctx);
        KBASE_DEBUG_ASSERT(NULL != reg);
 
@@ -714,6 +721,7 @@ mali_error kbase_gpu_mmap(kbase_context *kctx, struct kbase_va_region *reg, mali
 
        if (reg->alloc->type == KBASE_MEM_TYPE_ALIAS) {
                u64 stride;
+
                stride = reg->alloc->imported.alias.stride;
                KBASE_DEBUG_ASSERT(reg->alloc->imported.alias.aliased);
                for (i = 0; i < reg->alloc->imported.alias.nents; i++) {
@@ -732,7 +740,7 @@ mali_error kbase_gpu_mmap(kbase_context *kctx, struct kbase_va_region *reg, mali
                                                reg->start_pfn + i * stride,
                                                kctx->aliasing_sink_page,
                                                reg->alloc->imported.alias.aliased[i].length,
-                                               (reg->flags & ~KBASE_REG_MEMATTR_MASK) | KBASE_REG_MEMATTR_INDEX(ASn_MEMATTR_INDEX_WRITE_ALLOC)
+                                               (reg->flags & ~KBASE_REG_MEMATTR_MASK) | KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC)
                                                );
                                if (MALI_ERROR_NONE != err)
                                        goto bad_insert;
@@ -753,6 +761,7 @@ mali_error kbase_gpu_mmap(kbase_context *kctx, struct kbase_va_region *reg, mali
 bad_insert:
        if (reg->alloc->type == KBASE_MEM_TYPE_ALIAS) {
                u64 stride;
+
                stride = reg->alloc->imported.alias.stride;
                KBASE_DEBUG_ASSERT(reg->alloc->imported.alias.aliased);
                while (i--)
@@ -769,7 +778,7 @@ bad_insert:
 
 KBASE_EXPORT_TEST_API(kbase_gpu_mmap)
 
-mali_error kbase_gpu_munmap(kbase_context *kctx, struct kbase_va_region *reg)
+mali_error kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
 {
        mali_error err;
 
@@ -778,6 +787,7 @@ mali_error kbase_gpu_munmap(kbase_context *kctx, struct kbase_va_region *reg)
 
        if (reg->alloc && reg->alloc->type == KBASE_MEM_TYPE_ALIAS) {
                size_t i;
+
                err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages);
                KBASE_DEBUG_ASSERT(reg->alloc->imported.alias.aliased);
                for (i = 0; i < reg->alloc->imported.alias.nents; i++)
@@ -817,24 +827,22 @@ STATIC struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping_of_region(con
 
 KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_of_region)
 
-mali_error kbasep_find_enclosing_cpu_mapping_offset(kbase_context *kctx,
-                                                         mali_addr64 gpu_addr,
-                                                          unsigned long uaddr,
-                                                                  size_t size,
-                                                          mali_size64 *offset)
+mali_error kbasep_find_enclosing_cpu_mapping_offset(
+       struct kbase_context *kctx, mali_addr64 gpu_addr,
+       unsigned long uaddr, size_t size, mali_size64 *offset)
 {
        struct kbase_cpu_mapping *map = NULL;
        const struct kbase_va_region *reg;
        mali_error err = MALI_ERROR_FUNCTION_FAILED;
+
        KBASE_DEBUG_ASSERT(kctx != NULL);
 
        kbase_gpu_vm_lock(kctx);
 
-       reg = kbase_region_tracker_find_region_enclosing_address(kctx,
-                                                                    gpu_addr);
-       if (reg) {
+       reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+       if (reg && !(reg->flags & KBASE_REG_FREE)) {
                map = kbasep_find_enclosing_cpu_mapping_of_region(reg, uaddr,
-                                                                        size);
+                               size);
                if (map) {
                        *offset = (uaddr - PTR_TO_U64(map->vm_start)) +
                                                 (map->page_off << PAGE_SHIFT);
@@ -849,7 +857,22 @@ mali_error kbasep_find_enclosing_cpu_mapping_offset(kbase_context *kctx,
 
 KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_offset)
 
-static mali_error kbase_do_syncset(kbase_context *kctx, struct base_syncset *set, kbase_sync_kmem_fn sync_fn)
+void kbase_sync_single(struct kbase_context *kctx,
+               phys_addr_t pa, size_t size, kbase_sync_kmem_fn sync_fn)
+{
+       struct page *p = pfn_to_page(PFN_DOWN(pa));
+       off_t offset = pa & ~PAGE_MASK;
+       dma_addr_t dma_addr;
+
+       BUG_ON(!p);
+       BUG_ON(offset + size > PAGE_SIZE);
+
+       dma_addr = page_private(p) + offset;
+
+       sync_fn(kctx->kbdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
+}
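kbase_sync_single() above relies on every GPU-visible page carrying its DMA handle in page->private; the allocator changes later in this patch store it there with dma_map_page()/set_page_private() and drop it again with dma_unmap_page()/ClearPagePrivate(). A minimal sketch of that lifecycle outside of kbase, with the device pointer and the page's origin left as placeholders:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map a page for the device and stash the handle for later sync calls. */
static int example_map_for_device(struct device *dev, struct page *p)
{
        dma_addr_t dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE,
                                           DMA_BIDIRECTIONAL);

        if (dma_mapping_error(dev, dma_addr))
                return -ENOMEM;

        SetPagePrivate(p);
        set_page_private(p, dma_addr);  /* read back later via page_private() */
        return 0;
}

/* CPU wants a coherent view of a sub-range the device may have written. */
static void example_sync_for_cpu(struct device *dev, struct page *p,
                                 size_t offset, size_t size)
{
        dma_sync_single_for_cpu(dev, page_private(p) + offset, size,
                                DMA_BIDIRECTIONAL);
}

/* Tear-down mirrors the setup before the page goes back to the kernel. */
static void example_unmap(struct device *dev, struct page *p)
{
        dma_unmap_page(dev, page_private(p), PAGE_SIZE, DMA_BIDIRECTIONAL);
        ClearPagePrivate(p);
}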
+
+static mali_error kbase_do_syncset(struct kbase_context *kctx, struct base_syncset *set, kbase_sync_kmem_fn sync_fn)
 {
        mali_error err = MALI_ERROR_NONE;
        struct basep_syncset *sset = &set->basep_sset;
@@ -857,16 +880,12 @@ static mali_error kbase_do_syncset(kbase_context *kctx, struct base_syncset *set
        struct kbase_cpu_mapping *map;
        unsigned long start;
        size_t size;
-       phys_addr_t base_phy_addr = 0;
        phys_addr_t *pa;
        u64 page_off, page_count;
        u64 i;
-       unsigned int offset_within_page;
-       void *base_virt_addr = 0;
-       size_t area_size = 0;
+       unsigned int offset;
 
        kbase_os_mem_map_lock(kctx);
-
        kbase_gpu_vm_lock(kctx);
 
        /* find the region where the virtual address is contained */
@@ -890,56 +909,42 @@ static mali_error kbase_do_syncset(kbase_context *kctx, struct base_syncset *set
                goto out_unlock;
        }
 
-       offset_within_page = start & (PAGE_SIZE - 1);
+       offset = start & (PAGE_SIZE - 1);
        page_off = map->page_off + ((start - map->vm_start) >> PAGE_SHIFT);
-       page_count = ((size + offset_within_page + (PAGE_SIZE - 1)) & PAGE_MASK) >> PAGE_SHIFT;
+       page_count = (size + offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pa = kbase_get_phy_pages(reg);
 
-       pagefault_disable();
-
-       for (i = 0; i < page_count; i++) {
-               u32 offset = start & (PAGE_SIZE - 1);
-               phys_addr_t paddr = pa[page_off + i] + offset;
+       /* Sync first page */
+       if (pa[page_off]) {
                size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
-               u8 tmp;
-
-               if (copy_from_user(&tmp, (void*)(uintptr_t)start, 1)) {
-                       /* Not accessible */
-                       err = MALI_ERROR_FUNCTION_FAILED;
-                       goto out_enable_pagefaults;
-               }
 
-               if (paddr == base_phy_addr + area_size && start == ((uintptr_t) base_virt_addr + area_size)) {
-                       area_size += sz;
-               } else if (area_size > 0) {
-                       sync_fn(base_phy_addr, base_virt_addr, area_size);
-                       area_size = 0;
-               }
+               kbase_sync_single(kctx, pa[page_off] + offset, sz, sync_fn);
+       }
 
-               if (area_size == 0) {
-                       base_phy_addr = paddr;
-                       base_virt_addr = (void *)(uintptr_t)start;
-                       area_size = sz;
-               }
+       /* Sync middle pages (if any) */
+       for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+               /* we grow upwards, so bail on first non-present page */
+               if (!pa[page_off + i])
+                       break;
 
-               start += sz;
-               size -= sz;
+               kbase_sync_single(kctx, pa[page_off + i], PAGE_SIZE, sync_fn);
        }
 
-       if (area_size > 0)
-               sync_fn(base_phy_addr, base_virt_addr, area_size);
+       /* Sync last page (if any) */
+       if (page_count > 1 && pa[page_off + page_count - 1]) {
+               size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1;
 
-       KBASE_DEBUG_ASSERT(size == 0);
+               kbase_sync_single(kctx, pa[page_off + page_count - 1], sz,
+                       sync_fn);
+       }
 
-out_enable_pagefaults:
-       pagefault_enable();
 out_unlock:
        kbase_gpu_vm_unlock(kctx);
        kbase_os_mem_map_unlock(kctx);
        return err;
 }
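To see how the first/middle/last split above covers the whole request, take PAGE_SIZE = 0x1000 and an illustrative syncset whose user address starts 0xC00 bytes into its first page with size = 0x2800: offset = 0xC00 and page_count = (0x2800 + 0xC00 + 0xFFF) >> PAGE_SHIFT = 4. The first page is synced for PAGE_SIZE - offset = 0x400 bytes, the two middle pages for PAGE_SIZE each, and the last page for ((start + size - 1) & ~PAGE_MASK) + 1 = 0x400 bytes, which adds back up to 0x2800. The middle-page loop bails out at the first entry whose physical address is still 0, because backing is only ever grown upwards, and an unbacked first or last page is skipped for the same reason.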
 
-mali_error kbase_sync_now(kbase_context *kctx, struct base_syncset *syncset)
+mali_error kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset)
 {
        mali_error err = MALI_ERROR_FUNCTION_FAILED;
        struct basep_syncset *sset;
@@ -951,11 +956,11 @@ mali_error kbase_sync_now(kbase_context *kctx, struct base_syncset *syncset)
 
        switch (sset->type) {
        case BASE_SYNCSET_OP_MSYNC:
-               err = kbase_do_syncset(kctx, syncset, kbase_sync_to_memory);
+               err = kbase_do_syncset(kctx, syncset, dma_sync_single_for_device);
                break;
 
        case BASE_SYNCSET_OP_CSYNC:
-               err = kbase_do_syncset(kctx, syncset, kbase_sync_to_cpu);
+               err = kbase_do_syncset(kctx, syncset, dma_sync_single_for_cpu);
                break;
 
        default:
@@ -969,9 +974,10 @@ mali_error kbase_sync_now(kbase_context *kctx, struct base_syncset *syncset)
 KBASE_EXPORT_TEST_API(kbase_sync_now)
 
 /* vm lock must be held */
-mali_error kbase_mem_free_region(kbase_context *kctx, kbase_va_region *reg)
+mali_error kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg)
 {
        mali_error err;
+
        KBASE_DEBUG_ASSERT(NULL != kctx);
        KBASE_DEBUG_ASSERT(NULL != reg);
        BUG_ON(!mutex_is_locked(&kctx->reg_lock));
@@ -1001,7 +1007,7 @@ KBASE_EXPORT_TEST_API(kbase_mem_free_region)
  * This function implements the free operation on a memory segment.
  * It will loudly fail if called with outstanding mappings.
  */
-mali_error kbase_mem_free(kbase_context *kctx, mali_addr64 gpu_addr)
+mali_error kbase_mem_free(struct kbase_context *kctx, mali_addr64 gpu_addr)
 {
        mali_error err = MALI_ERROR_NONE;
        struct kbase_va_region *reg;
@@ -1017,6 +1023,7 @@ mali_error kbase_mem_free(kbase_context *kctx, mali_addr64 gpu_addr)
        if (gpu_addr >= BASE_MEM_COOKIE_BASE &&
            gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) {
                int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE);
+
                reg = kctx->pending_regions[cookie];
                if (!reg) {
                        err = MALI_ERROR_FUNCTION_FAILED;
@@ -1035,17 +1042,16 @@ mali_error kbase_mem_free(kbase_context *kctx, mali_addr64 gpu_addr)
                /* Validate the region */
                reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
                if (!reg) {
-                       dev_warn(kctx->kbdev->dev,
-                           "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
-                           gpu_addr);
+                       dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
+                                       gpu_addr);
                        err = MALI_ERROR_FUNCTION_FAILED;
                        goto out_unlock;
                }
 
                if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA) {
                        /* SAME_VA must be freed through munmap */
-                       dev_warn(kctx->kbdev->dev,
-                           "%s called on SAME_VA memory 0x%llX", __func__, gpu_addr);
+                       dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__,
+                                       gpu_addr);
                        err = MALI_ERROR_FUNCTION_FAILED;
                        goto out_unlock;
                }
@@ -1063,7 +1069,7 @@ KBASE_EXPORT_TEST_API(kbase_mem_free)
 void kbase_update_region_flags(struct kbase_va_region *reg, unsigned long flags)
 {
        KBASE_DEBUG_ASSERT(NULL != reg);
-       KBASE_DEBUG_ASSERT((flags & ~((1 << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
+       KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
 
        reg->flags |= kbase_cache_enabled(flags, reg->nr_pages);
        /* all memory is now growable */
@@ -1091,7 +1097,6 @@ void kbase_update_region_flags(struct kbase_va_region *reg, unsigned long flags)
                reg->flags |= KBASE_REG_SHARE_IN;
        else if (flags & BASE_MEM_COHERENT_SYSTEM)
                reg->flags |= KBASE_REG_SHARE_BOTH;
-
 }
 KBASE_EXPORT_TEST_API(kbase_update_region_flags)
 
@@ -1131,6 +1136,7 @@ int kbase_free_phy_pages_helper(
 {
        mali_bool syncback;
        phys_addr_t *start_free;
+
        KBASE_DEBUG_ASSERT(alloc);
        KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
        KBASE_DEBUG_ASSERT(alloc->imported.kctx);
@@ -1142,7 +1148,7 @@ int kbase_free_phy_pages_helper(
 
        start_free = alloc->pages + alloc->nents - nr_pages_to_free;
 
-       syncback = alloc->accessed_cached ? MALI_TRUE : MALI_FALSE;
+       syncback = (alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED) ? MALI_TRUE : MALI_FALSE;
 
        kbase_mem_allocator_free(&alloc->imported.kctx->osalloc,
                                  nr_pages_to_free,
@@ -1160,6 +1166,7 @@ int kbase_free_phy_pages_helper(
 void kbase_mem_kref_free(struct kref *kref)
 {
        struct kbase_mem_phy_alloc *alloc;
+
        alloc = container_of(kref, struct kbase_mem_phy_alloc, kref);
 
        switch (alloc->type) {
@@ -1172,13 +1179,14 @@ void kbase_mem_kref_free(struct kref *kref)
                /* just call put on the underlying phy allocs */
                size_t i;
                struct kbase_aliased *aliased;
+
                aliased = alloc->imported.alias.aliased;
                if (aliased) {
-                               for (i = 0; i < alloc->imported.alias.nents; i++)
-                                       if (aliased[i].alloc)
-                                               kbase_mem_phy_alloc_put(aliased[i].alloc);
-                               vfree(aliased);
-               }                               
+                       for (i = 0; i < alloc->imported.alias.nents; i++)
+                               if (aliased[i].alloc)
+                                       kbase_mem_phy_alloc_put(aliased[i].alloc);
+                       vfree(aliased);
+               }
                break;
        }
        case KBASE_MEM_TYPE_RAW:
@@ -1198,6 +1206,7 @@ void kbase_mem_kref_free(struct kref *kref)
 #endif
        case KBASE_MEM_TYPE_TB:{
                void *tb;
+
                tb = alloc->imported.kctx->jctx.tb;
                kbase_device_trace_buffer_uninstall(alloc->imported.kctx);
                vfree(tb);
@@ -1207,7 +1216,12 @@ void kbase_mem_kref_free(struct kref *kref)
                WARN(1, "Unexecpted free of type %d\n", alloc->type);
                break;
        }
-       vfree(alloc);
+
+       /* Free based on allocation type */
+       if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
+               vfree(alloc);
+       else
+               kfree(alloc);
 }
 
 KBASE_EXPORT_TEST_API(kbase_mem_kref_free);
@@ -1243,7 +1257,7 @@ KBASE_EXPORT_TEST_API(kbase_alloc_phy_pages)
 mali_bool kbase_check_alloc_flags(unsigned long flags)
 {
        /* Only known flags should be set. */
-       if (flags & ~((1 << BASE_MEM_FLAGS_NR_BITS) - 1))
+       if (flags & ~((1ul << BASE_MEM_FLAGS_NR_INPUT_BITS) - 1))
                return MALI_FALSE;
 
        /* At least one flag should be set */
@@ -1273,7 +1287,7 @@ mali_bool kbase_check_alloc_flags(unsigned long flags)
 /**
  * @brief Acquire the per-context region list lock
  */
-void kbase_gpu_vm_lock(kbase_context *kctx)
+void kbase_gpu_vm_lock(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kctx != NULL);
        mutex_lock(&kctx->reg_lock);
@@ -1284,7 +1298,7 @@ KBASE_EXPORT_TEST_API(kbase_gpu_vm_lock)
 /**
  * @brief Release the per-context region list lock
  */
-void kbase_gpu_vm_unlock(kbase_context *kctx)
+void kbase_gpu_vm_unlock(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(kctx != NULL);
        mutex_unlock(&kctx->reg_lock);
index 7baa6f87af7fa7013ac455ceea35bc091db2377f..4de2cc65ba3ba467c5934fadaf550fb6a57bb9c8 100755 (executable)
@@ -62,13 +62,12 @@ updates and generates duplicate page faults as the page table information used b
  * A CPU mapping
  */
 typedef struct kbase_cpu_mapping {
-       struct  list_head mappings_list;
-       struct  kbase_mem_phy_alloc *alloc;
-       struct  kbase_context *kctx;
-       struct  kbase_va_region *region;
-       pgoff_t page_off;
-       int     count;
-
+       struct   list_head mappings_list;
+       struct   kbase_mem_phy_alloc *alloc;
+       struct   kbase_context *kctx;
+       struct   kbase_va_region *region;
+       pgoff_t  page_off;
+       int      count;
        unsigned long vm_start;
        unsigned long vm_end;
 } kbase_cpu_mapping;
@@ -90,6 +89,12 @@ struct kbase_aliased {
        u64 length; /* in pages */
 };
 
+/**
+ * @brief Physical pages tracking object properties
+ */
+#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED  (1ul << 0)
+#define KBASE_MEM_PHY_ALLOC_LARGE            (1ul << 1)
+
 /* physical pages tracking object.
  * Set up to track N pages.
  * N not stored here, the creator holds that info.
@@ -111,7 +116,7 @@ struct kbase_mem_phy_alloc
        /* type of buffer */
        enum kbase_memory_type type;
 
-       int accessed_cached;
+       unsigned long properties;
 
        /* member in union valid based on @a type */
        union {
@@ -157,9 +162,9 @@ static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *
 
 void kbase_mem_kref_free(struct kref * kref);
 
-mali_error kbase_mem_init(kbase_device * kbdev);
-void kbase_mem_halt(kbase_device * kbdev);
-void kbase_mem_term(kbase_device * kbdev);
+mali_error kbase_mem_init(struct kbase_device * kbdev);
+void kbase_mem_halt(struct kbase_device * kbdev);
+void kbase_mem_term(struct kbase_device * kbdev);
 
 static inline struct kbase_mem_phy_alloc * kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc * alloc)
 {
@@ -180,7 +185,7 @@ typedef struct kbase_va_region {
        struct rb_node rblink;
        struct list_head link;
 
-       kbase_context *kctx;    /* Backlink to base context */
+       struct kbase_context *kctx;     /* Backlink to base context */
 
        u64 start_pfn;          /* The PFN in GPU space */
        size_t nr_pages;
@@ -276,18 +281,32 @@ static INLINE size_t kbase_reg_current_backed_size(struct kbase_va_region * reg)
                return 0;
 }
 
+/* Size in bytes above which vmalloc() is used instead of kmalloc() */
+#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024))
+
 static INLINE struct kbase_mem_phy_alloc * kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
 {
        struct kbase_mem_phy_alloc *alloc;
+       const size_t alloc_size =
+                       sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
 
        /* Prevent nr_pages*sizeof + sizeof(*alloc) from wrapping around. */
-       if (nr_pages > ((((size_t) -1) - sizeof(*alloc)) / sizeof(*alloc->pages)))
+       if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
+                       / sizeof(*alloc->pages)))
                return ERR_PTR(-ENOMEM);
 
-       alloc = vzalloc(sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages);
+       /* Allocate based on the size to reduce internal fragmentation of vmem */
+       if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+               alloc = vzalloc(alloc_size);
+       else
+               alloc = kzalloc(alloc_size, GFP_KERNEL);
+
        if (!alloc)
                return ERR_PTR(-ENOMEM);
 
+       /* Store allocation method */
+       if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
+               alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;
+
        kref_init(&alloc->kref);
        atomic_set(&alloc->gpu_mappings, 0);
        alloc->nents = 0;
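The KBASE_MEM_PHY_ALLOC_LARGE property recorded here is what lets kbase_mem_kref_free() in mali_kbase_mem.c, earlier in this patch, pick vfree() or kfree() to match the allocator used above. A stripped-down sketch of the same pattern with illustrative names, for kernels of this era that predate the kvmalloc()/kvfree() helpers:

#include <linux/slab.h>
#include <linux/vmalloc.h>

#define EXAMPLE_LARGE_THRESHOLD ((size_t)(4 * 1024))
#define EXAMPLE_PROP_LARGE      (1ul << 1)

struct example_tracker {
        unsigned long properties;
        size_t nents;
        unsigned long entries[];        /* sized at creation time */
};

static struct example_tracker *example_tracker_create(size_t nents)
{
        const size_t sz = sizeof(struct example_tracker) +
                          nents * sizeof(unsigned long);
        struct example_tracker *t;

        /* Small trackers come from the slab; big ones go to vmalloc space. */
        t = (sz > EXAMPLE_LARGE_THRESHOLD) ? vzalloc(sz)
                                           : kzalloc(sz, GFP_KERNEL);
        if (!t)
                return NULL;

        if (sz > EXAMPLE_LARGE_THRESHOLD)
                t->properties |= EXAMPLE_PROP_LARGE;    /* remember how it was made */
        t->nents = nents;
        return t;
}

static void example_tracker_destroy(struct example_tracker *t)
{
        /* Free with whichever allocator created the object. */
        if (t->properties & EXAMPLE_PROP_LARGE)
                vfree(t);
        else
                kfree(t);
}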
@@ -332,6 +351,28 @@ static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
        return new_val;
 }
 
+/**
+ * @brief Initialize low-level memory access for a kbase device
+ *
+ * Performs any low-level setup needed for a kbase device to access memory on
+ * the device.
+ *
+ * @param kbdev kbase device to initialize memory access for
+ * @return 0 on success, Linux error code on failure
+ */
+int kbase_mem_lowlevel_init(struct kbase_device *kbdev);
+
+
+/**
+ * @brief Terminate low-level memory access for a kbase device
+ *
+ * Performs any low-level cleanup needed after
+ * @ref kbase_mem_lowlevel_init.
+ *
+ * @param kbdev kbase device to clean up for
+ */
+void kbase_mem_lowlevel_term(struct kbase_device *kbdev);
+
 /**
  * @brief Initialize an OS based memory allocator.
  *
@@ -345,9 +386,13 @@ static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
  *
  * @param allocator Allocator object to initialize
  * @param max_size Maximum number of pages to keep on the freelist.
- * @return MALI_ERROR_NONE on success, an error code indicating what failed on error.
+ * @param kbdev The kbase device this allocator is used with
+ * @return MALI_ERROR_NONE on success, an error code indicating what failed on
+ * error.
  */
-mali_error kbase_mem_allocator_init(kbase_mem_allocator * allocator, unsigned int max_size);
+mali_error kbase_mem_allocator_init(struct kbase_mem_allocator *allocator,
+                                   unsigned int max_size,
+                                   struct kbase_device *kbdev);
 
 /**
  * @brief Allocate memory via an OS based memory allocator.
@@ -357,7 +402,7 @@ mali_error kbase_mem_allocator_init(kbase_mem_allocator * allocator, unsigned in
  * @param[out] pages Pointer to an array where the physical address of the allocated pages will be stored
  * @return MALI_ERROR_NONE if the pages were allocated, an error code indicating what failed on error
  */
-mali_error kbase_mem_allocator_alloc(kbase_mem_allocator * allocator, size_t nr_pages, phys_addr_t *pages);
+mali_error kbase_mem_allocator_alloc(struct kbase_mem_allocator * allocator, size_t nr_pages, phys_addr_t *pages);
 
 /**
  * @brief Free memory obtained for an OS based memory allocator.
@@ -367,7 +412,7 @@ mali_error kbase_mem_allocator_alloc(kbase_mem_allocator * allocator, size_t nr_
  * @param[in] pages Pointer to an array holding the physical addresses of the pages to free.
  * @param[in] sync_back MALI_TRUE if the memory should be synced back
  */
-void kbase_mem_allocator_free(kbase_mem_allocator * allocator, size_t nr_pages, phys_addr_t *pages, mali_bool sync_back);
+void kbase_mem_allocator_free(struct kbase_mem_allocator * allocator, size_t nr_pages, phys_addr_t *pages, mali_bool sync_back);
 
 /**
  * @brief Terminate an OS based memory allocator.
@@ -378,81 +423,79 @@ void kbase_mem_allocator_free(kbase_mem_allocator * allocator, size_t nr_pages,
  *
  * @param[in] allocator Allocator to terminate
  */
-void kbase_mem_allocator_term(kbase_mem_allocator * allocator);
+void kbase_mem_allocator_term(struct kbase_mem_allocator * allocator);
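Taken together, the init/alloc/free/term declarations above form the allocator's whole lifecycle; the kbase_device argument added to kbase_mem_allocator_init() is what the DMA mapping calls in this patch need. A hedged usage sketch (in the driver the allocator is embedded in the context as kctx->osalloc rather than created ad hoc, and error handling is trimmed):

static mali_error example_allocator_roundtrip(struct kbase_device *kbdev)
{
        static struct kbase_mem_allocator allocator;
        phys_addr_t pages[16];
        mali_error err;

        err = kbase_mem_allocator_init(&allocator, 256 /* freelist cap */, kbdev);
        if (err != MALI_ERROR_NONE)
                return err;

        err = kbase_mem_allocator_alloc(&allocator, ARRAY_SIZE(pages), pages);
        if (err == MALI_ERROR_NONE) {
                /* ... map the pages onto the GPU, use them, unmap them ... */
                kbase_mem_allocator_free(&allocator, ARRAY_SIZE(pages), pages,
                                         MALI_TRUE /* sync back */);
        }

        kbase_mem_allocator_term(&allocator);
        return err;
}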
 
 
 
-mali_error kbase_region_tracker_init(kbase_context *kctx);
-void kbase_region_tracker_term(kbase_context *kctx);
+mali_error kbase_region_tracker_init(struct kbase_context *kctx);
+void kbase_region_tracker_term(struct kbase_context *kctx);
 
-struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range(kbase_context *kctx, u64 start_pgoff, size_t nr_pages);
-
-struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(kbase_context *kctx, mali_addr64 gpu_addr);
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, mali_addr64 gpu_addr);
 
 /**
  * @brief Check that a pointer is actually a valid region.
  *
  * Must be called with context lock held.
  */
-struct kbase_va_region *kbase_region_tracker_find_region_base_address(kbase_context *kctx, mali_addr64 gpu_addr);
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, mali_addr64 gpu_addr);
 
-struct kbase_va_region *kbase_alloc_free_region(kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
+struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
 void kbase_free_alloced_region(struct kbase_va_region *reg);
-mali_error kbase_add_va_region(kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align);
+mali_error kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align);
 
-mali_error kbase_gpu_mmap(kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align);
+mali_error kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align);
 mali_bool kbase_check_alloc_flags(unsigned long flags);
 void kbase_update_region_flags(struct kbase_va_region *reg, unsigned long flags);
 
-void kbase_gpu_vm_lock(kbase_context *kctx);
-void kbase_gpu_vm_unlock(kbase_context *kctx);
+void kbase_gpu_vm_lock(struct kbase_context *kctx);
+void kbase_gpu_vm_unlock(struct kbase_context *kctx);
 
 int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
 
-mali_error kbase_mmu_init(kbase_context *kctx);
-void kbase_mmu_term(kbase_context *kctx);
+mali_error kbase_mmu_init(struct kbase_context *kctx);
+void kbase_mmu_term(struct kbase_context *kctx);
 
-phys_addr_t kbase_mmu_alloc_pgd(kbase_context *kctx);
-void kbase_mmu_free_pgd(kbase_context *kctx);
-mali_error kbase_mmu_insert_pages(kbase_context *kctx, u64 vpfn,
+phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx);
+void kbase_mmu_free_pgd(struct kbase_context *kctx);
+mali_error kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
                                  phys_addr_t *phys, size_t nr,
                                  unsigned long flags);
-mali_error kbase_mmu_insert_single_page(kbase_context *kctx, u64 vpfn,
+mali_error kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
                                        phys_addr_t phys, size_t nr,
                                        unsigned long flags);
 
-mali_error kbase_mmu_teardown_pages(kbase_context *kctx, u64 vpfn, size_t nr);
-mali_error kbase_mmu_update_pages(kbase_context* kctx, u64 vpfn, phys_addr_t* phys, size_t nr, unsigned long flags);
+mali_error kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
+mali_error kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t* phys, size_t nr, unsigned long flags);
 
 /**
  * @brief Register region and map it on the GPU.
  *
  * Call kbase_add_va_region() and map the region on the GPU.
  */
-mali_error kbase_gpu_mmap(kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align);
+mali_error kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, mali_addr64 addr, size_t nr_pages, size_t align);
 
 /**
  * @brief Remove the region from the GPU and unregister it.
  *
  * Must be called with context lock held.
  */
-mali_error kbase_gpu_munmap(kbase_context *kctx, struct kbase_va_region *reg);
+mali_error kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);
 
 /**
  * The caller has the following locking conditions:
  * - It must hold kbase_as::transaction_mutex on kctx's address space
  * - It must hold the kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_mmu_update(kbase_context *kctx);
+void kbase_mmu_update(struct kbase_context *kctx);
 
 /**
  * The caller has the following locking conditions:
  * - It must hold kbase_as::transaction_mutex on kctx's address space
  * - It must hold the kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_mmu_disable(kbase_context *kctx);
+void kbase_mmu_disable(struct kbase_context *kctx);
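The two locking comments above translate into a caller pattern along these lines. This is a rough sketch only: the lock field paths (kbdev->as[n].transaction_mutex and kbdev->js_data.runpool_irq.lock) are taken on trust from the comment text, and it assumes kctx is currently scheduled in so that kctx->as_nr is valid:

static void example_reprogram_as(struct kbase_device *kbdev,
                                 struct kbase_context *kctx)
{
        struct kbase_as *as = &kbdev->as[kctx->as_nr];
        unsigned long flags;

        mutex_lock(&as->transaction_mutex);
        spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, flags);

        kbase_mmu_update(kctx);         /* or kbase_mmu_disable(kctx) */

        spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, flags);
        mutex_unlock(&as->transaction_mutex);
}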
 
-void kbase_mmu_interrupt(kbase_device *kbdev, u32 irq_stat);
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
 
 /** Dump the MMU tables to a buffer
  *
@@ -469,11 +512,13 @@ void kbase_mmu_interrupt(kbase_device *kbdev, u32 irq_stat);
  * @return The address of the buffer containing the MMU dump or NULL on error (including if the @c nr_pages is too
  * small)
  */
-void *kbase_mmu_dump(kbase_context *kctx, int nr_pages);
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
 
-mali_error kbase_sync_now(kbase_context *kctx, base_syncset *syncset);
-void kbase_pre_job_sync(kbase_context *kctx, base_syncset *syncsets, size_t nr);
-void kbase_post_job_sync(kbase_context *kctx, base_syncset *syncsets, size_t nr);
+mali_error kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset);
+void kbase_sync_single(struct kbase_context *kctx, phys_addr_t pa,
+               size_t size, kbase_sync_kmem_fn sync_fn);
+void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
+void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
 
 /**
  * Set attributes for imported tmem region
@@ -482,12 +527,12 @@ void kbase_post_job_sync(kbase_context *kctx, base_syncset *syncsets, size_t nr)
  * of imported external memory
  *
  * @param[in]  kctx        The kbase context which the tmem belongs to
- * @param[in]  gpu_adr     The base address of the tmem region
+ * @param[in]  gpu_addr     The base address of the tmem region
  * @param[in]  attributes   The attributes of tmem region to be set
  *
  * @return MALI_ERROR_NONE on success.  Any other value indicates failure.
  */
-mali_error kbase_tmem_set_attributes(kbase_context *kctx, mali_addr64 gpu_adr, u32  attributes );
+mali_error kbase_tmem_set_attributes(struct kbase_context *kctx, mali_addr64 gpu_addr, u32  attributes);
 
 /**
  * Get attributes of imported tmem region
@@ -495,18 +540,18 @@ mali_error kbase_tmem_set_attributes(kbase_context *kctx, mali_addr64 gpu_adr, u
  * This function retrieves the attributes of imported external memory
  *
  * @param[in]  kctx        The kbase context which the tmem belongs to
- * @param[in]  gpu_adr     The base address of the tmem region
+ * @param[in]  gpu_addr     The base address of the tmem region
  * @param[out] attributes   The actual attributes of tmem region
  *
  * @return MALI_ERROR_NONE on success.  Any other value indicates failure.
  */
-mali_error kbase_tmem_get_attributes(kbase_context *kctx, mali_addr64 gpu_adr, u32 * const attributes );
+mali_error kbase_tmem_get_attributes(struct kbase_context *kctx, mali_addr64 gpu_addr, u32 * const attributes);
 
 /* OS specific functions */
-mali_error kbase_mem_free(kbase_context *kctx, mali_addr64 gpu_addr);
-mali_error kbase_mem_free_region(kbase_context *kctx, struct kbase_va_region *reg);
-void kbase_os_mem_map_lock(kbase_context *kctx);
-void kbase_os_mem_map_unlock(kbase_context *kctx);
+mali_error kbase_mem_free(struct kbase_context *kctx, mali_addr64 gpu_addr);
+mali_error kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
+void kbase_os_mem_map_lock(struct kbase_context *kctx);
+void kbase_os_mem_map_unlock(struct kbase_context *kctx);
 
 /**
  * @brief Update the memory allocation counters for the current process
@@ -518,7 +563,7 @@ void kbase_os_mem_map_unlock(kbase_context *kctx);
  * @param[in] pages The desired delta to apply to the memory usage counters.
  */
 
-void kbasep_os_process_page_usage_update( struct kbase_context * kctx, int pages );
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);
 
 /**
  * @brief Add to the memory allocation counters for the current process
@@ -530,9 +575,9 @@ void kbasep_os_process_page_usage_update( struct kbase_context * kctx, int pages
  * @param[in] pages The desired delta to apply to the memory usage counters.
  */
 
-static INLINE void kbase_process_page_usage_inc( struct kbase_context *kctx, int pages )
+static INLINE void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
 {
-       kbasep_os_process_page_usage_update( kctx, pages );
+       kbasep_os_process_page_usage_update(kctx, pages);
 }
 
 /**
@@ -545,9 +590,9 @@ static INLINE void kbase_process_page_usage_inc( struct kbase_context *kctx, int
  * @param[in] pages The desired delta to apply to the memory usage counters.
  */
 
-static INLINE void kbase_process_page_usage_dec( struct kbase_context *kctx, int pages )
+static INLINE void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
 {
-       kbasep_os_process_page_usage_update( kctx, 0 - pages );
+       kbasep_os_process_page_usage_update(kctx, 0 - pages);
 }
 
 /**
@@ -571,15 +616,15 @@ static INLINE void kbase_process_page_usage_dec( struct kbase_context *kctx, int
  * @return MALI_ERROR_NONE if offset was obtained successfully. Error code
  *         otherwise.
  */
-mali_error kbasep_find_enclosing_cpu_mapping_offset(kbase_context *kctx,
+mali_error kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx,
                                                        mali_addr64 gpu_addr,
                                                        unsigned long uaddr,
                                                        size_t size,
                                                        mali_size64 *offset);
 
 enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);
-void kbase_as_poking_timer_retain_atom(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom);
-void kbase_as_poking_timer_release_atom(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom);
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
 
 /**
 * @brief Allocates physical pages.
@@ -604,12 +649,24 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc * alloc, size_t nr_p
 int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc * alloc, size_t nr_pages_to_free);
 
 #ifdef CONFIG_MALI_NO_MALI
-static inline void kbase_wait_write_flush(kbase_context *kctx)
+static inline void kbase_wait_write_flush(struct kbase_context *kctx)
 {
 }
 #else
-void kbase_wait_write_flush(kbase_context *kctx);
+void kbase_wait_write_flush(struct kbase_context *kctx);
 #endif
 
+/**
+ * @brief Process a bus or page fault.
+ *
+ * This function will process a fault on a specific address space.
+ *
+ * @param[in] kbdev   The @ref kbase_device the fault happened on
+ * @param[in] kctx    The @ref kbase_context for the faulting address space if
+ *                    one was found.
+ * @param[in] as      The address space that has the fault
+ */
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
+               struct kbase_context *kctx, struct kbase_as *as);
 
 #endif                         /* _KBASE_MEM_H_ */
index f05320087b65ef75f0528605d0005d03b960adbf..1541abde95249ce24d40dbb2d5e93fc0894d376b 100755 (executable)
  * Base kernel memory APIs
  */
 #include <mali_kbase.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/mempool.h>
 #include <linux/mm.h>
 #include <linux/atomic.h>
+#include <linux/version.h>
+
+int kbase_mem_lowlevel_init(struct kbase_device *kbdev)
+{
+       return 0;
+}
+
+void kbase_mem_lowlevel_term(struct kbase_device *kbdev)
+{
+       return;
+}
 
 static unsigned long kbase_mem_allocator_count(struct shrinker *s,
                                                struct shrink_control *sc)
 {
-       kbase_mem_allocator *allocator;
-       allocator = container_of(s, kbase_mem_allocator, free_list_reclaimer);
+       struct kbase_mem_allocator *allocator;
+
+       allocator = container_of(s, struct kbase_mem_allocator, free_list_reclaimer);
        return atomic_read(&allocator->free_list_size);
 }
 
 static unsigned long kbase_mem_allocator_scan(struct shrinker *s,
                                                struct shrink_control *sc)
 {
-       kbase_mem_allocator *allocator;
+       struct kbase_mem_allocator *allocator;
        int i;
        int freed;
 
-       allocator = container_of(s, kbase_mem_allocator, free_list_reclaimer);
+       allocator = container_of(s, struct kbase_mem_allocator, free_list_reclaimer);
 
        might_sleep();
 
@@ -59,6 +72,7 @@ static unsigned long kbase_mem_allocator_scan(struct shrinker *s,
                p = list_first_entry(&allocator->free_list_head,
                                        struct page, lru);
                list_del(&p->lru);
+               dma_unmap_page(allocator->kbdev->dev, page_private(p),
+                              PAGE_SIZE, DMA_BIDIRECTIONAL);
+               ClearPagePrivate(p);
                __free_page(p);
        }
        mutex_unlock(&allocator->free_list_lock);
@@ -68,7 +82,7 @@ static unsigned long kbase_mem_allocator_scan(struct shrinker *s,
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
 static int kbase_mem_allocator_shrink(struct shrinker *s,
-                                       struct shrink_control *sc)
+               struct shrink_control *sc)
 {
        if (sc->nr_to_scan == 0)
                return kbase_mem_allocator_count(s, sc);
@@ -77,13 +91,16 @@ static int kbase_mem_allocator_shrink(struct shrinker *s,
 }
 #endif
 
-mali_error kbase_mem_allocator_init(kbase_mem_allocator *const allocator,
-                                       unsigned int max_size)
+mali_error kbase_mem_allocator_init(struct kbase_mem_allocator *const allocator,
+               unsigned int max_size, struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(NULL != allocator);
+       KBASE_DEBUG_ASSERT(kbdev);
 
        INIT_LIST_HEAD(&allocator->free_list_head);
 
+       allocator->kbdev = kbdev;
+
        mutex_init(&allocator->free_list_lock);
 
        atomic_set(&allocator->free_list_size, 0);
@@ -109,17 +126,22 @@ mali_error kbase_mem_allocator_init(kbase_mem_allocator *const allocator,
 }
 KBASE_EXPORT_TEST_API(kbase_mem_allocator_init)
 
-void kbase_mem_allocator_term(kbase_mem_allocator *allocator)
+void kbase_mem_allocator_term(struct kbase_mem_allocator *allocator)
 {
        KBASE_DEBUG_ASSERT(NULL != allocator);
 
        unregister_shrinker(&allocator->free_list_reclaimer);
        mutex_lock(&allocator->free_list_lock);
-       while (!list_empty(&allocator->free_list_head))
-       {
-               struct page * p;
-               p = list_first_entry(&allocator->free_list_head, struct page, lru);
+       while (!list_empty(&allocator->free_list_head)) {
+               struct page *p;
+
+               p = list_first_entry(&allocator->free_list_head, struct page,
+                                    lru);
                list_del(&p->lru);
+               dma_unmap_page(allocator->kbdev->dev, page_private(p),
+                              PAGE_SIZE,
+                              DMA_BIDIRECTIONAL);
+               ClearPagePrivate(p);
                __free_page(p);
        }
        atomic_set(&allocator->free_list_size, 0);
@@ -128,10 +150,10 @@ void kbase_mem_allocator_term(kbase_mem_allocator *allocator)
 }
 KBASE_EXPORT_TEST_API(kbase_mem_allocator_term)
 
-mali_error kbase_mem_allocator_alloc(kbase_mem_allocator *allocator, size_t nr_pages, phys_addr_t *pages)
+mali_error kbase_mem_allocator_alloc(struct kbase_mem_allocator *allocator, size_t nr_pages, phys_addr_t *pages)
 {
-       struct page * p;
-       void * mp;
+       struct page *p;
+       void *mp;
        int i;
        int num_from_free_list;
        struct list_head from_free_list = LIST_HEAD_INIT(from_free_list);
@@ -154,8 +176,7 @@ mali_error kbase_mem_allocator_alloc(kbase_mem_allocator *allocator, size_t nr_p
        i = 0;
 
        /* Allocate as many pages from the pool of already allocated pages. */
-       list_for_each_entry(p, &from_free_list, lru)
-       {
+       list_for_each_entry(p, &from_free_list, lru) {
                pages[i] = PFN_PHYS(page_to_pfn(p));
                i++;
        }
@@ -164,33 +185,48 @@ mali_error kbase_mem_allocator_alloc(kbase_mem_allocator *allocator, size_t nr_p
                return MALI_ERROR_NONE;
 
        /* If not all pages were sourced from the pool, request new ones. */
-       for (; i < nr_pages; i++)
-       {
+       for (; i < nr_pages; i++) {
+               dma_addr_t dma_addr;
+#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+               /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
+               p = alloc_page(GFP_USER);
+#else
                p = alloc_page(GFP_HIGHUSER);
+#endif
                if (NULL == p)
-               {
                        goto err_out_roll_back;
-               }
                mp = kmap(p);
-               if (NULL == mp)
-               {
+               if (NULL == mp) {
                        __free_page(p);
                        goto err_out_roll_back;
                }
                memset(mp, 0x00, PAGE_SIZE); /* instead of __GFP_ZERO, so we can do cache maintenance */
-               kbase_sync_to_memory(PFN_PHYS(page_to_pfn(p)), mp, PAGE_SIZE);
                kunmap(p);
+
+               dma_addr = dma_map_page(allocator->kbdev->dev, p, 0, PAGE_SIZE,
+                                       DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(allocator->kbdev->dev, dma_addr)) {
+                       __free_page(p);
+                       goto err_out_roll_back;
+               }
+
+               SetPagePrivate(p);
+               set_page_private(p, dma_addr);
                pages[i] = PFN_PHYS(page_to_pfn(p));
+               BUG_ON(dma_addr != pages[i]);
        }
 
        return MALI_ERROR_NONE;
 
 err_out_roll_back:
-       while (i--)
-       {
-               struct page * p;
+       while (i--) {
+               struct page *p;
                p = pfn_to_page(PFN_DOWN(pages[i]));
                pages[i] = (phys_addr_t)0;
+               dma_unmap_page(allocator->kbdev->dev, page_private(p),
+                              PAGE_SIZE,
+                              DMA_BIDIRECTIONAL);
+               ClearPagePrivate(p);
                __free_page(p);
        }
 
@@ -198,7 +234,7 @@ err_out_roll_back:
 }
 KBASE_EXPORT_TEST_API(kbase_mem_allocator_alloc)
 
-void kbase_mem_allocator_free(kbase_mem_allocator *allocator, size_t nr_pages, phys_addr_t *pages, mali_bool sync_back)
+void kbase_mem_allocator_free(struct kbase_mem_allocator *allocator, size_t nr_pages, phys_addr_t *pages, mali_bool sync_back)
 {
        int i = 0;
        int page_count = 0;
@@ -215,41 +251,37 @@ void kbase_mem_allocator_free(kbase_mem_allocator *allocator, size_t nr_pages, p
        * or get too many on the free list, but the max_size is just a ballpark so it is ok
        * providing that tofree doesn't exceed nr_pages
        */
-       tofree = MAX((int)allocator->free_list_max_size - atomic_read(&allocator->free_list_size),0);
+       tofree = MAX((int)allocator->free_list_max_size - atomic_read(&allocator->free_list_size), 0);
        tofree = nr_pages - MIN(tofree, nr_pages);
-       for (; i < tofree; i++)
-       {
-               if (likely(0 != pages[i]))
-               {
-                       struct page * p;
+       for (; i < tofree; i++) {
+               if (likely(0 != pages[i])) {
+                       struct page *p;
 
                        p = pfn_to_page(PFN_DOWN(pages[i]));
+                       dma_unmap_page(allocator->kbdev->dev, page_private(p),
+                                      PAGE_SIZE,
+                                      DMA_BIDIRECTIONAL);
+                       ClearPagePrivate(p);
                        pages[i] = (phys_addr_t)0;
                        __free_page(p);
                }
        }
 
-       for (; i < nr_pages; i++)
-       {
-               if (likely(0 != pages[i]))
-               {
-                       struct page * p;
+       for (; i < nr_pages; i++) {
+               if (likely(0 != pages[i])) {
+                       struct page *p;
 
                        p = pfn_to_page(PFN_DOWN(pages[i]));
                        pages[i] = (phys_addr_t)0;
-                       /* Sync back the memory to ensure that future cache invalidations
-                        * don't trample on memory.
+                       /* Sync back the memory to ensure that future cache
+                        * invalidations don't trample on memory.
                         */
-                       if( sync_back )
-                       {
-                               void* mp = kmap(p);
-                               if( NULL != mp)
-                               {
-                                       kbase_sync_to_cpu(PFN_PHYS(page_to_pfn(p)), mp, PAGE_SIZE);
-                                       kunmap(p);
-                               }
-
-                       }
+                       if (sync_back)
+                               dma_sync_single_for_cpu(allocator->kbdev->dev,
+                                               page_private(p),
+                                               PAGE_SIZE,
+                                               DMA_BIDIRECTIONAL);
+
                        list_add(&p->lru, &new_free_list_items);
                        page_count++;
                }
index 5929b14a12cd608b7a5b385f3f24df875cbb5cf8..cb0e153b13bf4e00e6a3179596b275342a2bd9db 100755 (executable)
 #include <linux/slab.h>
 
 /* raw page handling */
-typedef struct kbase_mem_allocator
+struct kbase_mem_allocator
 {
+       struct kbase_device *kbdev;
        atomic_t            free_list_size;
        unsigned int        free_list_max_size;
        struct mutex        free_list_lock;
        struct list_head    free_list_head;
        struct shrinker     free_list_reclaimer;
-} kbase_mem_allocator;
-
+};
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_alloc_carveout.c b/drivers/gpu/arm/midgard/mali_kbase_mem_alloc_carveout.c
new file mode 100755 (executable)
index 0000000..e520c78
--- /dev/null
@@ -0,0 +1,403 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem.c
+ * Base kernel memory APIs
+ */
+#include <mali_kbase.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/mempool.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+
+
+/* This code does not support having multiple kbase devices, or rmmod/insmod */
+
+static unsigned long kbase_carveout_start_pfn = ~0UL;
+static unsigned long kbase_carveout_end_pfn;
+static LIST_HEAD(kbase_carveout_free_list);
+static DEFINE_MUTEX(kbase_carveout_free_list_lock);
+static unsigned int kbase_carveout_pages;
+static atomic_t kbase_carveout_used_pages;
+static atomic_t kbase_carveout_system_pages;
+
+static struct page *kbase_carveout_get_page(struct kbase_mem_allocator *allocator)
+{
+       struct page *p = NULL;
+
+       mutex_lock(&kbase_carveout_free_list_lock);
+       if (!list_empty(&kbase_carveout_free_list)) {
+               p = list_first_entry(&kbase_carveout_free_list, struct page, lru);
+               list_del(&p->lru);
+               atomic_inc(&kbase_carveout_used_pages);
+       }
+       mutex_unlock(&kbase_carveout_free_list_lock);
+
+       if (!p) {
+               dma_addr_t dma_addr;
+#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+               /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
+               p = alloc_page(GFP_USER);
+#else
+               p = alloc_page(GFP_HIGHUSER);
+#endif
+               if (!p)
+                       goto out;
+
+               dma_addr = dma_map_page(allocator->kbdev->dev, p, 0, PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(allocator->kbdev->dev, dma_addr)) {
+                       __free_page(p);
+                       p = NULL;
+                       goto out;
+               }
+
+               SetPagePrivate(p);
+               set_page_private(p, dma_addr);
+               BUG_ON(dma_addr != PFN_PHYS(page_to_pfn(p)));
+               atomic_inc(&kbase_carveout_system_pages);
+       }
+out:
+       return p;
+}
+
+static void kbase_carveout_put_page(struct page *p,
+                                   struct kbase_mem_allocator *allocator)
+{
+       if (page_to_pfn(p) >= kbase_carveout_start_pfn &&
+                       page_to_pfn(p) <= kbase_carveout_end_pfn) {
+               mutex_lock(&kbase_carveout_free_list_lock);
+               list_add(&p->lru, &kbase_carveout_free_list);
+               atomic_dec(&kbase_carveout_used_pages);
+               mutex_unlock(&kbase_carveout_free_list_lock);
+       } else {
+               dma_unmap_page(allocator->kbdev->dev, page_private(p),
+                               PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
+               ClearPagePrivate(p);
+               __free_page(p);
+               atomic_dec(&kbase_carveout_system_pages);
+       }
+}
+
+static int kbase_carveout_seq_show(struct seq_file *s, void *data)
+{
+       seq_printf(s, "carveout pages: %u\n", kbase_carveout_pages);
+       seq_printf(s, "used carveout pages: %u\n",
+                       atomic_read(&kbase_carveout_used_pages));
+       seq_printf(s, "used system pages: %u\n",
+                       atomic_read(&kbase_carveout_system_pages));
+       return 0;
+}
+
+static int kbasep_carveout_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, kbase_carveout_seq_show, NULL);
+}
+
+static const struct file_operations kbase_carveout_debugfs_fops = {
+       .open           = kbasep_carveout_debugfs_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int kbase_carveout_init(struct device *dev)
+{
+       unsigned long pfn;
+       static int once;
+
+       mutex_lock(&kbase_carveout_free_list_lock);
+       BUG_ON(once);
+       once = 1;
+
+       for (pfn = kbase_carveout_start_pfn; pfn <= kbase_carveout_end_pfn; pfn++) {
+               struct page *p = pfn_to_page(pfn);
+               dma_addr_t dma_addr;
+
+               dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(dev, dma_addr))
+                       goto out_rollback;
+
+               SetPagePrivate(p);
+               set_page_private(p, dma_addr);
+               BUG_ON(dma_addr != PFN_PHYS(page_to_pfn(p)));
+
+               list_add_tail(&p->lru, &kbase_carveout_free_list);
+       }
+
+       mutex_unlock(&kbase_carveout_free_list_lock);
+
+       debugfs_create_file("kbase_carveout", S_IRUGO, NULL, NULL,
+                   &kbase_carveout_debugfs_fops);
+
+       return 0;
+
+out_rollback:
+       while (!list_empty(&kbase_carveout_free_list)) {
+               struct page *p;
+
+               p = list_first_entry(&kbase_carveout_free_list, struct page, lru);
+               dma_unmap_page(dev, page_private(p),
+                               PAGE_SIZE,
+                               DMA_BIDIRECTIONAL);
+               ClearPagePrivate(p);
+               list_del(&p->lru);
+       }
+
+       mutex_unlock(&kbase_carveout_free_list_lock);
+       return -ENOMEM;
+}
+
+int __init kbase_carveout_mem_reserve(phys_addr_t size)
+{
+       phys_addr_t mem;
+
+#if defined(CONFIG_ARM) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
+       /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
+       mem = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ACCESSIBLE);
+#else
+       mem = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+#endif
+       if (mem == 0) {
+               pr_warn("%s: Failed to allocate %d for kbase carveout\n",
+                               __func__, size);
+               return -ENOMEM;
+       }
+
+       kbase_carveout_start_pfn = PFN_DOWN(mem);
+       kbase_carveout_end_pfn = PFN_DOWN(mem + size - 1);
+       kbase_carveout_pages = kbase_carveout_end_pfn - kbase_carveout_start_pfn + 1;
+
+       return 0;
+}
+
+int kbase_mem_lowlevel_init(struct kbase_device *kbdev)
+{
+       return kbase_carveout_init(kbdev->dev);
+}
+
+void kbase_mem_lowlevel_term(struct kbase_device *kbdev)
+{
+}
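The carveout variant keeps its pool entirely outside the buddy allocator: kbase_carveout_mem_reserve() grabs the range from memblock while early boot code still owns all memory, kbase_carveout_init() DMA-maps every page of it at probe time via kbase_mem_lowlevel_init(), and kbase_carveout_get_page() only falls back to alloc_page() once the pool is exhausted (the split is visible in the kbase_carveout debugfs file). A hedged sketch of the platform side; the hook name is invented and the 64 MiB figure is arbitrary:

#include <linux/kernel.h>
#include <linux/sizes.h>

/* Hypothetical early-boot hook (for example an ARM machine_desc .reserve
 * callback, which runs while memblock is still authoritative). */
static void __init example_reserve_gpu_carveout(void)
{
        if (kbase_carveout_mem_reserve(64 * SZ_1M))
                pr_warn("kbase carveout reservation failed, falling back to system pages\n");
}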
+
+STATIC int kbase_mem_allocator_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+       struct kbase_mem_allocator *allocator;
+       int i;
+       int freed;
+
+       allocator = container_of(s, struct kbase_mem_allocator, free_list_reclaimer);
+
+       if (sc->nr_to_scan == 0)
+               return atomic_read(&allocator->free_list_size);
+
+       might_sleep();
+
+       mutex_lock(&allocator->free_list_lock);
+       i = MIN(atomic_read(&allocator->free_list_size), sc->nr_to_scan);
+       freed = i;
+
+       atomic_sub(i, &allocator->free_list_size);
+
+       while (i--) {
+               struct page *p;
+
+               BUG_ON(list_empty(&allocator->free_list_head));
+               p = list_first_entry(&allocator->free_list_head, struct page, lru);
+               list_del(&p->lru);
+               kbase_carveout_put_page(p, allocator);
+       }
+       mutex_unlock(&allocator->free_list_lock);
+       return atomic_read(&allocator->free_list_size);
+}
+
+mali_error kbase_mem_allocator_init(struct kbase_mem_allocator * const allocator,
+               unsigned int max_size, struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(NULL != allocator);
+       KBASE_DEBUG_ASSERT(kbdev);
+
+       INIT_LIST_HEAD(&allocator->free_list_head);
+
+       allocator->kbdev = kbdev;
+
+       mutex_init(&allocator->free_list_lock);
+
+       atomic_set(&allocator->free_list_size, 0);
+
+       allocator->free_list_max_size = max_size;
+       allocator->free_list_reclaimer.shrink = kbase_mem_allocator_shrink;
+       allocator->free_list_reclaimer.seeks = DEFAULT_SEEKS;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) /* Kernel versions prior to 3.1 : struct shrinker does not define batch */
+       allocator->free_list_reclaimer.batch = 0;
+#endif
+
+       register_shrinker(&allocator->free_list_reclaimer);
+
+       return MALI_ERROR_NONE;
+}
+
+void kbase_mem_allocator_term(struct kbase_mem_allocator *allocator)
+{
+       KBASE_DEBUG_ASSERT(NULL != allocator);
+
+       unregister_shrinker(&allocator->free_list_reclaimer);
+
+       while (!list_empty(&allocator->free_list_head)) {
+               struct page *p;
+
+               p = list_first_entry(&allocator->free_list_head, struct page,
+                               lru);
+               list_del(&p->lru);
+
+               kbase_carveout_put_page(p, allocator);
+       }
+       mutex_destroy(&allocator->free_list_lock);
+}
+
+
+mali_error kbase_mem_allocator_alloc(struct kbase_mem_allocator *allocator, size_t nr_pages, phys_addr_t *pages)
+{
+       struct page *p;
+       void *mp;
+       int i;
+       int num_from_free_list;
+       struct list_head from_free_list = LIST_HEAD_INIT(from_free_list);
+
+       might_sleep();
+
+       KBASE_DEBUG_ASSERT(NULL != allocator);
+
+       /* take from the free list first */
+       mutex_lock(&allocator->free_list_lock);
+       num_from_free_list = MIN(nr_pages, atomic_read(&allocator->free_list_size));
+       atomic_sub(num_from_free_list, &allocator->free_list_size);
+       for (i = 0; i < num_from_free_list; i++) {
+               BUG_ON(list_empty(&allocator->free_list_head));
+               p = list_first_entry(&allocator->free_list_head, struct page, lru);
+               list_move(&p->lru, &from_free_list);
+       }
+       mutex_unlock(&allocator->free_list_lock);
+       i = 0;
+
+       /* Allocate as many pages from the pool of already allocated pages. */
+       list_for_each_entry(p, &from_free_list, lru)
+       {
+               pages[i] = PFN_PHYS(page_to_pfn(p));
+               i++;
+       }
+
+       if (i == nr_pages)
+               return MALI_ERROR_NONE;
+
+       /* If not all pages were sourced from the pool, request new ones. */
+       for (; i < nr_pages; i++) {
+               p = kbase_carveout_get_page(allocator);
+               if (NULL == p)
+                       goto err_out_roll_back;
+
+               mp = kmap(p);
+               if (NULL == mp) {
+                       kbase_carveout_put_page(p, allocator);
+                       goto err_out_roll_back;
+               }
+               memset(mp, 0x00, PAGE_SIZE); /* instead of __GFP_ZERO, so we can
+                                               do cache maintenance */
+               dma_sync_single_for_device(allocator->kbdev->dev,
+                                          page_private(p),
+                                          PAGE_SIZE,
+                                          DMA_BIDIRECTIONAL);
+               kunmap(p);
+               pages[i] = PFN_PHYS(page_to_pfn(p));
+       }
+
+       return MALI_ERROR_NONE;
+
+err_out_roll_back:
+       while (i--) {
+               struct page *p;
+
+               p = pfn_to_page(PFN_DOWN(pages[i]));
+               pages[i] = (phys_addr_t)0;
+               kbase_carveout_put_page(p, allocator);
+       }
+
+       return MALI_ERROR_OUT_OF_MEMORY;
+}
+
+void kbase_mem_allocator_free(struct kbase_mem_allocator *allocator, u32 nr_pages, phys_addr_t *pages, mali_bool sync_back)
+{
+       int i = 0;
+       int page_count = 0;
+       int tofree;
+
+       LIST_HEAD(new_free_list_items);
+
+       KBASE_DEBUG_ASSERT(NULL != allocator);
+
+       might_sleep();
+
+       /* Start by freeing the overspill.
+        * As we do this outside of the lock we might free too many pages
+        * or leave too many on the free list, but max_size is only a
+        * ballpark figure, so this is fine provided tofree does not
+        * exceed nr_pages.
+        */
+       tofree = MAX((int)allocator->free_list_max_size - atomic_read(&allocator->free_list_size), 0);
+       tofree = nr_pages - MIN(tofree, nr_pages);
+       for (; i < tofree; i++) {
+               if (likely(0 != pages[i])) {
+                       struct page *p;
+
+                       p = pfn_to_page(PFN_DOWN(pages[i]));
+                       pages[i] = (phys_addr_t)0;
+                       kbase_carveout_put_page(p, allocator);
+               }
+       }
+
+       for (; i < nr_pages; i++) {
+               if (likely(0 != pages[i])) {
+                       struct page *p;
+
+                       p = pfn_to_page(PFN_DOWN(pages[i]));
+                       pages[i] = (phys_addr_t)0;
+                       /* Sync back the memory to ensure that future cache
+                        * invalidations don't trample on memory.
+                        */
+                       if (sync_back)
+                               dma_sync_single_for_cpu(allocator->kbdev->dev,
+                                               page_private(p),
+                                               PAGE_SIZE,
+                                               DMA_BIDIRECTIONAL);
+                       list_add(&p->lru, &new_free_list_items);
+                       page_count++;
+               }
+       }
+       mutex_lock(&allocator->free_list_lock);
+       list_splice(&new_free_list_items, &allocator->free_list_head);
+       atomic_add(page_count, &allocator->free_list_size);
+       mutex_unlock(&allocator->free_list_lock);
+}
+KBASE_EXPORT_TEST_API(kbase_mem_allocator_free)
+
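The carveout allocator above trims its page pool through the legacy single-callback struct shrinker interface (the API used before the count_objects/scan_objects split): the .shrink hook is invoked with sc->nr_to_scan == 0 purely to report how many reclaimable pages are on the free list, and with a non-zero count to release that many pages and return what remains. The following is a minimal sketch of that contract; demo_pool and its fields are illustrative, not the driver's kbase_mem_allocator.

#include <linux/mm.h>
#include <linux/shrinker.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/atomic.h>

struct demo_pool {
        struct list_head free_list;     /* spare pages kept for reuse */
        atomic_t nr_free;               /* number of pages on free_list */
        struct mutex lock;
        struct shrinker reclaimer;
};

/* Legacy shrinker callback: report the pool size when nr_to_scan is zero,
 * otherwise release up to nr_to_scan pages and report the new size. */
static int demo_pool_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct demo_pool *pool = container_of(s, struct demo_pool, reclaimer);

        if (sc->nr_to_scan == 0)
                return atomic_read(&pool->nr_free);

        mutex_lock(&pool->lock);
        while (sc->nr_to_scan && !list_empty(&pool->free_list)) {
                struct page *p = list_first_entry(&pool->free_list,
                                                  struct page, lru);

                list_del(&p->lru);
                atomic_dec(&pool->nr_free);
                __free_page(p);
                sc->nr_to_scan--;
        }
        mutex_unlock(&pool->lock);

        return atomic_read(&pool->nr_free);
}

static void demo_pool_register(struct demo_pool *pool)
{
        pool->reclaimer.shrink = demo_pool_shrink;
        pool->reclaimer.seeks = DEFAULT_SEEKS;
        register_shrinker(&pool->reclaimer);
}

In the driver's version the reclaimed pages go back through kbase_carveout_put_page() rather than straight to the page allocator, but the count/scan contract is the same.
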
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
index 088fd559fcb5db5fd2eff50692557219f7b3eace..3b2cfb75c8aae5113b59009909a83a22189e0fd9 100755 (executable)
@@ -43,7 +43,7 @@
 static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
 static const struct vm_operations_struct kbase_vm_ops;
 
-struct kbase_va_region *kbase_mem_alloc(kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment)
+struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va, u16 *va_alignment)
 {
        int zone;
        int gpu_pc_bits;
@@ -64,9 +64,13 @@ struct kbase_va_region *kbase_mem_alloc(kbase_context *kctx, u64 va_pages, u64 c
 
        if (0 == va_pages) {
                dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");
-               goto zero_size;
+               goto bad_size;
        }
 
+       if (va_pages > (UINT64_MAX / PAGE_SIZE))
+               /* 64-bit address range is the max */
+               goto bad_size;
+
 #if defined(CONFIG_64BIT)
        if (is_compat_task())
                cpu_va_bits = 32;
@@ -115,7 +119,8 @@ struct kbase_va_region *kbase_mem_alloc(kbase_context *kctx, u64 va_pages, u64 c
 
        if (kbase_alloc_phy_pages(reg, va_pages, commit_pages)) {
                dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)", 
-                             (unsigned long long)commit_pages, (unsigned long long)va_pages);
+                               (unsigned long long)commit_pages,
+                               (unsigned long long)va_pages);
                goto no_mem;
        }
 
@@ -166,13 +171,13 @@ prepare_failed:
 no_region:
 bad_ex_size:
 bad_flags:
-zero_size:
+bad_size:
        return NULL;
 }
 
-mali_error kbase_mem_query(kbase_context *kctx, mali_addr64 gpu_addr, int query, u64 * const out)
+mali_error kbase_mem_query(struct kbase_context *kctx, mali_addr64 gpu_addr, int query, u64 * const out)
 {
-       kbase_va_region *reg;
+       struct kbase_va_region *reg;
        mali_error ret = MALI_ERROR_FUNCTION_FAILED;
 
        KBASE_DEBUG_ASSERT(kctx);
@@ -182,43 +187,49 @@ mali_error kbase_mem_query(kbase_context *kctx, mali_addr64 gpu_addr, int query,
 
        /* Validate the region */
        reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
-       if (!reg || (reg->flags & KBASE_REG_FREE) )
+       if (!reg || (reg->flags & KBASE_REG_FREE))
                goto out_unlock;
 
        switch (query) {
-               case KBASE_MEM_QUERY_COMMIT_SIZE:
-                       if (reg->alloc->type != KBASE_MEM_TYPE_ALIAS) {
-                               *out = kbase_reg_current_backed_size(reg);
-                       } else {
-                               size_t i;
-                               struct kbase_aliased *aliased;
-                               *out = 0;
-                               aliased = reg->alloc->imported.alias.aliased;
-                               for (i = 0; i < reg->alloc->imported.alias.nents; i++)
-                                       *out += aliased[i].length;
-                       }
-                       break;
-               case KBASE_MEM_QUERY_VA_SIZE:
-                       *out = reg->nr_pages;
-                       break;
-               case KBASE_MEM_QUERY_FLAGS:
-               {
+       case KBASE_MEM_QUERY_COMMIT_SIZE:
+               if (reg->alloc->type != KBASE_MEM_TYPE_ALIAS) {
+                       *out = kbase_reg_current_backed_size(reg);
+               } else {
+                       size_t i;
+                       struct kbase_aliased *aliased;
                        *out = 0;
-                       if( KBASE_REG_GPU_WR & reg->flags )
-                               *out |= BASE_MEM_PROT_GPU_WR;
-                       if( KBASE_REG_GPU_RD & reg->flags )
-                               *out |= BASE_MEM_PROT_GPU_RD;
-                       if( !(KBASE_REG_GPU_NX & reg->flags) )
-                               *out |= BASE_MEM_PROT_GPU_EX;
-                       if( KBASE_REG_SHARE_BOTH & reg->flags )
-                               *out |= BASE_MEM_COHERENT_SYSTEM;
-                       if ( KBASE_REG_SHARE_IN & reg->flags )
-                               *out |= BASE_MEM_COHERENT_LOCAL;
-                       break;
+                       aliased = reg->alloc->imported.alias.aliased;
+                       for (i = 0; i < reg->alloc->imported.alias.nents; i++)
+                               *out += aliased[i].length;
                }
-               default:
-                       *out = 0;
-                       goto out_unlock;
+               break;
+       case KBASE_MEM_QUERY_VA_SIZE:
+               *out = reg->nr_pages;
+               break;
+       case KBASE_MEM_QUERY_FLAGS:
+       {
+               *out = 0;
+               if (KBASE_REG_CPU_WR & reg->flags)
+                       *out |= BASE_MEM_PROT_CPU_WR;
+               if (KBASE_REG_CPU_RD & reg->flags)
+                       *out |= BASE_MEM_PROT_CPU_RD;
+               if (KBASE_REG_CPU_CACHED & reg->flags)
+                       *out |= BASE_MEM_CACHED_CPU;
+               if (KBASE_REG_GPU_WR & reg->flags)
+                       *out |= BASE_MEM_PROT_GPU_WR;
+               if (KBASE_REG_GPU_RD & reg->flags)
+                       *out |= BASE_MEM_PROT_GPU_RD;
+               if (!(KBASE_REG_GPU_NX & reg->flags))
+                       *out |= BASE_MEM_PROT_GPU_EX;
+               if (KBASE_REG_SHARE_BOTH & reg->flags)
+                       *out |= BASE_MEM_COHERENT_SYSTEM;
+               if (KBASE_REG_SHARE_IN & reg->flags)
+                       *out |= BASE_MEM_COHERENT_LOCAL;
+               break;
+       }
+       default:
+               *out = 0;
+               goto out_unlock;
        }
 
        ret = MALI_ERROR_NONE;
@@ -228,9 +239,9 @@ out_unlock:
        return ret;
 }
 
-mali_error kbase_mem_flags_change(kbase_context *kctx, mali_addr64 gpu_addr, unsigned int flags, unsigned int mask)
+mali_error kbase_mem_flags_change(struct kbase_context *kctx, mali_addr64 gpu_addr, unsigned int flags, unsigned int mask)
 {
-       kbase_va_region *reg;
+       struct kbase_va_region *reg;
        mali_error ret = MALI_ERROR_FUNCTION_FAILED;
        unsigned int real_flags = 0;
        unsigned int prev_flags = 0;
@@ -252,9 +263,9 @@ mali_error kbase_mem_flags_change(kbase_context *kctx, mali_addr64 gpu_addr, uns
                goto out;
 
        /* convert flags */
-       if( BASE_MEM_COHERENT_SYSTEM & flags )
+       if (BASE_MEM_COHERENT_SYSTEM & flags)
                real_flags |= KBASE_REG_SHARE_BOTH;
-       else if ( BASE_MEM_COHERENT_LOCAL & flags )
+       else if (BASE_MEM_COHERENT_LOCAL & flags)
                real_flags |= KBASE_REG_SHARE_IN;
 
        /* now we can lock down the context, and find the region */
@@ -262,17 +273,16 @@ mali_error kbase_mem_flags_change(kbase_context *kctx, mali_addr64 gpu_addr, uns
 
        /* Validate the region */
        reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
-       if (!reg || (reg->flags & KBASE_REG_FREE) )
+       if (!reg || (reg->flags & KBASE_REG_FREE))
                goto out_unlock;
 
        /* limit to imported memory */
-       if ( (reg->alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) &&
+       if ((reg->alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) &&
             (reg->alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
                goto out_unlock;
 
        /* no change? */
-       if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH)))
-       {
+       if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) {
                ret = MALI_ERROR_NONE;
                goto out_unlock;
        }
@@ -283,8 +293,7 @@ mali_error kbase_mem_flags_change(kbase_context *kctx, mali_addr64 gpu_addr, uns
        reg->flags |= real_flags;
 
        /* Currently supporting only imported memory */
-       switch(reg->alloc->type)
-       {
+       switch (reg->alloc->type) {
 #ifdef CONFIG_UMP
                case KBASE_MEM_TYPE_IMPORTED_UMP:
                        ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_phy_pages(reg), reg->alloc->nents, reg->flags);
@@ -313,8 +322,10 @@ out:
        return ret;
 }
 
+#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_TOTAL_BITS)
+
 #ifdef CONFIG_UMP
-static struct kbase_va_region *kbase_mem_from_ump(kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)
+static struct kbase_va_region *kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)
 {
        struct kbase_va_region *reg;
        ump_dd_handle umph;
@@ -345,6 +356,10 @@ static struct kbase_va_region *kbase_mem_from_ump(kbase_context *kctx, ump_secur
        if (!*va_pages)
                goto bad_size;
 
+       if (*va_pages > (UINT64_MAX / PAGE_SIZE))
+               /* 64-bit address range is the max */
+               goto bad_size;
+
        if (*flags & BASE_MEM_SAME_VA)
                reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
        else
@@ -359,25 +374,30 @@ static struct kbase_va_region *kbase_mem_from_ump(kbase_context *kctx, ump_secur
        reg->alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP);
        if (IS_ERR_OR_NULL(reg->alloc))
                goto no_alloc_obj;
-       
+
        reg->alloc->imported.ump_handle = umph;
 
        reg->flags &= ~KBASE_REG_FREE;
        reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */
        reg->flags &= ~KBASE_REG_GROWABLE;      /* UMP cannot be grown */
 
+       /* Override import flags based on UMP flags */
+       *flags &= ~(BASE_MEM_CACHED_CPU);
+       *flags &= ~(BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR);
+       *flags &= ~(BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR);
+
        if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
            (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) {
                reg->flags |= KBASE_REG_CPU_CACHED;
                *flags |= BASE_MEM_CACHED_CPU;
        }
 
-       if (cpu_flags & UMP_PROT_DEVICE_WR) {
+       if (cpu_flags & UMP_PROT_CPU_WR) {
                reg->flags |= KBASE_REG_CPU_WR;
                *flags |= BASE_MEM_PROT_CPU_WR;
        }
 
-       if (cpu_flags & UMP_PROT_DEVICE_RD) {
+       if (cpu_flags & UMP_PROT_CPU_RD) {
                reg->flags |= KBASE_REG_CPU_RD;
                *flags |= BASE_MEM_PROT_CPU_RD;
        }
@@ -417,12 +437,11 @@ bad_size:
        ump_dd_release(umph);
 bad_id:
        return NULL;
-
 }
 #endif                         /* CONFIG_UMP */
 
 #ifdef CONFIG_DMA_SHARED_BUFFER
-static struct kbase_va_region *kbase_mem_from_umm(kbase_context *kctx, int fd, u64 *va_pages, u64 *flags)
+static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx, int fd, u64 *va_pages, u64 *flags)
 {
        struct kbase_va_region *reg;
        struct dma_buf *dma_buf;
@@ -440,13 +459,17 @@ static struct kbase_va_region *kbase_mem_from_umm(kbase_context *kctx, int fd, u
        if (!*va_pages)
                goto bad_size;
 
+       if (*va_pages > (UINT64_MAX / PAGE_SIZE))
+               /* 64-bit address range is the max */
+               goto bad_size;
+
        /* ignore SAME_VA */
        *flags &= ~BASE_MEM_SAME_VA;
 
 #ifdef CONFIG_64BIT
        if (!is_compat_task()) {
                /* 64-bit tasks must MMAP anyway, but not expose this address to clients */
-               *flags |= KBASE_MEM_NEED_MMAP;
+               *flags |= BASE_MEM_NEED_MMAP;
                reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
        } else {
 #else
@@ -505,11 +528,11 @@ no_buf:
 }
 #endif  /* CONFIG_DMA_SHARED_BUFFER */
 
-u64 kbase_mem_alias(kbase_context *kctx, u64 *flags, u64 stride,
+u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
                    u64 nents, struct base_mem_aliasing_info *ai,
                    u64 *num_pages)
 {
-       kbase_va_region *reg;
+       struct kbase_va_region *reg;
        u64 gpu_va;
        size_t i;
 
@@ -523,7 +546,7 @@ u64 kbase_mem_alias(kbase_context *kctx, u64 *flags, u64 stride,
                   BASE_MEM_HINT_GPU_RD | BASE_MEM_HINT_GPU_WR |
                   BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_LOCAL);
 
-       if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR) )) {
+       if (!(*flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR))) {
                dev_warn(kctx->kbdev->dev,
                                "kbase_mem_alias called with bad flags (%llx)",
                                (unsigned long long)*flags);
@@ -536,6 +559,10 @@ u64 kbase_mem_alias(kbase_context *kctx, u64 *flags, u64 stride,
        if (!nents)
                goto bad_nents;
 
+       if ((nents * stride) > (UINT64_MAX / PAGE_SIZE))
+               /* 64-bit address range is the max */
+               goto bad_size;
+
        /* calculate the number of pages this alias will cover */
        *num_pages = nents * stride;
 
@@ -543,7 +570,7 @@ u64 kbase_mem_alias(kbase_context *kctx, u64 *flags, u64 stride,
        if (!is_compat_task()) {
                /* 64-bit tasks must MMAP anyway, but not expose this address to
                 * clients */
-               *flags |= KBASE_MEM_NEED_MMAP;
+               *flags |= BASE_MEM_NEED_MMAP;
                reg = kbase_alloc_free_region(kctx, 0, *num_pages,
                                              KBASE_REG_ZONE_SAME_VA);
        } else {
@@ -586,6 +613,7 @@ u64 kbase_mem_alias(kbase_context *kctx, u64 *flags, u64 stride,
                } else {
                        struct kbase_va_region *aliasing_reg;
                        struct kbase_mem_phy_alloc *alloc;
+
                        aliasing_reg = kbase_region_tracker_find_region_base_address(kctx, (ai[i].handle >> PAGE_SHIFT) << PAGE_SHIFT);
 
                        /* validate found region */
@@ -623,8 +651,7 @@ u64 kbase_mem_alias(kbase_context *kctx, u64 *flags, u64 stride,
        if (!is_compat_task()) {
                /* Bind to a cookie */
                if (!kctx->cookies) {
-                       dev_err(kctx->kbdev->dev, "No cookies "
-                                               "available for allocation!");
+                       dev_err(kctx->kbdev->dev, "No cookies available for allocation!");
                        goto no_cookie;
                }
                /* return a cookie */
@@ -668,15 +695,16 @@ no_aliased_array:
 no_alloc_obj:
        kfree(reg);
 no_reg:
+bad_size:
 bad_nents:
 bad_stride:
 bad_flags:
        return 0;
 }
 
-int kbase_mem_import(kbase_context *kctx, base_mem_import_type type, int handle, mali_addr64 * gpu_va, u64 * va_pages, u64 * flags)
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, int handle, mali_addr64 *gpu_va, u64 *va_pages, u64 *flags)
 {
-       kbase_va_region * reg;
+       struct kbase_va_region *reg;
 
        KBASE_DEBUG_ASSERT(kctx);
        KBASE_DEBUG_ASSERT(gpu_va);
@@ -710,7 +738,7 @@ int kbase_mem_import(kbase_context *kctx, base_mem_import_type type, int handle,
        kbase_gpu_vm_lock(kctx);
 
        /* mmap needed to setup VA? */
-       if (*flags & (BASE_MEM_SAME_VA | KBASE_MEM_NEED_MMAP)) {
+       if (*flags & (BASE_MEM_SAME_VA | BASE_MEM_NEED_MMAP)) {
                /* Bind to a cookie */
                if (!kctx->cookies)
                        goto no_cookie;
@@ -738,6 +766,9 @@ int kbase_mem_import(kbase_context *kctx, base_mem_import_type type, int handle,
                *gpu_va = reg->start_pfn << PAGE_SHIFT;
        }
 
+       /* clear out private flags */
+       *flags &= ((1UL << BASE_MEM_FLAGS_NR_TOTAL_BITS) - 1);
+
        kbase_gpu_vm_unlock(kctx);
 
        return 0;
@@ -755,7 +786,6 @@ no_reg:
 }
 
 
-
 static int zap_range_nolock(struct mm_struct *mm,
                const struct vm_operations_struct *vm_ops,
                unsigned long start, unsigned long end)
@@ -791,12 +821,12 @@ try_next:
        return err;
 }
 
-int kbase_mem_commit(kbase_context * kctx, mali_addr64 gpu_addr, u64 new_pages, base_backing_threshold_status * failure_reason)
+int kbase_mem_commit(struct kbase_context *kctx, mali_addr64 gpu_addr, u64 new_pages, enum base_backing_threshold_status *failure_reason)
 {
        u64 old_pages;
        u64 delta;
        int res = -EINVAL;
-       kbase_va_region *reg;
+       struct kbase_va_region *reg;
        phys_addr_t *phy_pages;
 
        KBASE_DEBUG_ASSERT(kctx);
@@ -863,7 +893,7 @@ int kbase_mem_commit(kbase_context * kctx, mali_addr64 gpu_addr, u64 new_pages,
                }
        } else {
                /* shrinking */
-               struct kbase_cpu_mapping * mapping;
+               struct kbase_cpu_mapping *mapping;
                mali_error err;
 
                /* first, unmap from any mappings affected */
@@ -915,12 +945,12 @@ out_unlock:
        up_read(&current->mm->mmap_sem);
 
        return res;
-
 }
 
 STATIC void kbase_cpu_vm_open(struct vm_area_struct *vma)
 {
        struct kbase_cpu_mapping *map = vma->vm_private_data;
+
        KBASE_DEBUG_ASSERT(map);
        KBASE_DEBUG_ASSERT(map->count > 0);
        /* non-atomic as we're under Linux' mm lock */
@@ -930,6 +960,7 @@ STATIC void kbase_cpu_vm_open(struct vm_area_struct *vma)
 STATIC void kbase_cpu_vm_close(struct vm_area_struct *vma)
 {
        struct kbase_cpu_mapping *map = vma->vm_private_data;
+
        KBASE_DEBUG_ASSERT(map);
        KBASE_DEBUG_ASSERT(map->count > 0);
 
@@ -969,8 +1000,10 @@ STATIC int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        KBASE_DEBUG_ASSERT(map->kctx);
        KBASE_DEBUG_ASSERT(map->alloc);
 
-       /* we don't use vmf->pgoff as it's affected by our mmap with offset being a GPU VA or a cookie */
-       rel_pgoff = ((unsigned long)vmf->virtual_address - map->vm_start) >> PAGE_SHIFT;
+       /* we don't use vmf->pgoff as it's affected by our mmap with
+        * offset being a GPU VA or a cookie */
+       rel_pgoff = ((unsigned long)vmf->virtual_address - map->vm_start)
+                       >> PAGE_SHIFT;
 
        kbase_gpu_vm_lock(map->kctx);
        if (map->page_off + rel_pgoff >= map->alloc->nents)
@@ -978,10 +1011,10 @@ STATIC int kbase_cpu_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* insert all valid pages from the fault location */
        for (i = rel_pgoff;
-                          i < MIN((vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
-                                    map->alloc->nents - map->page_off); i++) {
+            i < MIN((vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
+            map->alloc->nents - map->page_off); i++) {
                int ret = vm_insert_pfn(vma, map->vm_start + (i << PAGE_SHIFT),
-                              PFN_DOWN(map->alloc->pages[map->page_off + i]));
+                   PFN_DOWN(map->alloc->pages[map->page_off + i]));
                if (ret < 0 && ret != -EBUSY)
                        goto locked_bad_fault;
        }
@@ -1032,7 +1065,7 @@ static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vm
         * See MIDBASE-1057
         */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
        vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO;
 #else
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
@@ -1073,7 +1106,6 @@ static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vm
                goto out;
        }
 
-
        map->page_off = start_off;
        map->region = free_on_close ? reg : NULL;
        map->kctx = reg->kctx;
@@ -1088,7 +1120,7 @@ static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vm
        map->count = 1; /* start with one ref */
 
        if (reg->flags & KBASE_REG_CPU_CACHED)
-               map->alloc->accessed_cached = 1;
+               map->alloc->properties |= KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
 
        list_add(&map->mappings_list, &map->alloc->mappings);
 
@@ -1096,7 +1128,7 @@ static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vm
        return err;
 }
 
-static int kbase_trace_buffer_mmap(kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr)
+static int kbase_trace_buffer_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr)
 {
        struct kbase_va_region *new_reg;
        u32 nr_pages;
@@ -1173,10 +1205,9 @@ out_no_region:
        }
 out:
        return err;
-
 }
 
-static int kbase_mmu_dump_mmap(kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr)
+static int kbase_mmu_dump_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr)
 {
        struct kbase_va_region *new_reg;
        void *kaddr;
@@ -1232,14 +1263,14 @@ out:
 }
 
 
-void kbase_os_mem_map_lock(kbase_context *kctx)
+void kbase_os_mem_map_lock(struct kbase_context *kctx)
 {
        struct mm_struct *mm = current->mm;
        (void)kctx;
        down_read(&mm->mmap_sem);
 }
 
-void kbase_os_mem_map_unlock(kbase_context *kctx)
+void kbase_os_mem_map_unlock(struct kbase_context *kctx)
 {
        struct mm_struct *mm = current->mm;
        (void)kctx;
@@ -1248,7 +1279,7 @@ void kbase_os_mem_map_unlock(kbase_context *kctx)
 
 int kbase_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       kbase_context *kctx = file->private_data;
+       struct kbase_context *kctx = file->private_data;
        struct kbase_va_region *reg;
        void *kaddr = NULL;
        size_t nr_pages;
@@ -1319,10 +1350,10 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
                /* SAME_VA stuff, fetch the right region */
                int gpu_pc_bits;
                int cookie = vma->vm_pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
+
                gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
                reg = kctx->pending_regions[cookie];
                if (NULL != reg) {
-
                        if (reg->flags & KBASE_REG_ALIGNED) {
                                /* nr_pages must be able to hold alignment pages
                                 * plus actual pages */
@@ -1460,14 +1491,13 @@ out:
 
 KBASE_EXPORT_TEST_API(kbase_mmap)
 
-void kbasep_os_process_page_usage_update( kbase_context *kctx, int pages )
+void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
 {
        struct mm_struct *mm;
 
        rcu_read_lock();
        mm = rcu_dereference(kctx->process_mm);
-       if (mm)
-       {
+       if (mm) {
                atomic_add(pages, &kctx->nonmapped_pages);
 #ifdef SPLIT_RSS_COUNTING
                add_mm_counter(mm, MM_FILEPAGES, pages);
@@ -1480,15 +1510,14 @@ void kbasep_os_process_page_usage_update( kbase_context *kctx, int pages )
        rcu_read_unlock();
 }
 
-static void kbasep_os_process_page_usage_drain(kbase_context * kctx)
+static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx)
 {
        int pages;
-       struct mm_struct * mm;
+       struct mm_struct *mm;
 
        spin_lock(&kctx->mm_update_lock);
        mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
-       if (!mm)
-       {
+       if (!mm) {
                spin_unlock(&kctx->mm_update_lock);
                return;
        }
@@ -1509,7 +1538,8 @@ static void kbasep_os_process_page_usage_drain(kbase_context * kctx)
 
 static void kbase_special_vm_close(struct vm_area_struct *vma)
 {
-       kbase_context * kctx;
+       struct kbase_context *kctx;
+
        kctx = vma->vm_private_data;
        kbasep_os_process_page_usage_drain(kctx);
 }
@@ -1518,12 +1548,11 @@ static const struct vm_operations_struct kbase_vm_special_ops = {
        .close = kbase_special_vm_close,
 };
 
-static int kbase_tracking_page_setup(struct kbase_context * kctx, struct vm_area_struct * vma)
+static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
 {
        /* check that this is the only tracking page */
        spin_lock(&kctx->mm_update_lock);
-       if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock)))
-       {
+       if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) {
                spin_unlock(&kctx->mm_update_lock);
                return -EFAULT;
        }
@@ -1534,7 +1563,7 @@ static int kbase_tracking_page_setup(struct kbase_context * kctx, struct vm_area
 
        /* no real access */
        vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
 #else
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
@@ -1544,7 +1573,7 @@ static int kbase_tracking_page_setup(struct kbase_context * kctx, struct vm_area
 
        return 0;
 }
-void *kbase_va_alloc(kbase_context *kctx, u32 size, kbase_hwc_dma_mapping *handle)
+void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle)
 {
        int i;
        int res;
@@ -1567,6 +1596,7 @@ void *kbase_va_alloc(kbase_context *kctx, u32 size, kbase_hwc_dma_mapping *handl
        if (size == 0)
                goto err;
 
+       /* All the alloc calls return zeroed memory */
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL, &attrs);
@@ -1576,8 +1606,6 @@ void *kbase_va_alloc(kbase_context *kctx, u32 size, kbase_hwc_dma_mapping *handl
        if (!va)
                goto err;
 
-       memset(va, 0x0, size);
-
        /* Store the state so we can free it later. */
        handle->cpu_va = va;
        handle->dma_pa = dma_pa;
@@ -1597,9 +1625,8 @@ void *kbase_va_alloc(kbase_context *kctx, u32 size, kbase_hwc_dma_mapping *handl
 
        page_array = kbase_get_phy_pages(reg);
 
-       for (i = 0; i < pages; i++) {
+       for (i = 0; i < pages; i++)
                page_array[i] = dma_pa + (i << PAGE_SHIFT);
-       }
 
        reg->alloc->nents = pages;
 
@@ -1626,7 +1653,7 @@ err:
 }
 KBASE_EXPORT_SYMBOL(kbase_va_alloc);
 
-void kbase_va_free(kbase_context *kctx, kbase_hwc_dma_mapping *handle)
+void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle)
 {
        struct kbase_va_region *reg;
        mali_error err;
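A pattern worth noting in the hunks above: kbase_mem_alloc(), kbase_mem_from_ump(), kbase_mem_from_umm() and kbase_mem_alias() all gain the same bad_size guard, rejecting any request whose page count exceeds UINT64_MAX / PAGE_SIZE. Anything larger would overflow a 64-bit byte count once shifted by PAGE_SHIFT. A self-contained sketch of the check follows; the helper name is illustrative, and ~(u64)0 stands in for the UINT64_MAX constant the driver takes from its own headers.

#include <linux/mm.h>           /* PAGE_SIZE */
#include <linux/types.h>

/* Reject page counts whose byte size cannot be represented in 64 bits.
 * With 4 KiB pages this caps the request at 2^52 pages. */
static inline bool demo_page_count_fits_u64(u64 va_pages)
{
        return va_pages <= (~(u64)0 / PAGE_SIZE);
}
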
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
index e7482a5cd450e5d073b95126b87b4e566d8e3f53..1dacacc8f0d47c27a9303a1f9d2ea2b117657afa 100755 (executable)
 #ifndef _KBASE_MEM_LINUX_H_
 #define _KBASE_MEM_LINUX_H_
 
-/* This define is used by the gator kernel module compile to select which DDK
- * API calling convention to use. If not defined (legacy DDK) gator assumes
- * version 1. The version to DDK release mapping is:
- *     Version 1 API: DDK versions r1px, r2px
- *     Version 2 API: DDK versions r3px and newer
- **/
-#define MALI_DDK_GATOR_API_VERSION 2
-
 /** A HWC dump mapping */
 typedef struct kbase_hwc_dma_mapping {
        void       *cpu_va;
@@ -40,12 +32,12 @@ typedef struct kbase_hwc_dma_mapping {
        size_t      size;
 } kbase_hwc_dma_mapping;
 
-struct kbase_va_region * kbase_mem_alloc(kbase_context * kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 * flags, u64 * gpu_va, u16 * va_alignment);
-mali_error kbase_mem_query(kbase_context *kctx, mali_addr64 gpu_addr, int query, u64 * const pages);
-int kbase_mem_import(kbase_context *kctx, base_mem_import_type type, int handle, mali_addr64 * gpu_va, u64 * va_pages, u64 * flags);
-u64 kbase_mem_alias(kbase_context *kctx, u64* flags, u64 stride, u64 nents, struct base_mem_aliasing_info* ai, u64 * num_pages);
-mali_error kbase_mem_flags_change(kbase_context *kctx, mali_addr64 gpu_addr, unsigned int flags, unsigned int mask);
-int kbase_mem_commit(kbase_context * kctx, mali_addr64 gpu_addr, u64 new_pages, base_backing_threshold_status * failure_reason);
+struct kbase_va_region * kbase_mem_alloc(struct kbase_context * kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 * flags, u64 * gpu_va, u16 * va_alignment);
+mali_error kbase_mem_query(struct kbase_context *kctx, mali_addr64 gpu_addr, int query, u64 * const pages);
+int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, int handle, mali_addr64 * gpu_va, u64 * va_pages, u64 * flags);
+u64 kbase_mem_alias(struct kbase_context *kctx, u64* flags, u64 stride, u64 nents, struct base_mem_aliasing_info* ai, u64 * num_pages);
+mali_error kbase_mem_flags_change(struct kbase_context *kctx, mali_addr64 gpu_addr, unsigned int flags, unsigned int mask);
+int kbase_mem_commit(struct kbase_context * kctx, mali_addr64 gpu_addr, u64 new_pages, enum base_backing_threshold_status * failure_reason);
 int kbase_mmap(struct file *file, struct vm_area_struct *vma);
 
 /** @brief Allocate memory from kernel space and map it onto the GPU
@@ -55,13 +47,13 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma);
  * @param handle An opaque structure used to contain the state needed to free the memory
  * @return the VA for kernel space and GPU MMU
  */
-void *kbase_va_alloc(kbase_context *kctx, u32 size, kbase_hwc_dma_mapping *handle);
+void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle);
 
 /** @brief Free/unmap memory allocated by kbase_va_alloc
  *
  * @param kctx   The context used for the allocation/mapping
  * @param handle An opaque structure returned by the kbase_va_alloc function.
  */
-void kbase_va_free(kbase_context *kctx, kbase_hwc_dma_mapping *handle);
+void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle);
 
 #endif                         /* _KBASE_MEM_LINUX_H_ */
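For callers of the last two helpers declared above, the flow is: kbase_va_alloc() allocates DMA-able, zeroed memory, maps it for the GPU as described in the header, and records the state needed for teardown in the kbase_hwc_dma_mapping handle; kbase_va_free() undoes both. A hedged usage sketch, assuming the driver's internal headers are available and that kbase_va_alloc() returns NULL on failure (the demo_* names are illustrative):

#include <mali_kbase.h>         /* driver-internal context and allocator declarations */

/* Sketch: set up and tear down a small buffer shared between the kernel
 * and the GPU, e.g. for a hardware-counter dump. */
static void *demo_hwc_buffer_create(struct kbase_context *kctx,
                                    struct kbase_hwc_dma_mapping *handle)
{
        void *cpu_va = kbase_va_alloc(kctx, 4096, handle);

        if (!cpu_va)                    /* assumed failure convention */
                return NULL;

        /* ... fill or read the buffer through cpu_va; the same pages are
         * mapped into the context's GPU address space ... */
        return cpu_va;
}

static void demo_hwc_buffer_destroy(struct kbase_context *kctx,
                                    struct kbase_hwc_dma_mapping *handle)
{
        kbase_va_free(kctx, handle);    /* unmaps and frees the DMA memory */
}

The handle passed to kbase_va_free() must be the one filled in by the matching kbase_va_alloc() call.
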
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
index c88a3f13880a7927bd1d79c9bd5078f313e5e433..2b3985ea4fca6320abc4fa459a39c2d7dc34a89f 100755 (executable)
@@ -24,6 +24,8 @@
 #error "Don't include this file directly, use mali_kbase.h instead"
 #endif
 
+#include <linux/dma-mapping.h>
+
 /**
  * @brief Flags for kbase_phy_allocator_pages_alloc
  */
 #define KBASE_PHY_PAGES_POISON_VALUE  0xFD /** Value to fill the memory with when KBASE_PHY_PAGES_FLAG_POISON is set */
 
 /**
- * A pointer to a cache synchronization function, either kbase_sync_to_cpu()
- * or kbase_sync_to_memory().
- */
-typedef void (*kbase_sync_kmem_fn) (phys_addr_t, void *, size_t);
-
-/**
- * @brief Synchronize a memory area for other system components usage
- *
- * Performs the necessary memory coherency operations on a given memory area,
- * such that after the call, changes in memory are correctly seen by other
- * system components. Any change made to memory after that call may not be seen
- * by other system components.
- *
- * In effect:
- * - all CPUs will perform a cache clean operation on their inner & outer data caches
- * - any write buffers are drained (including that of outer cache controllers)
- *
- * This function waits until all operations have completed.
- *
- * The area is restricted to one page or less and must not cross a page boundary.
- * The offset within the page is aligned to cache line size and size is ensured
- * to be a multiple of the cache line size.
- *
- * Both physical and virtual address of the area need to be provided to support OS
- * cache flushing APIs that either use the virtual or the physical address. When
- * called from OS specific code it is allowed to only provide the address that
- * is actually used by the specific OS and leave the other address as 0.
- *
- * @param[in] paddr  physical address
- * @param[in] vaddr  CPU virtual address valid in the current user VM or the kernel VM
- * @param[in] sz     size of the area, <= PAGE_SIZE.
+ * A pointer to a cache synchronization function, either dma_sync_single_for_cpu
+ * or dma_sync_single_for_device.
  */
-void kbase_sync_to_memory(phys_addr_t paddr, void *vaddr, size_t sz);
+typedef void (*kbase_sync_kmem_fn) (struct device *, dma_addr_t, size_t size,
+                                  enum dma_data_direction);
 
-/**
- * @brief Synchronize a memory area for CPU usage
- *
- * Performs the necessary memory coherency operations on a given memory area,
- * such that after the call, changes in memory are correctly seen by any CPU.
- * Any change made to this area by any CPU before this call may be lost.
- *
- * In effect:
- * - all CPUs will perform a cache clean & invalidate operation on their inner &
- *   outer data caches.
- *
- * @note Stricly only an invalidate operation is required but by cleaning the cache
- * too we prevent loosing changes made to the memory area due to software bugs. By
- * having these changes cleaned from the cache it allows us to catch the memory
- * area getting corrupted with the help of watch points. In correct operation the
- * clean & invalidate operation would not be more expensive than an invalidate
- * operation. Also note that for security reasons, it is dangerous to expose a
- * cache 'invalidate only' operation to user space.
- *
- * - any read buffers are flushed (including that of outer cache controllers)
- *
- * This function waits until all operations have completed.
- *
- * The area is restricted to one page or less and must not cross a page boundary.
- * The offset within the page is aligned to cache line size and size is ensured
- * to be a multiple of the cache line size.
- *
- * Both physical and virtual address of the area need to be provided to support OS
- * cache flushing APIs that either use the virtual or the physical address. When
- * called from OS specific code it is allowed to only provide the address that
- * is actually used by the specific OS and leave the other address as 0.
- *
- * @param[in] paddr  physical address
- * @param[in] vaddr  CPU virtual address valid in the current user VM or the kernel VM
- * @param[in] sz     size of the area, <= PAGE_SIZE.
- */
-void kbase_sync_to_cpu(phys_addr_t paddr, void *vaddr, size_t sz);
 
 #endif                         /* _KBASE_LOWLEVEL_H */
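With the old kbase_sync_to_cpu()/kbase_sync_to_memory() helpers removed, cache maintenance in this driver now goes through the standard Linux DMA API, and kbase_sync_kmem_fn is redefined to match the dma_sync_single_for_cpu()/dma_sync_single_for_device() signatures. A minimal sketch of how such a pointer can be selected and applied to a page whose DMA handle was stashed in page_private(), as the carveout allocator and MMU code in this patch do (the demo_* name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

typedef void (*kbase_sync_kmem_fn)(struct device *, dma_addr_t, size_t,
                                   enum dma_data_direction);

/* Sketch: sync one mapped page either for CPU access or for device access,
 * using the dma_addr_t that was stored in page_private() at map time. */
static void demo_sync_page(struct device *dev, struct page *p,
                           bool for_cpu_access)
{
        kbase_sync_kmem_fn sync = for_cpu_access ? dma_sync_single_for_cpu
                                                 : dma_sync_single_for_device;

        sync(dev, (dma_addr_t)page_private(p), PAGE_SIZE, DMA_BIDIRECTIONAL);
}

This mirrors what the patch does inline in kbase_mem_allocator_alloc()/free() and in the MMU page-table code, where dma_sync_single_for_device()/for_cpu() are called directly with page_private(p) as the handle.
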
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.c
new file mode 100755 (executable)
index 0000000..b7d709e
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase_gpu_memory_debugfs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+/* mem_profile file name max length 22 based on format <int>_<int>\0 */
+#define KBASEP_DEBUGFS_FNAME_SIZE_MAX (10+1+10+1)
+
+void kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+               size_t size)
+{
+       spin_lock(&kctx->mem_profile_lock);
+       kfree(kctx->mem_profile_data);
+       kctx->mem_profile_data = data;
+       kctx->mem_profile_size = size;
+       spin_unlock(&kctx->mem_profile_lock);
+}
+
+/** Show callback for the @c mem_profile debugfs file.
+ *
+ * This function is called to get the contents of the @c mem_profile debugfs
+ * file. This is a report of current memory usage and distribution in userspace.
+ *
+ * @param sfile The debugfs entry
+ * @param data Data associated with the entry
+ *
+ * @return 0 if successfully prints data in debugfs entry file
+ *         -1 if it encountered an error
+ */
+static int kbasep_mem_profile_seq_show(struct seq_file *sfile, void *data)
+{
+       struct kbase_context *kctx = sfile->private;
+
+       KBASE_DEBUG_ASSERT(kctx != NULL);
+
+       spin_lock(&kctx->mem_profile_lock);
+       seq_write(sfile, kctx->mem_profile_data, kctx->mem_profile_size);
+       seq_putc(sfile, '\n');
+       spin_unlock(&kctx->mem_profile_lock);
+
+       return 0;
+}
+
+/*
+ *  File operations related to debugfs entry for mem_profile
+ */
+STATIC int kbasep_mem_profile_debugfs_open(struct inode *in, struct file *file)
+{
+       return single_open(file, kbasep_mem_profile_seq_show, in->i_private);
+}
+
+static const struct file_operations kbasep_mem_profile_debugfs_fops = {
+       .open = kbasep_mem_profile_debugfs_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+mali_error kbasep_mem_profile_debugfs_add(struct kbase_context *kctx)
+{
+       char name[KBASEP_DEBUGFS_FNAME_SIZE_MAX];
+
+       KBASE_DEBUG_ASSERT(kctx != NULL);
+
+       spin_lock_init(&kctx->mem_profile_lock);
+
+       scnprintf(name, KBASEP_DEBUGFS_FNAME_SIZE_MAX, "%d_%d", kctx->pid,
+                       kctx->id);
+
+       kctx->mem_dentry = debugfs_create_file(name, S_IRUGO,
+                       kctx->kbdev->memory_profile_directory,
+                       kctx, &kbasep_mem_profile_debugfs_fops);
+       if (IS_ERR(kctx->mem_dentry))
+               goto error_out;
+
+       return MALI_ERROR_NONE;
+
+error_out:
+       return MALI_ERROR_FUNCTION_FAILED;
+}
+
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx)
+{
+       KBASE_DEBUG_ASSERT(kctx != NULL);
+
+       spin_lock(&kctx->mem_profile_lock);
+       kfree(kctx->mem_profile_data);
+       kctx->mem_profile_data = NULL;
+       spin_unlock(&kctx->mem_profile_lock);
+
+       if (IS_ERR(kctx->mem_dentry))
+               return;
+       debugfs_remove(kctx->mem_dentry);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+/**
+ * @brief Stub function for when debugfs is disabled
+ */
+mali_error kbasep_mem_profile_debugfs_add(struct kbase_context *ctx)
+{
+       return MALI_ERROR_NONE;
+}
+
+/**
+ * @brief Stub function for when debugfs is disabled
+ */
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *ctx)
+{
+}
+
+/**
+ * @brief Stub function for when debugfs is disabled
+ */
+void kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+               size_t size)
+{
+       kfree(data);
+}
+#endif /* CONFIG_DEBUG_FS */
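kbasep_mem_profile_debugfs_insert() transfers ownership of the buffer: the debugfs code frees the previously installed buffer under mem_profile_lock, and the stub above frees the new one immediately when debugfs is disabled, so the caller must never touch the pointer again. A hedged sketch of the expected calling pattern (buffer contents and the demo_* helper name are illustrative):

#include <mali_kbase_mem_profile_debugfs.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: publish a freshly built, NUL-terminated report through the
 * mem_profile debugfs entry. After the insert call the buffer belongs to
 * the debugfs code, which frees it when a newer report replaces it. */
static int demo_publish_mem_profile(struct kbase_context *kctx)
{
        static const char report[] = "demo_pool,pages,42";
        char *buf = kmalloc(sizeof(report), GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        memcpy(buf, report, sizeof(report));
        /* Pass the string length so the trailing NUL is not written out by
         * the seq_file show callback. */
        kbasep_mem_profile_debugfs_insert(kctx, buf, strlen(buf));

        return 0;
}

With CONFIG_DEBUG_FS disabled the same call simply frees buf, so callers need no #ifdef of their own.
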
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs.h
new file mode 100755 (executable)
index 0000000..ef908c8
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file mali_kbase_mem_profile_debugfs.h
+ * Header file for mem profile entries in debugfs
+ *
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_H
+#define _KBASE_MEM_PROFILE_DEBUGFS_H
+
+#include <mali_kbase.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+/**
+ * @brief Add new entry to Mali memory profile debugfs
+ */
+mali_error kbasep_mem_profile_debugfs_add(struct kbase_context *kctx);
+
+/**
+ * @brief Remove entry from Mali memory profile debugfs
+ */
+void kbasep_mem_profile_debugfs_remove(struct kbase_context *kctx);
+
+/**
+ * @brief Insert data into the debugfs file, so it can be read by userspace
+ *
+ * Function takes ownership of @c data and frees it later when new data
+ * are inserted.
+ *
+ * @param kctx Context to which file data should be inserted
+ * @param data NULL-terminated string to be inserted into the mem_profile file,
+ *             without a trailing newline character
+ * @param size length of @c data
+ */
+void kbasep_mem_profile_debugfs_insert(struct kbase_context *kctx, char *data,
+               size_t size);
+
+#endif  /*_KBASE_MEM_PROFILE_DEBUGFS_H*/
+
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu.c b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
index 5bfc8a2ffdc6928194a779fd394b8bc39ab21f29..ada488ff30bc01bf9c77eeee6fe4dc3bc23593f7 100755 (executable)
@@ -23,6 +23,7 @@
  */
 
 /* #define DEBUG    1 */
+#include <linux/dma-mapping.h>
 #include <mali_kbase.h>
 #include <mali_midg_regmap.h>
 #include <mali_kbase_gator.h>
@@ -32,6 +33,7 @@
 
 #include <mali_kbase_defs.h>
 #include <mali_kbase_hw.h>
+#include <mali_kbase_mmu_hw.h>
 
 #define KBASE_MMU_PAGE_ENTRIES 512
 
  *        a 4kB physical page.
  */
 
-static void kbase_mmu_report_fault_and_kill(kbase_context *kctx, kbase_as *as);
-static u64 lock_region(kbase_device *kbdev, u64 pfn, size_t num_pages);
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as);
+
 
 /* Helper Function to perform assignment of page table entries, to ensure the use of
  * strd, which is required on LPAE systems.
  */
 
-static inline void page_table_entry_set( kbase_device * kbdev, u64 * pte, u64 phy )
+static inline void page_table_entry_set(struct kbase_device *kbdev, u64 *pte, u64 phy)
 {
 #ifdef CONFIG_64BIT
        *pte = phy;
@@ -73,17 +75,12 @@ static inline void page_table_entry_set( kbase_device * kbdev, u64 * pte, u64 ph
                                "strd r0, r1, [%[pte]]\n\t"
                                : "=m" (*pte)
                                : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy)
-                               : "r0", "r1" );
+                               : "r0", "r1");
 #else
 #error "64-bit atomic write must be implemented for your architecture"
 #endif
 }
 
-static void ksync_kern_vrange_gpu(phys_addr_t paddr, void *vaddr, size_t size)
-{
-       kbase_sync_to_memory(paddr, vaddr, size);
-}
-
 static size_t make_multiple(size_t minimum, size_t multiple)
 {
        size_t remainder = minimum % multiple;
@@ -93,34 +90,23 @@ static size_t make_multiple(size_t minimum, size_t multiple)
                return minimum + multiple - remainder;
 }
 
-static void mmu_mask_reenable(kbase_device *kbdev, kbase_context *kctx, kbase_as *as)
-{
-       unsigned long flags;
-       u32 mask;
-       spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
-       mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx);
-       mask |= ((1UL << as->number) | (1UL << (MMU_REGS_BUS_ERROR_FLAG(as->number))));
-       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), mask, kctx);
-       spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
-}
-
 static void page_fault_worker(struct work_struct *data)
 {
        u64 fault_pfn;
        size_t new_pages;
        size_t fault_rel_pfn;
-       kbase_as *faulting_as;
+       struct kbase_as *faulting_as;
        int as_no;
-       kbase_context *kctx;
-       kbase_device *kbdev;
-       kbase_va_region *region;
+       struct kbase_context *kctx;
+       struct kbase_device *kbdev;
+       struct kbase_va_region *region;
        mali_error err;
 
-       faulting_as = container_of(data, kbase_as, work_pagefault);
+       faulting_as = container_of(data, struct kbase_as, work_pagefault);
        fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
        as_no = faulting_as->number;
 
-       kbdev = container_of(faulting_as, kbase_device, as[as_no]);
+       kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
 
        /* Grab the context that was already refcounted in kbase_mmu_interrupt().
         * Therefore, it cannot be scheduled out of this AS until we explicitly release it
@@ -130,20 +116,26 @@ static void page_fault_worker(struct work_struct *data)
 
        if (kctx == NULL) {
                /* Only handle this if not already suspended */
-               if ( !kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+               if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+                       struct kbase_mmu_setup *current_setup = &faulting_as->current_setup;
+
                        /* Address space has no context, terminate the work */
-                       u32 reg;
 
                        /* AS transaction begin */
                        mutex_lock(&faulting_as->transaction_mutex);
-                       reg = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), NULL);
-                       reg = (reg & (~(u32) MMU_TRANSTAB_ADRMODE_MASK)) | ASn_TRANSTAB_ADRMODE_UNMAPPED;
-                       kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), reg, NULL);
-                       kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_UPDATE, NULL);
+
+                       /* Switch to unmapped mode */
+                       current_setup->transtab &= ~(u64)MMU_TRANSTAB_ADRMODE_MASK;
+                       current_setup->transtab |= AS_TRANSTAB_ADRMODE_UNMAPPED;
+
+                       /* Apply new address space settings */
+                       kbase_mmu_hw_configure(kbdev, faulting_as, kctx);
+
                        mutex_unlock(&faulting_as->transaction_mutex);
                        /* AS transaction end */
 
-                       mmu_mask_reenable(kbdev, NULL, faulting_as);
+                       kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+                                                KBASE_MMU_FAULT_TYPE_PAGE);
                        kbase_pm_context_idle(kbdev);
                }
                return;
@@ -162,7 +154,7 @@ static void page_fault_worker(struct work_struct *data)
                goto fault_done;
        }
 
-       if ((((faulting_as->fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_READ) && !(region->flags & KBASE_REG_GPU_RD)) || (((faulting_as->fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_WRITE) && !(region->flags & KBASE_REG_GPU_WR)) || (((faulting_as->fault_status & ASn_FAULTSTATUS_ACCESS_TYPE_MASK) == ASn_FAULTSTATUS_ACCESS_TYPE_EX) && (region->flags & KBASE_REG_GPU_NX))) {
+       if ((((faulting_as->fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) == AS_FAULTSTATUS_ACCESS_TYPE_READ) && !(region->flags & KBASE_REG_GPU_RD)) || (((faulting_as->fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) == AS_FAULTSTATUS_ACCESS_TYPE_WRITE) && !(region->flags & KBASE_REG_GPU_WR)) || (((faulting_as->fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) == AS_FAULTSTATUS_ACCESS_TYPE_EX) && (region->flags & KBASE_REG_GPU_NX))) {
                dev_warn(kbdev->dev, "Access permissions don't match: region->flags=0x%lx", region->flags);
                kbase_gpu_vm_unlock(kctx);
                kbase_mmu_report_fault_and_kill(kctx, faulting_as);
@@ -176,7 +168,8 @@ static void page_fault_worker(struct work_struct *data)
 
        if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
                dev_warn(kbdev->dev, "Page fault in allocated region of growable TMEM: Ignoring");
-               mmu_mask_reenable(kbdev, kctx, faulting_as);
+               kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+                                        KBASE_MMU_FAULT_TYPE_PAGE);
                kbase_gpu_vm_unlock(kctx);
                goto fault_done;
        }
@@ -189,25 +182,21 @@ static void page_fault_worker(struct work_struct *data)
 
        if (0 == new_pages) {
                /* Duplicate of a fault we've already handled, nothing to do */
-               mmu_mask_reenable(kbdev, kctx, faulting_as);
+               kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+                                        KBASE_MMU_FAULT_TYPE_PAGE);
                kbase_gpu_vm_unlock(kctx);
                goto fault_done;
        }
 
        if (MALI_ERROR_NONE == kbase_alloc_phy_pages_helper(region->alloc, new_pages)) {
+               u32 op;
+
                /* alloc success */
-               mali_addr64 lock_addr;
                KBASE_DEBUG_ASSERT(kbase_reg_current_backed_size(region) <= region->nr_pages);
 
                /* AS transaction begin */
                mutex_lock(&faulting_as->transaction_mutex);
 
-               /* Lock the VA region we're about to update */
-               lock_addr = lock_region(kbdev, faulting_as->fault_addr >> PAGE_SHIFT, new_pages);
-               kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_LO), lock_addr & 0xFFFFFFFFUL, kctx);
-               kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_LOCKADDR_HI), lock_addr >> 32, kctx);
-               kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_LOCK, kctx);
-
                /* set up the new pages */
                err = kbase_mmu_insert_pages(kctx, region->start_pfn + kbase_reg_current_backed_size(region) - new_pages, &kbase_get_phy_pages(region)[kbase_reg_current_backed_size(region) - new_pages], new_pages, region->flags);
                if (MALI_ERROR_NONE != err) {
@@ -225,32 +214,21 @@ static void page_fault_worker(struct work_struct *data)
 
                /* flush L2 and unlock the VA (resumes the MMU) */
                if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
-                       kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH, kctx);
+                       op = AS_COMMAND_FLUSH;
                else
-                       kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_FLUSH_PT, kctx);
-
-               /* wait for the flush to complete */
-               while (kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_STATUS), kctx) & 1)
-                       ;
-
-               if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
-                       /* Issue an UNLOCK command to ensure that valid page tables are re-read by the GPU after an update.
-                          Note that, the FLUSH command should perform all the actions necessary, however the bus logs show
-                          that if multiple page faults occur within an 8 page region the MMU does not always re-read the
-                          updated page table entries for later faults or is only partially read, it subsequently raises the
-                          page fault IRQ for the same addresses, the unlock ensures that the MMU cache is flushed, so updates
-                          can be re-read.  As the region is now unlocked we need to issue 2 UNLOCK commands in order to flush the
-                          MMU/uTLB, see PRLAM-8812.
-                        */
-                       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
-                       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
-               }
+                       op = AS_COMMAND_FLUSH_PT;
+
+               kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
+                                         faulting_as->fault_addr >> PAGE_SHIFT,
+                                         new_pages,
+                                         op, 1);
 
                mutex_unlock(&faulting_as->transaction_mutex);
                /* AS transaction end */
 
                /* reenable this in the mask */
-               mmu_mask_reenable(kbdev, kctx, faulting_as);
+               kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+                                        KBASE_MMU_FAULT_TYPE_PAGE);
                kbase_gpu_vm_unlock(kctx);
        } else {
                /* failed to extend, handle as a normal PF */
@@ -258,16 +236,20 @@ static void page_fault_worker(struct work_struct *data)
                kbase_mmu_report_fault_and_kill(kctx, faulting_as);
        }
 
- fault_done:
-       /* By this point, the fault was handled in some way, so release the ctx refcount */
+fault_done:
+       /*
+        * By this point, the fault was handled in some way,
+        * so release the ctx refcount
+        */
        kbasep_js_runpool_release_ctx(kbdev, kctx);
 }
 
-phys_addr_t kbase_mmu_alloc_pgd(kbase_context *kctx)
+phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx)
 {
        phys_addr_t pgd;
        u64 *page;
        int i;
+       struct page *p;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
        kbase_atomic_add_pages(1, &kctx->used_pages);
@@ -276,17 +258,21 @@ phys_addr_t kbase_mmu_alloc_pgd(kbase_context *kctx)
        if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(kctx->pgd_allocator, 1, &pgd))
                goto sub_pages;
 
-       page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+       p = pfn_to_page(PFN_DOWN(pgd));
+       page = kmap(p);
        if (NULL == page)
                goto alloc_free;
 
        kbase_process_page_usage_inc(kctx, 1);
 
        for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
-               page_table_entry_set( kctx->kbdev, &page[i], ENTRY_IS_INVAL );
+               page_table_entry_set(kctx->kbdev, &page[i], ENTRY_IS_INVAL);
 
        /* Clean the full page */
-       ksync_kern_vrange_gpu(pgd, page, KBASE_MMU_PAGE_ENTRIES * sizeof(u64));
+       dma_sync_single_for_device(kctx->kbdev->dev,
+                                  page_private(p),
+                                  PAGE_SIZE,
+                                  DMA_TO_DEVICE);
        kunmap(pfn_to_page(PFN_DOWN(pgd)));
        return pgd;
 
@@ -320,10 +306,11 @@ static u64 mmu_phyaddr_to_ate(phys_addr_t phy, u64 flags)
 }
 
 /* Given PGD PFN for level N, return PGD PFN for level N+1 */
-static phys_addr_t mmu_get_next_pgd(kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
+static phys_addr_t mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
 {
        u64 *page;
        phys_addr_t target_pgd;
+       struct page *p;
 
        KBASE_DEBUG_ASSERT(pgd);
        KBASE_DEBUG_ASSERT(NULL != kctx);
@@ -337,7 +324,8 @@ static phys_addr_t mmu_get_next_pgd(kbase_context *kctx, phys_addr_t pgd, u64 vp
        vpfn >>= (3 - level) * 9;
        vpfn &= 0x1FF;
 
-       page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+       p = pfn_to_page(PFN_DOWN(pgd));
+       page = kmap(p);
        if (NULL == page) {
                dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n");
                return 0;
@@ -349,21 +337,25 @@ static phys_addr_t mmu_get_next_pgd(kbase_context *kctx, phys_addr_t pgd, u64 vp
                target_pgd = kbase_mmu_alloc_pgd(kctx);
                if (!target_pgd) {
                        dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
-                       kunmap(pfn_to_page(PFN_DOWN(pgd)));
+                       kunmap(p);
                        return 0;
                }
 
-               page_table_entry_set( kctx->kbdev, &page[vpfn], mmu_phyaddr_to_pte(target_pgd) );
+               page_table_entry_set(kctx->kbdev, &page[vpfn],
+                               mmu_phyaddr_to_pte(target_pgd));
 
-               ksync_kern_vrange_gpu(pgd + (vpfn * sizeof(u64)), page + vpfn, sizeof(u64));
+               dma_sync_single_for_device(kctx->kbdev->dev,
+                                          page_private(p),
+                                          PAGE_SIZE,
+                                          DMA_TO_DEVICE);
                /* Rely on the caller to update the address space flags. */
        }
 
-       kunmap(pfn_to_page(PFN_DOWN(pgd)));
+       kunmap(p);
        return target_pgd;
 }
 
-static phys_addr_t mmu_get_bottom_pgd(kbase_context *kctx, u64 vpfn)
+static phys_addr_t mmu_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
 {
        phys_addr_t pgd;
        int l;
@@ -382,7 +374,7 @@ static phys_addr_t mmu_get_bottom_pgd(kbase_context *kctx, u64 vpfn)
        return pgd;
 }
 
-static phys_addr_t mmu_insert_pages_recover_get_next_pgd(kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
+static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level)
 {
        u64 *page;
        phys_addr_t target_pgd;
@@ -411,7 +403,7 @@ static phys_addr_t mmu_insert_pages_recover_get_next_pgd(kbase_context *kctx, ph
        return target_pgd;
 }
 
-static phys_addr_t mmu_insert_pages_recover_get_bottom_pgd(kbase_context *kctx, u64 vpfn)
+static phys_addr_t mmu_insert_pages_recover_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
 {
        phys_addr_t pgd;
        int l;
@@ -427,7 +419,7 @@ static phys_addr_t mmu_insert_pages_recover_get_bottom_pgd(kbase_context *kctx,
        return pgd;
 }
 
-static void mmu_insert_pages_failure_recovery(kbase_context *kctx, u64 vpfn,
+static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx, u64 vpfn,
                                              size_t nr)
 {
        phys_addr_t pgd;
@@ -444,6 +436,7 @@ static void mmu_insert_pages_failure_recovery(kbase_context *kctx, u64 vpfn,
                unsigned int i;
                unsigned int index = vpfn & 0x1FF;
                unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+               struct page *p;
 
                if (count > nr)
                        count = nr;
@@ -451,7 +444,9 @@ static void mmu_insert_pages_failure_recovery(kbase_context *kctx, u64 vpfn,
                pgd = mmu_insert_pages_recover_get_bottom_pgd(kctx, vpfn);
                KBASE_DEBUG_ASSERT(0 != pgd);
 
-               pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
+               p = pfn_to_page(PFN_DOWN(pgd));
+
+               pgd_page = kmap_atomic(p);
                KBASE_DEBUG_ASSERT(NULL != pgd_page);
 
                /* Invalidate the entries we added */
@@ -462,9 +457,9 @@ static void mmu_insert_pages_failure_recovery(kbase_context *kctx, u64 vpfn,
                vpfn += count;
                nr -= count;
 
-               ksync_kern_vrange_gpu(pgd + (index * sizeof(u64)),
-                                     pgd_page + index, count * sizeof(u64));
-
+               dma_sync_single_for_device(kctx->kbdev->dev,
+                                          page_private(p),
+                                          PAGE_SIZE, DMA_TO_DEVICE);
                kunmap_atomic(pgd_page);
        }
 }
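
The hunks above replace the driver's old ksync_kern_vrange_gpu() cache maintenance with the generic DMA API. Below is a minimal sketch of the resulting pattern, not part of the patch; it assumes, as the patch does, that the PGD allocator has stashed the page's DMA handle in page_private(), and the helper name is illustrative only.

    static void example_write_and_sync_ptes(struct kbase_context *kctx,
                                            struct page *p, u64 *pgd_page,
                                            unsigned int index,
                                            unsigned int count, u64 entry)
    {
            unsigned int i;

            /* Update the PTEs through the kernel mapping of the PGD page */
            for (i = 0; i < count; i++)
                    page_table_entry_set(kctx->kbdev, &pgd_page[index + i], entry);

            /* Push only the dirty range to the device; page_private(p) holds
             * the DMA address mapped when the PGD page was allocated */
            dma_sync_single_for_device(kctx->kbdev->dev,
                                       page_private(p) + (index * sizeof(u64)),
                                       count * sizeof(u64),
                                       DMA_TO_DEVICE);
    }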
@@ -500,7 +495,7 @@ static u64 kbase_mmu_get_mmu_flags(unsigned long flags)
 /*
  * Map the single page 'phys' 'nr' of times, starting at GPU PFN 'vpfn'
  */
-mali_error kbase_mmu_insert_single_page(kbase_context *kctx, u64 vpfn,
+mali_error kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
                                        phys_addr_t phys, size_t nr,
                                        unsigned long flags)
 {
@@ -527,6 +522,7 @@ mali_error kbase_mmu_insert_single_page(kbase_context *kctx, u64 vpfn,
                unsigned int i;
                unsigned int index = vpfn & 0x1FF;
                unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+               struct page *p;
 
                if (count > nr)
                        count = nr;
@@ -553,7 +549,8 @@ mali_error kbase_mmu_insert_single_page(kbase_context *kctx, u64 vpfn,
                        return MALI_ERROR_FUNCTION_FAILED;
                }
 
-               pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+               p = pfn_to_page(PFN_DOWN(pgd));
+               pgd_page = kmap(p);
                if (!pgd_page) {
                        dev_warn(kctx->kbdev->dev,
                                               "kbase_mmu_insert_pages: "
@@ -578,10 +575,14 @@ mali_error kbase_mmu_insert_single_page(kbase_context *kctx, u64 vpfn,
                vpfn += count;
                nr -= count;
 
-               ksync_kern_vrange_gpu(pgd + (index * sizeof(u64)),
-                                     pgd_page + index, count * sizeof(u64));
+               dma_sync_single_for_device(kctx->kbdev->dev,
+                                          page_private(p) +
+                                          (index * sizeof(u64)),
+                                          count * sizeof(u64),
+                                          DMA_TO_DEVICE);
 
-               kunmap(pfn_to_page(PFN_DOWN(pgd)));
+
+               kunmap(p);
                /* We have started modifying the page table.
                 * If further pages need inserting and fail we need to undo what
                 * has already taken place */
@@ -594,7 +595,7 @@ mali_error kbase_mmu_insert_single_page(kbase_context *kctx, u64 vpfn,
 /*
  * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn'
  */
-mali_error kbase_mmu_insert_pages(kbase_context *kctx, u64 vpfn,
+mali_error kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
                                  phys_addr_t *phys, size_t nr,
                                  unsigned long flags)
 {
@@ -620,6 +621,7 @@ mali_error kbase_mmu_insert_pages(kbase_context *kctx, u64 vpfn,
                unsigned int i;
                unsigned int index = vpfn & 0x1FF;
                unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+               struct page *p;
 
                if (count > nr)
                        count = nr;
@@ -646,7 +648,8 @@ mali_error kbase_mmu_insert_pages(kbase_context *kctx, u64 vpfn,
                        return MALI_ERROR_FUNCTION_FAILED;
                }
 
-               pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+               p = pfn_to_page(PFN_DOWN(pgd));
+               pgd_page = kmap(p);
                if (!pgd_page) {
                        dev_warn(kctx->kbdev->dev,
                                               "kbase_mmu_insert_pages: "
@@ -674,10 +677,13 @@ mali_error kbase_mmu_insert_pages(kbase_context *kctx, u64 vpfn,
                vpfn += count;
                nr -= count;
 
-               ksync_kern_vrange_gpu(pgd + (index * sizeof(u64)),
-                                     pgd_page + index, count * sizeof(u64));
+               dma_sync_single_for_device(kctx->kbdev->dev,
+                                          page_private(p) +
+                                          (index * sizeof(u64)),
+                                          count * sizeof(u64),
+                                          DMA_TO_DEVICE);
 
-               kunmap(pfn_to_page(PFN_DOWN(pgd)));
+               kunmap(p);
                /* We have started modifying the page table. If further pages
                 * need inserting and fail we need to undo what has already
                 * taken place */
@@ -699,9 +705,9 @@ KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages)
  * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
  * information.
  */
-static void kbase_mmu_flush(kbase_context *kctx, u64 vpfn, size_t nr)
+static void kbase_mmu_flush(struct kbase_context *kctx, u64 vpfn, size_t nr)
 {
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        mali_bool ctx_is_in_runpool;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
@@ -718,45 +724,29 @@ static void kbase_mmu_flush(kbase_context *kctx, u64 vpfn, size_t nr)
                /* Second level check is to try to only do this when jobs are running. The refcount is
                 * a heuristic for this. */
                if (kbdev->js_data.runpool_irq.per_as_data[kctx->as_nr].as_busy_refcount >= 2) {
-                       /* Lock the VA region we're about to update */
-                       u64 lock_addr = lock_region(kbdev, vpfn, nr);
-                       unsigned int max_loops = KBASE_AS_FLUSH_MAX_LOOPS;
+                       int ret;
+                       u32 op;
 
                        /* AS transaction begin */
                        mutex_lock(&kbdev->as[kctx->as_nr].transaction_mutex);
-                       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_LOCKADDR_LO), lock_addr & 0xFFFFFFFFUL, kctx);
-                       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_LOCKADDR_HI), lock_addr >> 32, kctx);
-                       kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_LOCK, kctx);
 
-                       /* flush L2 and unlock the VA */
                        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367))
-                               kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_FLUSH, kctx);
+                               op = AS_COMMAND_FLUSH;
                        else
-                               kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_FLUSH_MEM, kctx);
-
-                       /* wait for the flush to complete */
-                       while (--max_loops && kbase_reg_read(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_STATUS), kctx) & ASn_STATUS_FLUSH_ACTIVE)
-                               ;
-
-                       if (!max_loops) {
+                               op = AS_COMMAND_FLUSH_MEM;
+
+                       ret = kbase_mmu_hw_do_operation(kbdev,
+                                                       &kbdev->as[kctx->as_nr],
+                                                       kctx, vpfn, nr,
+                                                       op, 0);
+#if KBASE_GPU_RESET_EN
+                       if (ret) {
                                /* Flush failed to complete, assume the GPU has hung and perform a reset to recover */
                                dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
                                if (kbase_prepare_to_reset_gpu(kbdev))
                                        kbase_reset_gpu(kbdev);
                        }
-
-                       if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
-                               /* Issue an UNLOCK command to ensure that valid page tables are re-read by the GPU after an update.
-                                  Note that, the FLUSH command should perform all the actions necessary, however the bus logs show
-                                  that if multiple page faults occur within an 8 page region the MMU does not always re-read the
-                                  updated page table entries for later faults or is only partially read, it subsequently raises the
-                                  page fault IRQ for the same addresses, the unlock ensures that the MMU cache is flushed, so updates
-                                  can be re-read.  As the region is now unlocked we need to issue 2 UNLOCK commands in order to flush the
-                                  MMU/uTLB, see PRLAM-8812.
-                                */
-                               kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
-                               kbase_reg_write(kctx->kbdev, MMU_AS_REG(kctx->as_nr, ASn_COMMAND), ASn_COMMAND_UNLOCK, kctx);
-                       }
+#endif /* KBASE_GPU_RESET_EN */
 
                        mutex_unlock(&kbdev->as[kctx->as_nr].transaction_mutex);
                        /* AS transaction end */
@@ -777,15 +767,15 @@ static void kbase_mmu_flush(kbase_context *kctx, u64 vpfn, size_t nr)
  * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
  * information.
  */
-mali_error kbase_mmu_teardown_pages(kbase_context *kctx, u64 vpfn, size_t nr)
+mali_error kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
 {
        phys_addr_t pgd;
        u64 *pgd_page;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        size_t requested_nr = nr;
 
        KBASE_DEBUG_ASSERT(NULL != kctx);
-       beenthere(kctx, "kctx %p vpfn %lx nr %d", (void *)kctx, (unsigned long)vpfn, nr);
+       beenthere(kctx, "kctx %p vpfn %lx nr %zd", (void *)kctx, (unsigned long)vpfn, nr);
 
        lockdep_assert_held(&kctx->reg_lock);
 
@@ -800,6 +790,7 @@ mali_error kbase_mmu_teardown_pages(kbase_context *kctx, u64 vpfn, size_t nr)
                unsigned int i;
                unsigned int index = vpfn & 0x1FF;
                unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
+               struct page *p;
                if (count > nr)
                        count = nr;
 
@@ -809,25 +800,30 @@ mali_error kbase_mmu_teardown_pages(kbase_context *kctx, u64 vpfn, size_t nr)
                        return MALI_ERROR_FUNCTION_FAILED;
                }
 
-               pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+               p = pfn_to_page(PFN_DOWN(pgd));
+               pgd_page = kmap(p);
                if (!pgd_page) {
                        dev_warn(kbdev->dev, "kbase_mmu_teardown_pages: kmap failure\n");
                        return MALI_ERROR_OUT_OF_MEMORY;
                }
 
                for (i = 0; i < count; i++) {
-                       page_table_entry_set( kctx->kbdev, &pgd_page[index + i], ENTRY_IS_INVAL );
+                       page_table_entry_set(kctx->kbdev, &pgd_page[index + i], ENTRY_IS_INVAL);
                }
 
                vpfn += count;
                nr -= count;
 
-               ksync_kern_vrange_gpu(pgd + (index * sizeof(u64)), pgd_page + index, count * sizeof(u64));
+               dma_sync_single_for_device(kctx->kbdev->dev,
+                                          page_private(p) +
+                                          (index * sizeof(u64)),
+                                          count * sizeof(u64),
+                                          DMA_TO_DEVICE);
 
-               kunmap(pfn_to_page(PFN_DOWN(pgd)));
+               kunmap(p);
        }
 
-       kbase_mmu_flush(kctx,vpfn,requested_nr);
+       kbase_mmu_flush(kctx, vpfn, requested_nr);
        return MALI_ERROR_NONE;
 }
 
@@ -845,7 +841,7 @@ KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages)
  * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
  * information.
  */
-mali_error kbase_mmu_update_pages(kbase_context* kctx, u64 vpfn, phys_addr_t* phys, size_t nr, unsigned long flags)
+mali_error kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t* phys, size_t nr, unsigned long flags)
 {
        phys_addr_t pgd;
        u64* pgd_page;
@@ -865,11 +861,12 @@ mali_error kbase_mmu_update_pages(kbase_context* kctx, u64 vpfn, phys_addr_t* ph
                        vpfn, phys, nr);
 
 
-       while(nr)
-       {
+       while(nr) {
                unsigned int i;
                unsigned int index = vpfn & 0x1FF;
                size_t count = KBASE_MMU_PAGE_ENTRIES - index;
+               struct page *p;
+
                if (count > nr)
                        count = nr;
 
@@ -879,26 +876,31 @@ mali_error kbase_mmu_update_pages(kbase_context* kctx, u64 vpfn, phys_addr_t* ph
                        return MALI_ERROR_FUNCTION_FAILED;
                }
 
-               pgd_page = kmap(pfn_to_page(PFN_DOWN(pgd)));
+               p = pfn_to_page(PFN_DOWN(pgd));
+               pgd_page = kmap(p);
                if (!pgd_page) {
                        dev_warn(kctx->kbdev->dev, "kmap failure\n");
                        return MALI_ERROR_OUT_OF_MEMORY;
                }
 
                for (i = 0; i < count; i++) {
-                       page_table_entry_set( kctx->kbdev, &pgd_page[index + i],  mmu_phyaddr_to_ate(phys[i], mmu_flags)  );
+                       page_table_entry_set(kctx->kbdev, &pgd_page[index + i],  mmu_phyaddr_to_ate(phys[i], mmu_flags));
                }
 
                phys += count;
                vpfn += count;
                nr -= count;
 
-               ksync_kern_vrange_gpu(pgd + (index * sizeof(u64)), pgd_page + index, count * sizeof(u64));
+               dma_sync_single_for_device(kctx->kbdev->dev,
+                                          page_private(p) +
+                                          (index * sizeof(u64)),
+                                          count * sizeof(u64),
+                                          DMA_TO_DEVICE);
 
                kunmap(pfn_to_page(PFN_DOWN(pgd)));
        }
 
-       kbase_mmu_flush(kctx,vpfn,requested_nr);
+       kbase_mmu_flush(kctx, vpfn, requested_nr);
 
        return MALI_ERROR_NONE;
 }
@@ -909,7 +911,7 @@ static int mmu_pte_is_valid(u64 pte)
 }
 
 /* This is a debug feature only */
-static void mmu_check_unused(kbase_context *kctx, phys_addr_t pgd)
+static void mmu_check_unused(struct kbase_context *kctx, phys_addr_t pgd)
 {
        u64 *page;
        int i;
@@ -925,7 +927,7 @@ static void mmu_check_unused(kbase_context *kctx, phys_addr_t pgd)
        kunmap_atomic(page);
 }
 
-static void mmu_teardown_level(kbase_context *kctx, phys_addr_t pgd, int level, int zap, u64 *pgd_page_buffer)
+static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd, int level, int zap, u64 *pgd_page_buffer)
 {
        phys_addr_t target_pgd;
        u64 *pgd_page;
@@ -960,7 +962,7 @@ static void mmu_teardown_level(kbase_context *kctx, phys_addr_t pgd, int level,
                        beenthere(kctx, "pte %lx level %d", (unsigned long)target_pgd, level + 1);
                        if (zap) {
                                kbase_mem_allocator_free(kctx->pgd_allocator, 1, &target_pgd, MALI_TRUE);
-                               kbase_process_page_usage_dec(kctx, 1 );
+                               kbase_process_page_usage_dec(kctx, 1);
                                kbase_atomic_sub_pages(1, &kctx->used_pages);
                                kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
                        }
@@ -968,7 +970,7 @@ static void mmu_teardown_level(kbase_context *kctx, phys_addr_t pgd, int level,
        }
 }
 
-mali_error kbase_mmu_init(kbase_context *kctx)
+mali_error kbase_mmu_init(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(NULL != kctx);
        KBASE_DEBUG_ASSERT(NULL == kctx->mmu_teardown_pages);
@@ -976,12 +978,12 @@ mali_error kbase_mmu_init(kbase_context *kctx)
        /* Preallocate MMU depth of four pages for mmu_teardown_level to use */
        kctx->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
 
-       kctx->mem_attrs = (ASn_MEMATTR_IMPL_DEF_CACHE_POLICY <<
-                          (ASn_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
-                         (ASn_MEMATTR_FORCE_TO_CACHE_ALL    <<
-                          (ASn_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
-                         (ASn_MEMATTR_WRITE_ALLOC           <<
-                          (ASn_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+       kctx->mem_attrs = (AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
+                          (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+                         (AS_MEMATTR_FORCE_TO_CACHE_ALL    <<
+                          (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+                         (AS_MEMATTR_WRITE_ALLOC           <<
+                          (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
                          0; /* The other indices are unused for now */
 
        if (NULL == kctx->mmu_teardown_pages)
@@ -990,7 +992,7 @@ mali_error kbase_mmu_init(kbase_context *kctx)
        return MALI_ERROR_NONE;
 }
 
-void kbase_mmu_term(kbase_context *kctx)
+void kbase_mmu_term(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(NULL != kctx);
        KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
@@ -999,7 +1001,7 @@ void kbase_mmu_term(kbase_context *kctx)
        kctx->mmu_teardown_pages = NULL;
 }
 
-void kbase_mmu_free_pgd(kbase_context *kctx)
+void kbase_mmu_free_pgd(struct kbase_context *kctx)
 {
        KBASE_DEBUG_ASSERT(NULL != kctx);
        KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
@@ -1010,14 +1012,14 @@ void kbase_mmu_free_pgd(kbase_context *kctx)
 
        beenthere(kctx, "pgd %lx", (unsigned long)kctx->pgd);
        kbase_mem_allocator_free(kctx->pgd_allocator, 1, &kctx->pgd, MALI_TRUE);
-       kbase_process_page_usage_dec(kctx, 1 );
+       kbase_process_page_usage_dec(kctx, 1);
        kbase_atomic_sub_pages(1, &kctx->used_pages);
        kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
 }
 
 KBASE_EXPORT_TEST_API(kbase_mmu_free_pgd)
 
-static size_t kbasep_mmu_dump_level(kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left)
+static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left)
 {
        phys_addr_t target_pgd;
        u64 *pgd_page;
@@ -1067,7 +1069,7 @@ static size_t kbasep_mmu_dump_level(kbase_context *kctx, phys_addr_t pgd, int le
        return size;
 }
 
-void *kbase_mmu_dump(kbase_context *kctx, int nr_pages)
+void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
 {
        void *kaddr;
        size_t size_left;
@@ -1113,63 +1115,28 @@ void *kbase_mmu_dump(kbase_context *kctx, int nr_pages)
 }
 KBASE_EXPORT_TEST_API(kbase_mmu_dump)
 
-static u64 lock_region(kbase_device *kbdev, u64 pfn, size_t num_pages)
-{
-       u64 region;
-
-       /* can't lock a zero sized range */
-       KBASE_DEBUG_ASSERT(num_pages);
-
-       region = pfn << PAGE_SHIFT;
-       /*
-        * fls returns (given the ASSERT above):
-        * 32-bit: 1 .. 32
-        * 64-bit: 1 .. 32
-        *
-        * 32-bit: 10 + fls(num_pages)
-        * results in the range (11 .. 42)
-        * 64-bit: 10 + fls(num_pages)
-        * results in the range (11 .. 42)
-        */
-
-       /* gracefully handle num_pages being zero */
-       if (0 == num_pages) {
-               region |= 11;
-       } else {
-               u8 region_width;
-               region_width = 10 + fls(num_pages);
-               if (num_pages != (1ul << (region_width - 11))) {
-                       /* not pow2, so must go up to the next pow2 */
-                       region_width += 1;
-               }
-               KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
-               KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
-               region |= region_width;
-       }
-
-       return region;
-}
-
 static void bus_fault_worker(struct work_struct *data)
 {
-       kbase_as *faulting_as;
+       struct kbase_as *faulting_as;
        int as_no;
-       kbase_context *kctx;
-       kbase_device *kbdev;
-       u32 reg;
+       struct kbase_context *kctx;
+       struct kbase_device *kbdev;
+#if KBASE_GPU_RESET_EN
        mali_bool reset_status = MALI_FALSE;
+#endif /* KBASE_GPU_RESET_EN */
+
+       faulting_as = container_of(data, struct kbase_as, work_busfault);
 
-       faulting_as = container_of(data, kbase_as, work_busfault);
        as_no = faulting_as->number;
 
-       kbdev = container_of(faulting_as, kbase_device, as[as_no]);
+       kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
 
        /* Grab the context that was already refcounted in kbase_mmu_interrupt().
         * Therefore, it cannot be scheduled out of this AS until we explicitly release it
         *
         * NOTE: NULL can be returned here if we're gracefully handling a spurious interrupt */
        kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as_no);
-
+#if KBASE_GPU_RESET_EN
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
                /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
                 * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
@@ -1178,153 +1145,38 @@ static void bus_fault_worker(struct work_struct *data)
                dev_err(kbdev->dev, "GPU bus error occurred. For this GPU version we now soft-reset as part of bus error recovery\n");
                reset_status = kbase_prepare_to_reset_gpu(kbdev);
        }
-
+#endif /* KBASE_GPU_RESET_EN */
        /* NOTE: If GPU already powered off for suspend, we don't need to switch to unmapped */
        if (!kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+               struct kbase_mmu_setup *current_setup = &faulting_as->current_setup;
+
                /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
                /* AS transaction begin */
                mutex_lock(&kbdev->as[as_no].transaction_mutex);
 
-               reg = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), kctx);
-               reg &= ~3;
-               kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), reg, kctx);
-               kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_UPDATE, kctx);
-               
+               /* Set the MMU into unmapped mode */
+               current_setup->transtab &= ~(u64)MMU_TRANSTAB_ADRMODE_MASK;
+               current_setup->transtab |= AS_TRANSTAB_ADRMODE_UNMAPPED;
+
+               /* Apply the new settings */
+               kbase_mmu_hw_configure(kbdev, faulting_as, kctx);
+
                mutex_unlock(&kbdev->as[as_no].transaction_mutex);
                /* AS transaction end */
 
-               mmu_mask_reenable(kbdev, kctx, faulting_as);
+               kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+                                        KBASE_MMU_FAULT_TYPE_BUS);
                kbase_pm_context_idle(kbdev);
        }
-
+#if KBASE_GPU_RESET_EN
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
                kbase_reset_gpu(kbdev);
-
+#endif /* KBASE_GPU_RESET_EN */
        /* By this point, the fault was handled in some way, so release the ctx refcount */
        if (kctx != NULL)
                kbasep_js_runpool_release_ctx(kbdev, kctx);
 }
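
Both fault handlers in this commit now drop the faulting address space into UNMAPPED mode through the new MMU HW layer instead of poking ASn_TRANSTAB_LO and ASn_COMMAND directly. A minimal sketch of that shared sequence follows; the function name is illustrative and not part of the patch.

    static void example_set_as_unmapped(struct kbase_device *kbdev,
                                        struct kbase_as *as,
                                        struct kbase_context *kctx)
    {
            struct kbase_mmu_setup *current_setup = &as->current_setup;

            /* AS transaction begin */
            mutex_lock(&as->transaction_mutex);

            /* Switch the cached setup to UNMAPPED addressing mode */
            current_setup->transtab &= ~(u64)MMU_TRANSTAB_ADRMODE_MASK;
            current_setup->transtab |= AS_TRANSTAB_ADRMODE_UNMAPPED;

            /* Write the cached setup back to the registers and issue UPDATE */
            kbase_mmu_hw_configure(kbdev, as, kctx);

            mutex_unlock(&as->transaction_mutex);
            /* AS transaction end */
    }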
 
-void kbase_mmu_interrupt(kbase_device *kbdev, u32 irq_stat)
-{
-       unsigned long flags;
-       const int num_as = 16;
-       const int busfault_shift = 16;
-       const int pf_shift = 0;
-       const unsigned long mask = (1UL << num_as) - 1;
-       kbasep_js_device_data *js_devdata;
-       u32 new_mask;
-       u32 tmp;
-       u32 bf_bits = (irq_stat >> busfault_shift) & mask;      /* bus faults */
-       /* Ignore ASes with both pf and bf */
-       u32 pf_bits = ((irq_stat >> pf_shift) & mask) & ~bf_bits;       /* page faults */
-
-       KBASE_DEBUG_ASSERT(NULL != kbdev);
-
-       js_devdata = &kbdev->js_data;
-
-       /* remember current mask */
-       spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
-       new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
-       /* mask interrupts for now */
-       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
-       spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
-
-       while (bf_bits) {
-               /* the while logic ensures we have a bit set, no need to check for not-found here */
-               int as_no = ffs(bf_bits) - 1;
-               kbase_as *as = &kbdev->as[as_no];
-               kbase_context *kctx;
-
-               /* Refcount the kctx ASAP - it shouldn't disappear anyway, since Bus/Page faults
-                * _should_ only occur whilst jobs are running, and a job causing the Bus/Page fault
-                * shouldn't complete until the MMU is updated */
-               kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
-
-               /* mark as handled */
-               bf_bits &= ~(1UL << as_no);
-
-               /* find faulting address & status */
-               as->fault_addr = ((u64)kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTADDRESS_HI), kctx) << 32) |
-                                      kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTADDRESS_LO), kctx);
-               as->fault_status = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTSTATUS), kctx);
-
-               /* Clear the internal JM mask first before clearing the internal MMU mask */
-               kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 1UL << MMU_REGS_BUS_ERROR_FLAG(as_no), kctx);
-
-               if (kctx) {
-                       /* hw counters dumping in progress, signal the other thread that it failed */
-                       if ((kbdev->hwcnt.kctx == kctx) && (kbdev->hwcnt.state == KBASE_INSTR_STATE_DUMPING))
-                               kbdev->hwcnt.state = KBASE_INSTR_STATE_FAULT;
-
-                       /* Stop the kctx from submitting more jobs and cause it to be scheduled
-                        * out/rescheduled when all references to it are released */
-                       spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
-                       kbasep_js_clear_submit_allowed(js_devdata, kctx);
-                       spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
-
-                       dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n", as_no, as->fault_addr);
-               } else {
-                       dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx with no context present! " "Suprious IRQ or SW Design Error?\n", as_no, as->fault_addr);
-               }
-
-               /* remove the queued BFs from the mask */
-               new_mask &= ~(1UL << (as_no + num_as));
-
-               /* We need to switch to UNMAPPED mode - but we do this in a worker so that we can sleep */
-               KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault));
-               INIT_WORK(&as->work_busfault, bus_fault_worker);
-               queue_work(as->pf_wq, &as->work_busfault);
-       }
-
-       /*
-        * pf_bits is non-zero if we have at least one AS with a page fault and no bus fault.
-        * Handle the PFs in our worker thread.
-        */
-       while (pf_bits) {
-               /* the while logic ensures we have a bit set, no need to check for not-found here */
-               int as_no = ffs(pf_bits) - 1;
-               kbase_as *as = &kbdev->as[as_no];
-               kbase_context *kctx;
-
-               /* Refcount the kctx ASAP - it shouldn't disappear anyway, since Bus/Page faults
-                * _should_ only occur whilst jobs are running, and a job causing the Bus/Page fault
-                * shouldn't complete until the MMU is updated */
-               kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
-
-               /* mark as handled */
-               pf_bits &= ~(1UL << as_no);
-
-               /* find faulting address & status */
-               as->fault_addr = ((u64)kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTADDRESS_HI), kctx) << 32) |
-                                      kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTADDRESS_LO), kctx);
-               as->fault_status = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_FAULTSTATUS), kctx);
-
-               /* Clear the internal JM mask first before clearing the internal MMU mask */
-               kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no), kctx);
-
-               if (kctx == NULL)
-                       dev_warn(kbdev->dev, "Page fault in AS%d at 0x%016llx with no context present! " "Suprious IRQ or SW Design Error?\n", as_no, as->fault_addr);
-
-               /* remove the queued PFs from the mask */
-               new_mask &= ~((1UL << as_no) | (1UL << (as_no + num_as)));
-               kbdev->kbase_group_error++;
-               /* queue work pending for this AS */
-               KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault));
-               INIT_WORK(&as->work_pagefault, page_fault_worker);
-               queue_work(as->pf_wq, &as->work_pagefault);
-       }
-
-       /* reenable interrupts */
-       spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
-       tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
-       new_mask |= tmp;
-       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
-       spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
-}
-
-KBASE_EXPORT_TEST_API(kbase_mmu_interrupt)
-
 const char *kbase_exception_name(u32 exception_code)
 {
        const char *e;
@@ -1405,7 +1257,7 @@ const char *kbase_exception_name(u32 exception_code)
        case 0x80:
                e = "DELAYED_BUS_FAULT";
                break;
-       case 0x81:
+       case 0x88:
                e = "SHAREABILITY_FAULT";
                break;
                /* MMU exceptions */
@@ -1446,17 +1298,20 @@ const char *kbase_exception_name(u32 exception_code)
 /**
  * The caller must ensure it's retained the ctx to prevent it from being scheduled out whilst it's being worked on.
  */
-static void kbase_mmu_report_fault_and_kill(kbase_context *kctx, kbase_as *as)
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as)
 {
        unsigned long flags;
-       u32 reg;
        int exception_type;
        int access_type;
        int source_id;
        int as_no;
-       kbase_device *kbdev;
-       kbasep_js_device_data *js_devdata;
+       struct kbase_device *kbdev;
+       struct kbase_mmu_setup *current_setup;
+       struct kbasep_js_device_data *js_devdata;
+
+#if KBASE_GPU_RESET_EN
        mali_bool reset_status = MALI_FALSE;
+#endif
        static const char * const access_type_names[] = { "RESERVED", "EXECUTE", "READ", "WRITE" };
 
        KBASE_DEBUG_ASSERT(as);
@@ -1506,7 +1361,7 @@ static void kbase_mmu_report_fault_and_kill(kbase_context *kctx, kbase_as *as)
        kbase_job_kill_jobs_from_context(kctx);
        /* AS transaction begin */
        mutex_lock(&as->transaction_mutex);
-
+#if KBASE_GPU_RESET_EN
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245)) {
                /* Due to H/W issue 8245 we need to reset the GPU after using UNMAPPED mode.
                 * We start the reset before switching to UNMAPPED to ensure that unrelated jobs
@@ -1515,40 +1370,55 @@ static void kbase_mmu_report_fault_and_kill(kbase_context *kctx, kbase_as *as)
                dev_err(kbdev->dev, "Unhandled page fault. For this GPU version we now soft-reset the GPU as part of page fault recovery.");
                reset_status = kbase_prepare_to_reset_gpu(kbdev);
        }
-
+#endif /* KBASE_GPU_RESET_EN */
        /* switch to UNMAPPED mode, will abort all jobs and stop any hw counter dumping */
-       reg = kbase_reg_read(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), kctx);
-       reg &= ~3;
-       kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_TRANSTAB_LO), reg, kctx);
-       kbase_reg_write(kbdev, MMU_AS_REG(as_no, ASn_COMMAND), ASn_COMMAND_UPDATE, kctx);
+       current_setup = &as->current_setup;
+
+       current_setup->transtab &= ~(u64)MMU_TRANSTAB_ADRMODE_MASK;
+       current_setup->transtab |= AS_TRANSTAB_ADRMODE_UNMAPPED;
+
+       /* Apply the new address space setting */
+       kbase_mmu_hw_configure(kbdev, as, kctx);
 
        mutex_unlock(&as->transaction_mutex);
        /* AS transaction end */
-       mmu_mask_reenable(kbdev, kctx, as);
 
+       /* Clear down the fault */
+       kbase_mmu_hw_clear_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
+
+#if KBASE_GPU_RESET_EN
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
                kbase_reset_gpu(kbdev);
+#endif /* KBASE_GPU_RESET_EN */
 }
 
 void kbasep_as_do_poke(struct work_struct *work)
 {
-       kbase_as *as;
-       kbase_device *kbdev;
+       struct kbase_as *as;
+       struct kbase_device *kbdev;
+       struct kbase_context *kctx;
        unsigned long flags;
 
        KBASE_DEBUG_ASSERT(work);
-       as = container_of(work, kbase_as, poke_work);
-       kbdev = container_of(as, kbase_device, as[as->number]);
+       as = container_of(work, struct kbase_as, poke_work);
+       kbdev = container_of(as, struct kbase_device, as[as->number]);
        KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
 
        /* GPU power will already be active by virtue of the caller holding a JS
         * reference on the address space, and will not release it until this worker
         * has finished */
 
+       /* Further to the comment above, we know that the AS will not be released
+        * while this function is running, because this workqueue is flushed
+        * (in kbase_as_poking_timer_release_atom) before the atom is released.
+        */
+       kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number);
+
        /* AS transaction begin */
        mutex_lock(&as->transaction_mutex);
        /* Force a uTLB invalidate */
-       kbase_reg_write(kbdev, MMU_AS_REG(as->number, ASn_COMMAND), ASn_COMMAND_UNLOCK, NULL);
+       kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0,
+                                 AS_COMMAND_UNLOCK, 0);
        mutex_unlock(&as->transaction_mutex);
        /* AS transaction end */
 
@@ -1564,11 +1434,11 @@ void kbasep_as_do_poke(struct work_struct *work)
 
 enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
 {
-       kbase_as *as;
+       struct kbase_as *as;
        int queue_work_ret;
 
        KBASE_DEBUG_ASSERT(NULL != timer);
-       as = container_of(timer, kbase_as, poke_timer);
+       as = container_of(timer, struct kbase_as, poke_timer);
        KBASE_DEBUG_ASSERT(as->poke_state & KBASE_AS_POKE_STATE_IN_FLIGHT);
 
        queue_work_ret = queue_work(as->poke_wq, &as->poke_work);
@@ -1587,9 +1457,9 @@ enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer)
  *
  * This can be called safely from atomic context
  */
-void kbase_as_poking_timer_retain_atom(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom)
+void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
-       kbase_as *as;
+       struct kbase_as *as;
        KBASE_DEBUG_ASSERT(kbdev);
        KBASE_DEBUG_ASSERT(kctx);
        KBASE_DEBUG_ASSERT(katom);
@@ -1624,9 +1494,9 @@ void kbase_as_poking_timer_retain_atom(kbase_device *kbdev, kbase_context *kctx,
  *
  * This must \b not be called from atomic context, since it can sleep.
  */
-void kbase_as_poking_timer_release_atom(kbase_device *kbdev, kbase_context *kctx, kbase_jd_atom *katom)
+void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom)
 {
-       kbase_as *as;
+       struct kbase_as *as;
        unsigned long flags;
 
        KBASE_DEBUG_ASSERT(kbdev);
@@ -1679,3 +1549,54 @@ void kbase_as_poking_timer_release_atom(kbase_device *kbdev, kbase_context *kctx
 
        katom->poking = 0;
 }
+
+void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_as *as)
+{
+       struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+       unsigned long flags;
+
+       if (kctx == NULL) {
+               dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
+                                kbase_as_has_bus_fault(as) ? "Bus error" : "Page fault",
+                                as->number, as->fault_addr);
+       }
+
+       if (kbase_as_has_bus_fault(as)) {
+               if (kctx) {
+                       /*
+                        * hw counters dumping in progress, signal the
+                        * other thread that it failed
+                        */
+                       if ((kbdev->hwcnt.kctx == kctx) &&
+                           (kbdev->hwcnt.state == KBASE_INSTR_STATE_DUMPING))
+                               kbdev->hwcnt.state = KBASE_INSTR_STATE_FAULT;
+
+                       /*
+                        * Stop the kctx from submitting more jobs and cause it
+                        * to be scheduled out/rescheduled when all references
+                        * to it are released
+                        */
+                       spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+                       kbasep_js_clear_submit_allowed(js_devdata, kctx);
+                       spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
+                                              flags);
+
+                       dev_warn(kbdev->dev, "Bus error in AS%d at 0x%016llx\n",
+                                        as->number, as->fault_addr);
+               }
+
+               /*
+                * We need to switch to UNMAPPED mode - but we do this in a
+                * worker so that we can sleep
+                */
+               kbdev->kbase_group_error++;
+               KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault));
+               INIT_WORK(&as->work_busfault, bus_fault_worker);
+               queue_work(as->pf_wq, &as->work_busfault);
+       } else {
+               kbdev->kbase_group_error++;
+               KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault));
+               INIT_WORK(&as->work_pagefault, page_fault_worker);
+               queue_work(as->pf_wq, &as->work_pagefault);
+       }
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h
new file mode 100755 (executable)
index 0000000..d210e12
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file
+ * Interface file for accessing MMU hardware functionality
+ */
+
+/**
+ * @page mali_kbase_mmu_hw_page MMU hardware interface
+ *
+ * @section mali_kbase_mmu_hw_intro_sec Introduction
+ * This module provides an abstraction for accessing the functionality provided
+ * by the Midgard MMU, so that all MMU HW access is contained in one common
+ * place and different backends (implementations) of the interface can be
+ * provided.
+ */
+
+#ifndef _MALI_KBASE_MMU_HW_H_
+#define _MALI_KBASE_MMU_HW_H_
+
+/* Forward declarations */
+struct kbase_device;
+struct kbase_as;
+struct kbase_context;
+
+/**
+ * @addtogroup base_kbase_api
+ * @{
+ */
+
+/**
+ * @addtogroup mali_kbase_mmu_hw  MMU access APIs
+ * @{
+ */
+
+/** @brief MMU fault type descriptor.
+ */
+enum kbase_mmu_fault_type {
+       KBASE_MMU_FAULT_TYPE_UNKNOWN = 0,
+       KBASE_MMU_FAULT_TYPE_PAGE,
+       KBASE_MMU_FAULT_TYPE_BUS
+};
+
+/** @brief Configure an address space for use.
+ *
+ * Configure the MMU using the address space details set up in the
+ * @ref kbase_context structure.
+ *
+ * @param[in]  kbdev          kbase device to configure.
+ * @param[in]  as             address space to configure.
+ * @param[in]  kctx           kbase context to configure.
+ */
+void kbase_mmu_hw_configure(struct kbase_device *kbdev,
+               struct kbase_as *as, struct kbase_context *kctx);
+
+/** @brief Issue an operation to the MMU.
+ *
+ * Issue an operation (MMU invalidate, MMU flush, etc.) on the address space
+ * that is associated with the provided @ref kbase_context, over the specified range.
+ *
+ * @param[in]  kbdev         kbase device to issue the MMU operation on.
+ * @param[in]  as            address space to issue the MMU operation on.
+ * @param[in]  kctx          kbase context to issue the MMU operation on.
+ * @param[in]  vpfn          MMU Virtual Page Frame Number to start the
+ *                           operation on.
+ * @param[in]  nr            Number of pages to work on.
+ * @param[in]  type          Operation type (written to ASn_COMMAND).
+ * @param[in]  handling_irq  Is this operation being called during the handling
+ *                           of an interrupt?
+ *
+ * @return Zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+               struct kbase_context *kctx, u64 vpfn, u32 nr, u32 type,
+               unsigned int handling_irq);
+
+/** @brief Clear a fault that has been previously reported by the MMU.
+ *
+ * Clear a bus error or page fault that has been reported by the MMU.
+ *
+ * @param[in]  kbdev         kbase device to clear the fault from.
+ * @param[in]  as            address space to clear the fault from.
+ * @param[in]  kctx          kbase context to clear the fault from.
+ * @param[in]  type          The type of fault that needs to be cleared.
+ */
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+               struct kbase_context *kctx, enum kbase_mmu_fault_type type);
+
+/** @} *//* end group mali_kbase_mmu_hw */
+/** @} *//* end group base_kbase_api */
+
+#endif /* _MALI_KBASE_MMU_HW_H_ */
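
A hedged usage sketch of this interface follows, mirroring how the page-fault path in this commit drives it after growing a region; error handling and the KBASE_GPU_RESET_EN fallback are omitted, and the function name is illustrative only.

    static void example_flush_after_grow(struct kbase_device *kbdev,
                                         struct kbase_as *faulting_as,
                                         struct kbase_context *kctx,
                                         u64 fault_pfn, u32 new_pages)
    {
            /* AS transaction begin */
            mutex_lock(&faulting_as->transaction_mutex);

            /* Flush the updated page tables for the faulting range; the exact
             * command (FLUSH vs FLUSH_PT) depends on hardware errata checked
             * elsewhere in the patch */
            kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
                                      fault_pfn, new_pages,
                                      AS_COMMAND_FLUSH_PT, 1);

            mutex_unlock(&faulting_as->transaction_mutex);
            /* AS transaction end */

            /* Acknowledge the page fault and unmask its interrupt again */
            kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
                                     KBASE_MMU_FAULT_TYPE_PAGE);
    }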
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_hw_direct.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw_direct.c
new file mode 100755 (executable)
index 0000000..acef59d
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/bitops.h>
+
+#include <mali_kbase.h>
+#include <mali_kbase_mem.h>
+#include <mali_kbase_mmu_hw.h>
+#include <mali_kbase_mmu_hw_direct.h>
+
+#if KBASE_MMU_HW_BACKEND
+
+static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
+               u32 num_pages)
+{
+       u64 region;
+
+       /* can't lock a zero sized range */
+       KBASE_DEBUG_ASSERT(num_pages);
+
+       region = pfn << PAGE_SHIFT;
+       /*
+        * fls returns (given the ASSERT above):
+        * 1 .. 32
+        *
+        * 10 + fls(num_pages)
+        * results in the range (11 .. 42)
+        */
+
+       /* gracefully handle num_pages being zero */
+       if (0 == num_pages) {
+               region |= 11;
+       } else {
+               u8 region_width;
+
+               region_width = 10 + fls(num_pages);
+               if (num_pages != (1ul << (region_width - 11))) {
+                       /* not pow2, so must go up to the next pow2 */
+                       region_width += 1;
+               }
+               KBASE_DEBUG_ASSERT(region_width <= KBASE_LOCK_REGION_MAX_SIZE);
+               KBASE_DEBUG_ASSERT(region_width >= KBASE_LOCK_REGION_MIN_SIZE);
+               region |= region_width;
+       }
+
+       return region;
+}
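
A short worked example of the encoding this helper produces; the values follow directly from the arithmetic above, and no additional hardware semantics are implied. The upper bits of the returned value carry pfn << PAGE_SHIFT, i.e. the byte address of the start of the range.

    num_pages = 1    ->  fls = 1  ->  region_width = 11  (power of two, no round-up)
    num_pages = 3    ->  fls = 2  ->  region_width = 13  (rounded up from 12)
    num_pages = 256  ->  fls = 9  ->  region_width = 19  (power of two, no round-up)
    num_pages = 300  ->  fls = 9  ->  region_width = 20  (rounded up from 19)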
+
+static int wait_ready(struct kbase_device *kbdev,
+               unsigned int as_nr, struct kbase_context *kctx)
+{
+       unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
+
+       /* Wait for the MMU status to indicate there is no active command. */
+       while (--max_loops && kbase_reg_read(kbdev,
+                       MMU_AS_REG(as_nr, AS_STATUS),
+                       kctx) & AS_STATUS_AS_ACTIVE) {
+               ;
+       }
+
+       if (max_loops == 0) {
+               dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
+               struct kbase_context *kctx)
+{
+       int status;
+
+       /* write AS_COMMAND when MMU is ready to accept another command */
+       status = wait_ready(kbdev, as_nr, kctx);
+       if (status == 0)
+               kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd, kctx);
+
+       return status;
+}
+
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
+{
+       const int num_as = 16;
+       const int busfault_shift = MMU_REGS_PAGE_FAULT_FLAGS;
+       const int pf_shift = 0;
+       const unsigned long as_bit_mask = (1UL << num_as) - 1;
+       unsigned long flags;
+       u32 new_mask;
+       u32 tmp;
+
+       /* bus faults */
+       u32 bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
+       /* page faults (note: Ignore ASes with both pf and bf) */
+       u32 pf_bits = ((irq_stat >> pf_shift) & as_bit_mask) & ~bf_bits;
+
+       KBASE_DEBUG_ASSERT(NULL != kbdev);
+
+       /* remember current mask */
+       spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+       new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
+       /* mask interrupts for now */
+       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
+       spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+
+       while (bf_bits | pf_bits) {
+               struct kbase_as *as;
+               int as_no;
+               struct kbase_context *kctx;
+
+               /*
+                * the while logic ensures we have a bit set, no need to check
+                * for not-found here
+                */
+               as_no = ffs(bf_bits | pf_bits) - 1;
+               as = &kbdev->as[as_no];
+
+               /*
+                * Refcount the kctx ASAP - it shouldn't disappear anyway, since
+                * Bus/Page faults _should_ only occur whilst jobs are running,
+                * and a job causing the Bus/Page fault shouldn't complete until
+                * the MMU is updated
+                */
+               kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
+
+               /* find faulting address */
+               as->fault_addr = kbase_reg_read(kbdev,
+                                               MMU_AS_REG(as_no, AS_FAULTADDRESS_HI),
+                                               kctx);
+               as->fault_addr <<= 32;
+               as->fault_addr |= kbase_reg_read(kbdev,
+                                               MMU_AS_REG(as_no, AS_FAULTADDRESS_LO),
+                                               kctx);
+
+               /* record the fault status */
+               as->fault_status = kbase_reg_read(kbdev,
+                                                 MMU_AS_REG(as_no, AS_FAULTSTATUS),
+                                                 kctx);
+
+               /* find the fault type */
+               as->fault_type = (bf_bits & (1 << as_no)) ?
+                                 KBASE_MMU_FAULT_TYPE_BUS : KBASE_MMU_FAULT_TYPE_PAGE;
+
+
+               if (kbase_as_has_bus_fault(as)) {
+                       /*
+                        * Clear the internal JM mask first before clearing the
+                        * internal MMU mask
+                        *
+                        * Note:
+                        * Always clear the page fault just in case there was
+                        * one at the same time as the bus error (bus errors are
+                        * always processed in preference to pagefaults should
+                        * both happen at the same time).
+                        */
+                       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR),
+                                       (1UL << MMU_REGS_BUS_ERROR_FLAG(as_no)) |
+                                       (1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no)), kctx);
+
+                       /* mark as handled (note: bf_bits is already shifted) */
+                       bf_bits &= ~(1UL << (as_no));
+
+                       /* remove the queued BFs (and PFs) from the mask */
+                       new_mask &= ~((1UL << MMU_REGS_BUS_ERROR_FLAG(as_no)) |
+                                     (1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no)));
+               } else {
+                       /*
+                        * Clear the internal JM mask first before clearing the
+                        * internal MMU mask
+                        */
+                       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR),
+                                       1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no),
+                                       kctx);
+
+                       /* mark as handled */
+                       pf_bits &= ~(1UL << as_no);
+
+                       /* remove the queued PFs from the mask */
+                       new_mask &= ~(1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no));
+               }
+
+               /* Process the interrupt for this address space */
+               kbase_mmu_interrupt_process(kbdev, kctx, as);
+       }
+
+       /* reenable interrupts */
+       spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+       tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
+       new_mask |= tmp;
+       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
+       spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
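
A worked example of the fault-bit split above, assuming the bus-fault flags occupy the upper 16 bits of MMU_IRQ_STATUS as in the code this replaces:

    irq_stat = 0x00090008
        bf_bits = (irq_stat >> 16) & 0xFFFF      = 0x0009   (bus faults on AS0 and AS3)
        pf_bits = (irq_stat & 0xFFFF) & ~bf_bits = 0x0000   (AS3's page fault is dropped)

    AS3's page fault is deliberately ignored because AS3 also reported a bus
    fault; the bus-fault branch above clears both flags for that address space.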
+
+void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
+               struct kbase_context *kctx)
+{
+       struct kbase_mmu_setup *current_setup = &as->current_setup;
+
+       kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
+                       current_setup->transtab & 0xFFFFFFFFUL, kctx);
+       kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
+                       (current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);
+
+       kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
+                       current_setup->memattr & 0xFFFFFFFFUL, kctx);
+       kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
+                       (current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);
+       write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
+}
+
+int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
+               struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
+               unsigned int handling_irq)
+{
+       int ret;
+
+       if (op == AS_COMMAND_UNLOCK) {
+               /* Unlock doesn't require a lock first */
+               ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+       } else {
+               u64 lock_addr = lock_region(kbdev, vpfn, nr);
+
+               /* Lock the region that needs to be updated */
+               kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
+                               lock_addr & 0xFFFFFFFFUL, kctx);
+               kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
+                               (lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
+               write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);
+
+               if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3285) &&
+                               handling_irq) {
+                       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR),
+                                       (1UL << as->number), NULL);
+                       write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);
+               }
+
+               /* Run the MMU operation */
+               write_cmd(kbdev, as->number, op, kctx);
+
+               /* Wait for the flush to complete */
+               ret = wait_ready(kbdev, as->number, kctx);
+
+               if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
+                       /* Issue an UNLOCK command to ensure that valid page
+                          tables are re-read by the GPU after an update.
+                          The FLUSH command alone should be sufficient, but
+                          bus logs show that when multiple page faults occur
+                          within an 8-page region the MMU does not always
+                          re-read the updated page table entries for later
+                          faults (or re-reads them only partially) and then
+                          raises the page fault IRQ again for the same
+                          addresses. The UNLOCK flushes the MMU cache so the
+                          updates can be re-read. As the region is now
+                          unlocked, two UNLOCK commands are needed to flush
+                          the MMU/uTLB, see PRLAM-8812.
+                        */
+                       write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+                       write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+               }
+       }
+
+       return ret;
+}
+
+void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
+               struct kbase_context *kctx, enum kbase_mmu_fault_type type)
+{
+       unsigned long flags;
+       u32 mask;
+
+       spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
+       mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx);
+
+       mask |= (1UL << MMU_REGS_PAGE_FAULT_FLAG(as->number));
+       if (type == KBASE_MMU_FAULT_TYPE_BUS)
+               mask |= (1UL << MMU_REGS_BUS_ERROR_FLAG(as->number));
+
+       kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), mask, kctx);
+       spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
+}
+#endif
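
A minimal caller sketch of the lock/operate/wait sequence wrapped by kbase_mmu_hw_do_operation() above. It is illustrative only: flush_va_range() is a hypothetical helper name, AS_COMMAND_FLUSH is assumed to be provided by the register map headers, and error handling is elided.

static int flush_va_range(struct kbase_device *kbdev, struct kbase_as *as,
                struct kbase_context *kctx, u64 vpfn, u32 nr_pages)
{
        /* kbase_mmu_hw_do_operation() locks the region starting at vpfn,
         * issues the command, then waits for the address space to report
         * ready before returning */
        return kbase_mmu_hw_do_operation(kbdev, as, kctx, vpfn, nr_pages,
                        AS_COMMAND_FLUSH, 0 /* not called while handling an IRQ */);
}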
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_hw_direct.h b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw_direct.h
new file mode 100755 (executable)
index 0000000..9737dbb
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file
+ * Interface file for the direct implementation for MMU hardware access
+ */
+
+/**
+ * @page mali_kbase_mmu_hw_direct_page Direct MMU hardware interface
+ *
+ * @section mali_kbase_mmu_hw_direct_intro_sec Introduction
+ * This module provides the interface(s) that are required by the direct
+ * register access implementation of the MMU hardware interface
+ * @ref mali_kbase_mmu_hw_page .
+ */
+
+#ifndef _MALI_KBASE_MMU_HW_DIRECT_H_
+#define _MALI_KBASE_MMU_HW_DIRECT_H_
+
+#include <mali_kbase_defs.h>
+
+/**
+ * @addtogroup mali_kbase_mmu_hw
+ * @{
+ */
+
+/**
+ * @addtogroup mali_kbase_mmu_hw_direct Direct register access to MMU
+ * @{
+ */
+
+/** @brief Process an MMU interrupt.
+ *
+ * Process the MMU interrupt that was reported by the @ref kbase_device.
+ *
+ * @param[in]  kbdev          The kbase device on which the interrupt occurred.
+ * @param[in]  irq_stat       Value of the MMU_IRQ_STATUS register
+ */
+void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
+
+/** @} *//* end group mali_kbase_mmu_hw_direct */
+/** @} *//* end group mali_kbase_mmu_hw */
+
+#endif /* _MALI_KBASE_MMU_HW_DIRECT_H_ */
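
The irq_stat argument packs one page-fault bit and one bus-error bit per address space. The split below is an illustration only, assuming the usual Midgard layout with page faults in the low 16 bits of MMU_IRQ_STATUS and bus errors in the high 16 bits; the driver itself derives the bit positions from the MMU_REGS_PAGE_FAULT_FLAG()/MMU_REGS_BUS_ERROR_FLAG() macros in the register headers, and u32 is the kernel fixed-width type.

static void example_decode_mmu_irq(u32 irq_stat, u32 *pf_bits, u32 *bf_bits)
{
        *pf_bits = irq_stat & 0xFFFFu;          /* one page-fault bit per address space */
        *bf_bits = (irq_stat >> 16) & 0xFFFFu;  /* one bus-error bit per address space  */
}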
index b33f0b3bb4d12e4092b1f61b71f326ed5734d73a..c36b9358949722c430eedc020839f3d9483b6ea4 100755 (executable)
@@ -41,16 +41,16 @@ static struct platform_device *mali_device;
 
 #ifndef CONFIG_OF
 /**
- * @brief Convert data in kbase_io_resources struct to Linux-specific resources
+ * @brief Convert data in struct kbase_io_resources to Linux-specific resources
  *
- * Function converts data in kbase_io_resources struct to an array of Linux resource structures. Note that function
+ * Function converts data in struct kbase_io_resources to an array of Linux resource structures. Note that the function
  * assumes that size of linux_resource array is at least PLATFORM_CONFIG_RESOURCE_COUNT.
  * Resources are put in fixed order: I/O memory region, job IRQ, MMU IRQ, GPU IRQ.
  *
  * @param[in]  io_resource      Input IO resource data
  * @param[out] linux_resources  Pointer to output array of Linux resource structures
  */
-static void kbasep_config_parse_io_resources(const kbase_io_resources *io_resources, struct resource *const linux_resources)
+static void kbasep_config_parse_io_resources(const struct kbase_io_resources *io_resources, struct resource *const linux_resources)
 {
        if (!io_resources || !linux_resources) {
                pr_err("%s: couldn't find proper resources\n", __func__);
@@ -60,23 +60,25 @@ static void kbasep_config_parse_io_resources(const kbase_io_resources *io_resour
        memset(linux_resources, 0, PLATFORM_CONFIG_RESOURCE_COUNT * sizeof(struct resource));
 
        linux_resources[0].start = io_resources->io_memory_region.start;
-       linux_resources[0].end = io_resources->io_memory_region.end;
+       linux_resources[0].end   = io_resources->io_memory_region.end;
        linux_resources[0].flags = IORESOURCE_MEM;
-
-       linux_resources[1].start = linux_resources[1].end = io_resources->job_irq_number;
+       linux_resources[1].start = io_resources->job_irq_number;
+       linux_resources[1].end   = io_resources->job_irq_number;
        linux_resources[1].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
 
-       linux_resources[2].start = linux_resources[2].end = io_resources->mmu_irq_number;
+       linux_resources[2].start = io_resources->mmu_irq_number;
+       linux_resources[2].end   = io_resources->mmu_irq_number;
        linux_resources[2].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
 
-       linux_resources[3].start = linux_resources[3].end = io_resources->gpu_irq_number;
+       linux_resources[3].start = io_resources->gpu_irq_number;
+       linux_resources[3].end   = io_resources->gpu_irq_number;
        linux_resources[3].flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL;
 }
 #endif /* CONFIG_OF */
 
 int kbase_platform_fake_register(void)
 {
-       kbase_platform_config *config;
+       struct kbase_platform_config *config;
        int attribute_count;
 #ifndef CONFIG_OF
        struct resource resources[PLATFORM_CONFIG_RESOURCE_COUNT];
@@ -84,8 +86,7 @@ int kbase_platform_fake_register(void)
        int err;
 
        config = kbase_get_platform_config(); /* declared in midgard/mali_kbase_config.h but defined in platform folder */
-       if (config == NULL)
-       {
+       if (config == NULL) {
                pr_err("%s: couldn't get platform config\n", __func__);
                return -ENODEV;
        }
@@ -128,14 +129,13 @@ int kbase_platform_fake_register(void)
 
        return 0;
 }
+EXPORT_SYMBOL(kbase_platform_fake_register);
 
 void kbase_platform_fake_unregister(void)
 {
        if (mali_device)
                platform_device_unregister(mali_device);
 }
-
-EXPORT_SYMBOL(kbase_platform_fake_register);
 EXPORT_SYMBOL(kbase_platform_fake_unregister);
 
 #endif /* CONFIG_MALI_PLATFORM_FAKE */
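
The parser above fills the resource array in a fixed order (I/O memory region, job IRQ, MMU IRQ, GPU IRQ) that the consumer of the fake platform device is expected to rely on. A small illustrative enum makes that contract explicit; the index names are hypothetical and not part of the patch.

enum fake_platform_resource_index {
        FAKE_RES_IO_MEM = 0,    /* IORESOURCE_MEM: register region */
        FAKE_RES_JOB_IRQ,       /* IORESOURCE_IRQ: job interrupt   */
        FAKE_RES_MMU_IRQ,       /* IORESOURCE_IRQ: MMU interrupt   */
        FAKE_RES_GPU_IRQ        /* IORESOURCE_IRQ: GPU interrupt   */
};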
index 3fc671017b0e981f79bb38ee9141079631985d6a..08d96936ca3dd3a5111f80cfd59bc04bc84f3d8b 100755 (executable)
 
 #include <mali_kbase_pm.h>
 
-void kbase_pm_register_access_enable(kbase_device *kbdev)
+#if KBASE_PM_EN
+
+void kbase_pm_register_access_enable(struct kbase_device *kbdev)
 {
-       kbase_pm_callback_conf *callbacks;
+       struct kbase_pm_callback_conf *callbacks;
 
-       callbacks = (kbase_pm_callback_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
+       callbacks = (struct kbase_pm_callback_conf *)kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
 
        if (callbacks)
                callbacks->power_on_callback(kbdev);
 }
 
-void kbase_pm_register_access_disable(kbase_device *kbdev)
+void kbase_pm_register_access_disable(struct kbase_device *kbdev)
 {
-       kbase_pm_callback_conf *callbacks;
+       struct kbase_pm_callback_conf *callbacks;
 
-       callbacks = (kbase_pm_callback_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
+       callbacks = (struct kbase_pm_callback_conf *)kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
 
        if (callbacks)
                callbacks->power_off_callback(kbdev);
 }
 
-mali_error kbase_pm_init(kbase_device *kbdev)
+mali_error kbase_pm_init(struct kbase_device *kbdev)
 {
        mali_error ret = MALI_ERROR_NONE;
-       kbase_pm_callback_conf *callbacks;
+       struct kbase_pm_callback_conf *callbacks;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -64,7 +66,7 @@ mali_error kbase_pm_init(kbase_device *kbdev)
        kbdev->pm.gpu_in_desired_state = MALI_TRUE;
        init_waitqueue_head(&kbdev->pm.gpu_in_desired_state_wait);
 
-       callbacks = (kbase_pm_callback_conf *) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
+       callbacks = (struct kbase_pm_callback_conf *)kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS);
        if (callbacks) {
                kbdev->pm.callback_power_on = callbacks->power_on_callback;
                kbdev->pm.callback_power_off = callbacks->power_off_callback;
@@ -124,7 +126,7 @@ workq_fail:
 
 KBASE_EXPORT_TEST_API(kbase_pm_init)
 
-void kbase_pm_do_poweron(kbase_device *kbdev, mali_bool is_resume)
+void kbase_pm_do_poweron(struct kbase_device *kbdev, mali_bool is_resume)
 {
        lockdep_assert_held(&kbdev->pm.lock);
 
@@ -141,7 +143,7 @@ void kbase_pm_do_poweron(kbase_device *kbdev, mali_bool is_resume)
         * will wait for that state to be reached anyway */
 }
 
-void kbase_pm_do_poweroff(kbase_device *kbdev, mali_bool is_suspend)
+void kbase_pm_do_poweroff(struct kbase_device *kbdev, mali_bool is_suspend)
 {
        unsigned long flags;
        mali_bool cores_are_available;
@@ -153,7 +155,7 @@ void kbase_pm_do_poweroff(kbase_device *kbdev, mali_bool is_suspend)
        /* Force all cores off */
        kbdev->pm.desired_shader_state = 0;
 
-       /* Force all cores to be unavailable, in the situation where 
+       /* Force all cores to be unavailable, in the situation where
         * transitions are in progress for some cores but not others,
         * and kbase_pm_check_transitions_nolock can not immediately
         * power off the cores */
@@ -180,7 +182,7 @@ void kbase_pm_do_poweroff(kbase_device *kbdev, mali_bool is_suspend)
        kbase_pm_clock_off(kbdev, is_suspend);
 }
 
-mali_error kbase_pm_powerup(kbase_device *kbdev)
+mali_error kbase_pm_powerup(struct kbase_device *kbdev)
 {
        unsigned long flags;
        mali_error ret;
@@ -193,7 +195,7 @@ mali_error kbase_pm_powerup(kbase_device *kbdev)
        KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
 
        /* Power up the GPU, don't enable IRQs as we are not ready to receive them. */
-       ret = kbase_pm_init_hw(kbdev, MALI_FALSE );
+       ret = kbase_pm_init_hw(kbdev, MALI_FALSE);
        if (ret != MALI_ERROR_NONE) {
                mutex_unlock(&kbdev->pm.lock);
                return ret;
@@ -209,7 +211,6 @@ mali_error kbase_pm_powerup(kbase_device *kbdev)
        spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
        /* Ensure cycle counter is off */
        kbdev->pm.gpu_cycle_counter_requests = 0;
-       kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
        spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
 
        /* We are ready to receive IRQ's now as power policy is set up, so enable them now. */
@@ -232,13 +233,13 @@ mali_error kbase_pm_powerup(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbase_pm_powerup)
 
-void kbase_pm_context_active(kbase_device *kbdev)
+void kbase_pm_context_active(struct kbase_device *kbdev)
 {
        (void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
 }
 
-int kbase_pm_context_active_handle_suspend(kbase_device *kbdev, kbase_pm_suspend_handler suspend_handler)
-{      
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
+{
        int c;
        int old_count;
 
@@ -253,11 +254,10 @@ int kbase_pm_context_active_handle_suspend(kbase_device *kbdev, kbase_pm_suspend
                kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
 
        mutex_lock(&kbdev->pm.lock);
-       if (kbase_pm_is_suspending(kbdev))
-       {
+       if (kbase_pm_is_suspending(kbdev)) {
                switch (suspend_handler) {
                case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
-                       if (kbdev->pm.active_count != 0 )
+                       if (kbdev->pm.active_count != 0)
                                break;
                        /* FALLTHROUGH */
                case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
@@ -269,11 +269,12 @@ int kbase_pm_context_active_handle_suspend(kbase_device *kbdev, kbase_pm_suspend
                case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
                        /* FALLTHROUGH */
                default:
-                       KBASE_DEBUG_ASSERT_MSG(MALI_FALSE,"unreachable");
+                       KBASE_DEBUG_ASSERT_MSG(MALI_FALSE, "unreachable");
                        break;
                }
        }
        c = ++kbdev->pm.active_count;
+       KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
 
        KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);
 
@@ -296,7 +297,7 @@ int kbase_pm_context_active_handle_suspend(kbase_device *kbdev, kbase_pm_suspend
 
 KBASE_EXPORT_TEST_API(kbase_pm_context_active)
 
-void kbase_pm_context_idle(kbase_device *kbdev)
+void kbase_pm_context_idle(struct kbase_device *kbdev)
 {
        int c;
        int old_count;
@@ -314,6 +315,7 @@ void kbase_pm_context_idle(kbase_device *kbdev)
        mutex_lock(&kbdev->pm.lock);
 
        c = --kbdev->pm.active_count;
+       KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
 
        KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);
 
@@ -340,7 +342,7 @@ void kbase_pm_context_idle(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbase_pm_context_idle)
 
-void kbase_pm_halt(kbase_device *kbdev)
+void kbase_pm_halt(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -352,7 +354,7 @@ void kbase_pm_halt(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbase_pm_halt)
 
-void kbase_pm_term(kbase_device *kbdev)
+void kbase_pm_term(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
@@ -371,6 +373,7 @@ KBASE_EXPORT_TEST_API(kbase_pm_term)
 void kbase_pm_suspend(struct kbase_device *kbdev)
 {
        int nr_keep_gpu_powered_ctxs;
+
        KBASE_DEBUG_ASSERT(kbdev);
 
        mutex_lock(&kbdev->pm.lock);
@@ -391,8 +394,8 @@ void kbase_pm_suspend(struct kbase_device *kbdev)
 
        /* Cancel the keep_gpu_powered calls */
        for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
-                nr_keep_gpu_powered_ctxs > 0 ;
-                --nr_keep_gpu_powered_ctxs ) {
+                nr_keep_gpu_powered_ctxs > 0;
+                --nr_keep_gpu_powered_ctxs) {
                kbase_pm_context_idle(kbdev);
        }
 
@@ -428,8 +431,8 @@ void kbase_pm_resume(struct kbase_device *kbdev)
 
        /* Restore the keep_gpu_powered calls */
        for (nr_keep_gpu_powered_ctxs = atomic_read(&kbdev->keep_gpu_powered_count);
-                nr_keep_gpu_powered_ctxs > 0 ;
-                --nr_keep_gpu_powered_ctxs ) {
+                nr_keep_gpu_powered_ctxs > 0;
+                --nr_keep_gpu_powered_ctxs) {
                kbase_pm_context_active(kbdev);
        }
 
@@ -448,3 +451,4 @@ void kbase_pm_resume(struct kbase_device *kbdev)
         * need it and the policy doesn't want it on */
        kbase_pm_context_idle(kbdev);
 }
+#endif /* KBASE_PM_EN */
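
The active/idle calls above maintain a reference count that must be strictly paired. A minimal usage sketch, with an illustrative function name and the actual GPU work elided:

static void example_run_gpu_work(struct kbase_device *kbdev)
{
        /* Takes a PM reference; may power the GPU on and only returns once it is usable */
        kbase_pm_context_active(kbdev);

        /* ... submit work to the GPU and wait for it here ... */

        /* Drops the reference; with no other users the policy may power the GPU off */
        kbase_pm_context_idle(kbdev);
}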
index 4647dfe9cebb8918b56942e23911ecfd6e8a693e..86e23ea35f6c65bd4f9304dbe93b99e82c42b640 100755 (executable)
@@ -35,14 +35,14 @@ struct kbase_device;
 #include "mali_kbase_pm_policy.h"
 
 #include "mali_kbase_pm_ca_fixed.h"
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
 #include "mali_kbase_pm_ca_random.h"
 #endif
 
 #include "mali_kbase_pm_always_on.h"
 #include "mali_kbase_pm_coarse_demand.h"
 #include "mali_kbase_pm_demand.h"
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
 #include "mali_kbase_pm_demand_always_powered.h"
 #include "mali_kbase_pm_fast_start.h"
 #endif
@@ -59,12 +59,12 @@ struct kbase_device;
  * a manner that allows @ref core_type_to_reg function to be simpler and more
  * efficient.
  */
-typedef enum kbase_pm_core_type {
+enum kbase_pm_core_type {
        KBASE_PM_CORE_L3 = L3_PRESENT_LO,           /**< The L3 cache */
        KBASE_PM_CORE_L2 = L2_PRESENT_LO,           /**< The L2 cache */
        KBASE_PM_CORE_SHADER = SHADER_PRESENT_LO,   /**< Shader cores */
        KBASE_PM_CORE_TILER = TILER_PRESENT_LO      /**< Tiler cores */
-} kbase_pm_core_type;
+};
 
 /** Initialize the power management framework.
  *
@@ -107,7 +107,7 @@ void kbase_pm_term(struct kbase_device *kbdev);
 /** Metrics data collected for use by the power management framework.
  *
  */
-typedef struct kbasep_pm_metrics_data {
+struct kbasep_pm_metrics_data {
        int vsync_hit;
        int utilisation;
        int util_gl_share;
@@ -123,47 +123,49 @@ typedef struct kbasep_pm_metrics_data {
 
        spinlock_t lock;
 
+#ifdef CONFIG_MALI_MIDGARD_DVFS
        struct hrtimer timer;
        mali_bool timer_active;
+#endif
 
        void *platform_data;
        struct kbase_device *kbdev;
-} kbasep_pm_metrics_data;
+};
 
 /** Actions for DVFS.
  *
  * kbase_pm_get_dvfs_action will return one of these enumerated values to
  * describe the action that the DVFS system should take.
  */
-typedef enum kbase_pm_dvfs_action {
+enum kbase_pm_dvfs_action {
        KBASE_PM_DVFS_NOP,          /**< No change in clock frequency is requested */
        KBASE_PM_DVFS_CLOCK_UP,     /**< The clock frequency should be increased if possible */
        KBASE_PM_DVFS_CLOCK_DOWN    /**< The clock frequency should be decreased if possible */
-} kbase_pm_dvfs_action;
-
-typedef union kbase_pm_policy_data {
-       kbasep_pm_policy_always_on always_on;
-       kbasep_pm_policy_coarse_demand coarse_demand;
-       kbasep_pm_policy_demand demand;
-#if MALI_CUSTOMER_RELEASE == 0         
-       kbasep_pm_policy_demand_always_powered demand_always_powered;
-       kbasep_pm_policy_fast_start fast_start;
+};
+
+union kbase_pm_policy_data {
+       struct kbasep_pm_policy_always_on always_on;
+       struct kbasep_pm_policy_coarse_demand coarse_demand;
+       struct kbasep_pm_policy_demand demand;
+#if !MALI_CUSTOMER_RELEASE
+       struct kbasep_pm_policy_demand_always_powered demand_always_powered;
+       struct kbasep_pm_policy_fast_start fast_start;
 #endif
-} kbase_pm_policy_data;
+};
 
-typedef union kbase_pm_ca_policy_data {
-       kbasep_pm_ca_policy_fixed fixed;
-#if MALI_CUSTOMER_RELEASE == 0
-       kbasep_pm_ca_policy_random random;
+union kbase_pm_ca_policy_data {
+       struct kbasep_pm_ca_policy_fixed fixed;
+#if !MALI_CUSTOMER_RELEASE
+       struct kbasep_pm_ca_policy_random random;
 #endif
-} kbase_pm_ca_policy_data;
+};
 
 /** Data stored per device for power management.
  *
  * This structure contains data for the power management framework. There is one instance of this structure per device
  * in the system.
  */
-typedef struct kbase_pm_device_data {
+struct kbase_pm_device_data {
        /** The lock protecting Power Management structures accessed
         * outside of IRQ.
         *
@@ -180,7 +182,7 @@ typedef struct kbase_pm_device_data {
         * kbase_pm_ca_set_policy() will re-issue the policy functions that would've
         * been done under IRQ.
         */
-       const kbase_pm_ca_policy *ca_current_policy;
+       const struct kbase_pm_ca_policy *ca_current_policy;
 
        /** The policy that is currently actively controlling the power state.
         *
@@ -191,13 +193,13 @@ typedef struct kbase_pm_device_data {
         * kbase_pm_set_policy() will re-issue the policy functions that would've
         * been done under IRQ.
         */
-       const kbase_pm_policy *pm_current_policy;
+       const struct kbase_pm_policy *pm_current_policy;
 
        /** Private data for current CA policy */
-       kbase_pm_ca_policy_data ca_policy_data;
+       union kbase_pm_ca_policy_data ca_policy_data;
 
        /** Private data for current PM policy */
-       kbase_pm_policy_data pm_policy_data;
+       union kbase_pm_policy_data pm_policy_data;
 
        /** Flag indicating when core availability policy is transitioning cores.
         * The core availability policy must set this when a change in core availability
@@ -290,7 +292,7 @@ typedef struct kbase_pm_device_data {
 
        /** Structure to hold metrics for the GPU */
 
-       kbasep_pm_metrics_data metrics;
+       struct kbasep_pm_metrics_data metrics;
 
        /** Set to the number of poweroff timer ticks until the GPU is powered off */
        int gpu_poweroff_pending;
@@ -324,27 +326,27 @@ typedef struct kbase_pm_device_data {
         *
         * @return 1 if GPU state was lost, 0 otherwise
         */
-       int (*callback_power_on) (struct kbase_device *kbdev);
+       int (*callback_power_on)(struct kbase_device *kbdev);
 
        /** Callback when the GPU may be turned off. See @ref kbase_pm_callback_conf
         *
         * @param kbdev         The kbase device
         */
-       void (*callback_power_off) (struct kbase_device *kbdev);
+       void (*callback_power_off)(struct kbase_device *kbdev);
 
        /** Callback when a suspend occurs and the GPU needs to be turned off.
         *  See @ref kbase_pm_callback_conf
         *
         * @param kbdev         The kbase device
         */
-       void (*callback_power_suspend) (struct kbase_device *kbdev);
+       void (*callback_power_suspend)(struct kbase_device *kbdev);
 
        /** Callback when a resume occurs and the GPU needs to be turned on.
         *  See @ref kbase_pm_callback_conf
         *
         * @param kbdev         The kbase device
         */
-       void (*callback_power_resume) (struct kbase_device *kbdev);
+       void (*callback_power_resume)(struct kbase_device *kbdev);
 
        /** Callback for initializing the runtime power management.
         *
@@ -352,13 +354,13 @@ typedef struct kbase_pm_device_data {
         *
         * @return MALI_ERROR_NONE on success, else error code
         */
-        mali_error(*callback_power_runtime_init) (struct kbase_device *kbdev);
+        mali_error (*callback_power_runtime_init)(struct kbase_device *kbdev);
 
        /** Callback for terminating the runtime power management.
         *
         * @param kbdev         The kbase device
         */
-       void (*callback_power_runtime_term) (struct kbase_device *kbdev);
+       void (*callback_power_runtime_term)(struct kbase_device *kbdev);
 
        /** Callback when the GPU needs to be turned on. See @ref kbase_pm_callback_conf
         *
@@ -366,15 +368,15 @@ typedef struct kbase_pm_device_data {
         *
         * @return 1 if GPU state was lost, 0 otherwise
         */
-       int (*callback_power_runtime_on) (struct kbase_device *kbdev);
+       int (*callback_power_runtime_on)(struct kbase_device *kbdev);
 
        /** Callback when the GPU may be turned off. See @ref kbase_pm_callback_conf
         *
         * @param kbdev         The kbase device
         */
-       void (*callback_power_runtime_off) (struct kbase_device *kbdev);
+       void (*callback_power_runtime_off)(struct kbase_device *kbdev);
 
-} kbase_pm_device_data;
+};
 
 /** The GPU is idle.
  *
@@ -398,11 +400,11 @@ void kbase_pm_dev_activate(struct kbase_device *kbdev);
  * present in the GPU device and also a count of the number of cores.
  *
  * @param kbdev     The kbase device structure for the device (must be a valid pointer)
- * @param type      The type of core (see the @ref kbase_pm_core_type enumeration)
+ * @param type      The type of core (see the @ref enum kbase_pm_core_type enumeration)
  *
  * @return          The bit mask of cores present
  */
-u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, kbase_pm_core_type type);
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type);
 
 /** Get details of the cores that are currently active in the device.
  *
@@ -410,11 +412,11 @@ u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, kbase_pm_core_type ty
  * are actively processing work (i.e. turned on *and* busy).
  *
  * @param kbdev     The kbase device structure for the device (must be a valid pointer)
- * @param type      The type of core (see the @ref kbase_pm_core_type enumeration)
+ * @param type      The type of core (see the @ref enum kbase_pm_core_type enumeration)
  *
  * @return          The bit mask of active cores
  */
-u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, kbase_pm_core_type type);
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type);
 
 /** Get details of the cores that are currently transitioning between power states.
  *
@@ -422,11 +424,11 @@ u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, kbase_pm_core_type typ
  * are currently transitioning between power states.
  *
  * @param kbdev     The kbase device structure for the device (must be a valid pointer)
- * @param type      The type of core (see the @ref kbase_pm_core_type enumeration)
+ * @param type      The type of core (see the @ref enum kbase_pm_core_type enumeration)
  *
  * @return          The bit mask of transitioning cores
  */
-u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, kbase_pm_core_type type);
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type);
 
 /** Get details of the cores that are currently powered and ready for jobs.
  *
@@ -434,11 +436,11 @@ u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, kbase_pm_core_type type
  * are powered and ready for jobs (they may or may not be currently executing jobs).
  *
  * @param kbdev     The kbase device structure for the device (must be a valid pointer)
- * @param type      The type of core (see the @ref kbase_pm_core_type enumeration)
+ * @param type      The type of core (see the @ref enum kbase_pm_core_type enumeration)
  *
  * @return          The bit mask of ready cores
  */
-u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, kbase_pm_core_type type);
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type);
 
 /** Turn the clock for the device on, and enable device interrupts.
  *
@@ -492,7 +494,7 @@ void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
  *
  * @return MALI_ERROR_NONE if the device is supported and successfully reset.
  */
-mali_error kbase_pm_init_hw(struct kbase_device *kbdev, mali_bool enable_irqs );
+mali_error kbase_pm_init_hw(struct kbase_device *kbdev, mali_bool enable_irqs);
 
 /** The GPU has been reset successfully.
  *
@@ -523,7 +525,7 @@ void kbase_pm_context_active(struct kbase_device *kbdev);
 
 
 /** Handler codes for doing kbase_pm_context_active_handle_suspend() */
-typedef enum {
+enum kbase_pm_suspend_handler {
        /** A suspend is not expected/not possible - this is the same as
         * kbase_pm_context_active() */
        KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE,
@@ -535,12 +537,12 @@ typedef enum {
         * This should only be used when there is a bounded time on the activation
         * (e.g. guarantee it's going to be idled very soon after) */
        KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE
-} kbase_pm_suspend_handler;
+};
 
 /** Suspend 'safe' variant of kbase_pm_context_active()
  *
  * If a suspend is in progress, this allows for various different ways of
- * handling the suspend. Refer to @ref kbase_pm_suspend_handler for details.
+ * handling the suspend. Refer to @ref enum kbase_pm_suspend_handler for details.
  *
  * We return a status code indicating whether we're allowed to keep the GPU
  * active during the suspend, depending on the handler code. If the status code
@@ -552,7 +554,7 @@ typedef enum {
  * @return zero     Indicates success
  * @return non-zero Indicates failure due to the system being suspending/suspended.
  */
-int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, kbase_pm_suspend_handler suspend_handler);
+int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler);
 
 /** Decrement the reference count of active contexts.
  *
@@ -565,7 +567,7 @@ void kbase_pm_context_idle(struct kbase_device *kbdev);
 
 /** Check if there are any power transitions to make, and if so start them.
  *
- * This function will check the desired_xx_state members of kbase_pm_device_data and the actual status of the
+ * This function will check the desired_xx_state members of struct kbase_pm_device_data and the actual status of the
  * hardware to see if any power transitions can be made at this time to make the hardware state closer to the state
  * desired by the power policy.
  *
@@ -621,7 +623,7 @@ void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);
  * any power transitions.
  *
  * This function will update the desired_xx_state members of
- * kbase_pm_device_data by calling into the current Power Policy. It will then
+ * struct kbase_pm_device_data by calling into the current Power Policy. It will then
  * begin power transitions to make the hardware achieve the desired shader core
  * state.
  *
@@ -738,18 +740,28 @@ void kbase_pm_unregister_vsync_callback(struct kbase_device *kbdev);
  * @retval KBASE_PM_DVFS_CLOCK_UP,  The clock frequency should be increased if possible.
  * @retval KBASE_PM_DVFS_CLOCK_DOWN The clock frequency should be decreased if possible.
  */
-kbase_pm_dvfs_action kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
+enum kbase_pm_dvfs_action kbase_pm_get_dvfs_action(struct kbase_device *kbdev);
 
 /** Mark that the GPU cycle counter is needed, if the caller is the first caller
- *  then the GPU cycle counters will be enabled.
+ *  then the GPU cycle counters will be enabled along with the l2 cache.
  *
  * The GPU must be powered when calling this function (i.e. @ref kbase_pm_context_active must have been called).
  *
  * @param kbdev    The kbase device structure for the device (must be a valid pointer)
  */
-
 void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);
 
+/** Version of @ref kbase_pm_request_gpu_cycle_counter suitable for callers that
+ *  know the l2 cache is already on and guaranteed to stay on until the
+ *  subsequent call to kbase_pm_release_gpu_cycle_counter, for example while a
+ *  job is being submitted. It does not sleep and can be called from atomic
+ *  context.
+ *
+ *  The GPU must be powered when calling this function (i.e. @ref
+ *  kbase_pm_context_active must have been called) and the l2 cache must be
+ *  powered on.
+ *
+ *  @param kbdev    The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);
+
 /** Mark that the GPU cycle counter is no longer in use, if the caller is the last
  *  caller then the GPU cycle counters will be disabled. A request must have been made
  *  before a call to this.
@@ -849,6 +861,11 @@ void kbase_pm_do_poweron(struct kbase_device *kbdev, mali_bool is_resume);
  */
 void kbase_pm_do_poweroff(struct kbase_device *kbdev, mali_bool is_suspend);
 
+#ifdef CONFIG_PM_DEVFREQ
+void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
+               unsigned long *total, unsigned long *busy, bool reset);
+#endif
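
A hedged sketch of how the new devfreq glue (mali_kbase_devfreq.c in this patch) might feed kbase_pm_get_dvfs_utilisation() into a devfreq status callback. Only the total_time/busy_time fields come from the standard struct devfreq_dev_status in <linux/devfreq.h>; the function name and the assumption that reset=true restarts the measurement window are illustrative.

static int example_get_dev_status(struct kbase_device *kbdev,
                struct devfreq_dev_status *stat)
{
        unsigned long total = 0, busy = 0;

        kbase_pm_get_dvfs_utilisation(kbdev, &total, &busy, true);

        stat->total_time = total;  /* length of the measurement window */
        stat->busy_time = busy;    /* portion of the window the GPU was busy */
        return 0;
}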
+
 #ifdef CONFIG_MALI_MIDGARD_DVFS
 
 /**
index b457ca229337ec95b598bfba703ab042f2c507ce..4d3878b50937fc4cea2d80da64c5ccdebb98f0e7 100755 (executable)
 #include <mali_kbase.h>
 #include <mali_kbase_pm.h>
 
+#if KBASE_PM_EN
 static u64 always_on_get_core_mask(struct kbase_device *kbdev)
 {
        return kbdev->shader_present_bitmap;
 }
 
-static mali_bool always_on_get_core_active (struct kbase_device *kbdev)
+static mali_bool always_on_get_core_active(struct kbase_device *kbdev)
 {
        return MALI_TRUE;
 }
@@ -45,11 +46,11 @@ static void always_on_term(struct kbase_device *kbdev)
        CSTD_UNUSED(kbdev);
 }
 
-/** The @ref kbase_pm_policy structure for the demand power policy.
+/** The @ref struct kbase_pm_policy structure for the always_on power policy.
  *
  * This is the static structure that defines the always_on power policy's callback and name.
  */
-const kbase_pm_policy kbase_pm_always_on_policy_ops = {
+const struct kbase_pm_policy kbase_pm_always_on_policy_ops = {
        "always_on",                    /* name */
        always_on_init,                 /* init */
        always_on_term,                 /* term */
@@ -60,3 +61,4 @@ const kbase_pm_policy kbase_pm_always_on_policy_ops = {
 };
 
 KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops)
+#endif  /* KBASE_PM_EN */
index e7cfba5b50fef95d54e550c6822b59359a7a7932..9d5fb94c8218dd619b97be195e700828ebc22e0e 100755 (executable)
 
 #include <mali_kbase.h>
 #include <mali_kbase_pm.h>
-
-extern const kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops;
-#if MALI_CUSTOMER_RELEASE == 0
-extern const kbase_pm_ca_policy kbase_pm_ca_random_policy_ops;
+#if KBASE_PM_EN
+extern const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops;
+#if !MALI_CUSTOMER_RELEASE
+extern const struct kbase_pm_ca_policy kbase_pm_ca_random_policy_ops;
 #endif
 
-static const kbase_pm_ca_policy *const policy_list[] = {
+static const struct kbase_pm_ca_policy *const policy_list[] = {
        &kbase_pm_ca_fixed_policy_ops,
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
        &kbase_pm_ca_random_policy_ops
 #endif
 };
@@ -40,7 +40,7 @@ static const kbase_pm_ca_policy *const policy_list[] = {
  */
 #define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
 
-mali_error kbase_pm_ca_init(kbase_device *kbdev)
+mali_error kbase_pm_ca_init(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -51,12 +51,12 @@ mali_error kbase_pm_ca_init(kbase_device *kbdev)
        return MALI_ERROR_NONE;
 }
 
-void kbase_pm_ca_term(kbase_device *kbdev)
+void kbase_pm_ca_term(struct kbase_device *kbdev)
 {
        kbdev->pm.ca_current_policy->term(kbdev);
 }
 
-int kbase_pm_ca_list_policies(const kbase_pm_ca_policy * const **list)
+int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list)
 {
        if (!list)
                return POLICY_COUNT;
@@ -68,7 +68,7 @@ int kbase_pm_ca_list_policies(const kbase_pm_ca_policy * const **list)
 
 KBASE_EXPORT_TEST_API(kbase_pm_ca_list_policies)
 
-const kbase_pm_ca_policy *kbase_pm_ca_get_policy(kbase_device *kbdev)
+const struct kbase_pm_ca_policy *kbase_pm_ca_get_policy(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -77,9 +77,9 @@ const kbase_pm_ca_policy *kbase_pm_ca_get_policy(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbase_pm_ca_get_policy)
 
-void kbase_pm_ca_set_policy(kbase_device *kbdev, const kbase_pm_ca_policy *new_policy)
+void kbase_pm_ca_set_policy(struct kbase_device *kbdev, const struct kbase_pm_ca_policy *new_policy)
 {
-       const kbase_pm_ca_policy *old_policy;
+       const struct kbase_pm_ca_policy *old_policy;
        unsigned long flags;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -125,7 +125,7 @@ void kbase_pm_ca_set_policy(kbase_device *kbdev, const kbase_pm_ca_policy *new_p
 
 KBASE_EXPORT_TEST_API(kbase_pm_ca_set_policy)
 
-u64 kbase_pm_ca_get_core_mask(kbase_device *kbdev)
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
 {
        lockdep_assert_held(&kbdev->pm.power_change_lock);
 
@@ -141,7 +141,7 @@ u64 kbase_pm_ca_get_core_mask(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask)
 
-void kbase_pm_ca_update_core_status(kbase_device *kbdev, u64 cores_ready, u64 cores_transitioning)
+void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready, u64 cores_transitioning)
 {
        lockdep_assert_held(&kbdev->pm.power_change_lock);
 
@@ -170,4 +170,4 @@ void kbase_pm_ca_instr_disable(struct kbase_device *kbdev)
        kbase_pm_update_cores_state_nolock(kbdev);
        spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
 }
-
+#endif /* KBASE_PM_EN */
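
A sketch of how a core availability policy could be selected by name using the two calls above, for example from a sysfs store handler. The helper name is illustrative, and strcmp()/-EINVAL assume the usual kernel headers are available.

static int example_set_ca_policy_by_name(struct kbase_device *kbdev,
                const char *name)
{
        const struct kbase_pm_ca_policy *const *policies;
        int count;
        int i;

        count = kbase_pm_ca_list_policies(&policies);
        for (i = 0; i < count; i++) {
                if (!strcmp(policies[i]->name, name)) {
                        kbase_pm_ca_set_policy(kbdev, policies[i]);
                        return 0;
                }
        }

        return -EINVAL;
}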
index f6a97c7c6b899ffa3a06bbf63911a90a4112fb5b..e073470e085501e1ae4531f9b36a8319cd5c2780 100755 (executable)
 #ifndef _KBASE_PM_CA_H_
 #define _KBASE_PM_CA_H_
 
-typedef enum kbase_pm_ca_policy_id {
+enum kbase_pm_ca_policy_id {
        KBASE_PM_CA_POLICY_ID_FIXED = 1,
        KBASE_PM_CA_POLICY_ID_RANDOM
-} kbase_pm_ca_policy_id;
+};
 
 typedef u32 kbase_pm_ca_policy_flags;
 
@@ -35,7 +35,7 @@ typedef u32 kbase_pm_ca_policy_flags;
  * Each core availability policy exposes a (static) instance of this structure which contains function pointers to the
  * policy's methods.
  */
-typedef struct kbase_pm_ca_policy {
+struct kbase_pm_ca_policy {
        /** The name of this policy */
        char *name;
 
@@ -48,13 +48,13 @@ typedef struct kbase_pm_ca_policy {
         *
         * @param kbdev     The kbase device structure for the device (must be a valid pointer)
         */
-       void (*init) (struct kbase_device *kbdev);
+       void (*init)(struct kbase_device *kbdev);
 
        /** Function called when the policy is unselected.
         *
         * @param kbdev     The kbase device structure for the device (must be a valid pointer)
         */
-       void (*term) (struct kbase_device *kbdev);
+       void (*term)(struct kbase_device *kbdev);
 
        /** Function called to get the current shader core availability mask
         *
@@ -67,7 +67,7 @@ typedef struct kbase_pm_ca_policy {
         * @param kbdev     The kbase device structure for the device (must be a valid pointer)
         *
         * @return     The current core availability mask */
-       u64 (*get_core_mask) (struct kbase_device *kbdev);
+       u64 (*get_core_mask)(struct kbase_device *kbdev);
 
        /** Function called to update the current core status
         *
@@ -82,7 +82,7 @@ typedef struct kbase_pm_ca_policy {
         * @param kbdev                   The kbase device structure for the device (must be a valid pointer)
         * @param cores_ready             The mask of cores currently powered and ready to run jobs
         * @param cores_transitioning     The mask of cores currently transitioning power state */
-       void (*update_core_status) (struct kbase_device *kbdev, u64 cores_ready, u64 cores_transitioning);
+       void (*update_core_status)(struct kbase_device *kbdev, u64 cores_ready, u64 cores_transitioning);
 
        /** Field indicating flags for this policy */
        kbase_pm_ca_policy_flags flags;
@@ -90,8 +90,8 @@ typedef struct kbase_pm_ca_policy {
        /** Field indicating an ID for this policy. This is not necessarily the
         * same as its index in the list returned by kbase_pm_list_policies().
         * It is used purely for debugging. */
-       kbase_pm_ca_policy_id id;
-} kbase_pm_ca_policy;
+       enum kbase_pm_ca_policy_id id;
+};
 
 /** Initialize core availability framework
  *
@@ -150,14 +150,14 @@ void kbase_pm_ca_instr_disable(struct kbase_device *kbdev);
  *
  * @return The current policy
  */
-const kbase_pm_ca_policy *kbase_pm_ca_get_policy(struct kbase_device *kbdev);
+const struct kbase_pm_ca_policy *kbase_pm_ca_get_policy(struct kbase_device *kbdev);
 
 /** Change the policy to the one specified.
  *
  * @param kbdev     The kbase device structure for the device (must be a valid pointer)
  * @param policy    The policy to change to (valid pointer returned from @ref kbase_pm_ca_list_policies)
  */
-void kbase_pm_ca_set_policy(struct kbase_device *kbdev, const kbase_pm_ca_policy *policy);
+void kbase_pm_ca_set_policy(struct kbase_device *kbdev, const struct kbase_pm_ca_policy *policy);
 
 /** Retrieve a static list of the available policies.
  * @param[out]  policies    An array pointer to take the list of policies. This may be NULL.
@@ -165,6 +165,6 @@ void kbase_pm_ca_set_policy(struct kbase_device *kbdev, const kbase_pm_ca_policy
  *
  * @return The number of policies
  */
-int kbase_pm_ca_list_policies(const kbase_pm_ca_policy * const **policies);
+int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **policies);
 
 #endif                         /* _KBASE_PM_CA_H_ */
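
A minimal sketch of a custom core availability policy wired into the struct kbase_pm_ca_policy interface documented above. The behaviour mirrors the fixed policy (every present shader core is always available); the names are illustrative, and the id value is a placeholder since a real policy would need its own entry in enum kbase_pm_ca_policy_id.

static void example_ca_init(struct kbase_device *kbdev)
{
        CSTD_UNUSED(kbdev);
}

static void example_ca_term(struct kbase_device *kbdev)
{
        CSTD_UNUSED(kbdev);
}

static u64 example_ca_get_core_mask(struct kbase_device *kbdev)
{
        /* Make every physically present shader core available */
        return kbdev->shader_present_bitmap;
}

static void example_ca_update_core_status(struct kbase_device *kbdev,
                u64 cores_ready, u64 cores_transitioning)
{
        /* Nothing to track for this trivial policy */
        CSTD_UNUSED(kbdev);
        CSTD_UNUSED(cores_ready);
        CSTD_UNUSED(cores_transitioning);
}

static const struct kbase_pm_ca_policy example_ca_policy_ops = {
        .name = "example",
        .init = example_ca_init,
        .term = example_ca_term,
        .get_core_mask = example_ca_get_core_mask,
        .update_core_status = example_ca_update_core_status,
        .flags = 0u,
        .id = KBASE_PM_CA_POLICY_ID_FIXED,      /* placeholder, see note above */
};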
index e391ecfce9b983cdc27b00c3bf8541fb1aa433e9..d81cdbd42e7cf0d0fd89aca6f78c13724de00996 100755 (executable)
@@ -22,7 +22,7 @@
 
 #include <mali_kbase.h>
 #include <mali_kbase_pm.h>
-
+#if KBASE_PM_EN
 static void fixed_init(struct kbase_device *kbdev)
 {
        kbdev->pm.ca_in_transition = MALI_FALSE;
@@ -38,18 +38,18 @@ static u64 fixed_get_core_mask(struct kbase_device *kbdev)
        return kbdev->shader_present_bitmap;
 }
 
-static void fixed_update_core_status (struct kbase_device *kbdev, u64 cores_ready, u64 cores_transitioning)
+static void fixed_update_core_status(struct kbase_device *kbdev, u64 cores_ready, u64 cores_transitioning)
 {
        CSTD_UNUSED(kbdev);
        CSTD_UNUSED(cores_ready);
        CSTD_UNUSED(cores_transitioning);
 }
 
-/** The @ref kbase_pm_policy structure for the fixed power policy.
+/** The @ref struct kbase_pm_ca_policy structure for the fixed core availability policy.
  *
  * This is the static structure that defines the fixed core availability policy's callback and name.
  */
-const kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = {
+const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = {
        "fixed",                        /* name */
        fixed_init,                     /* init */
        fixed_term,                     /* term */
@@ -60,3 +60,4 @@ const kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = {
 };
 
 KBASE_EXPORT_TEST_API(kbase_pm_ca_fixed_policy_ops)
+#endif  /* KBASE_PM_EN */
index 095e6f097eceb18e50fafc899070c48361dc6914..8093e32e4dcd29a02ef582cac7005223a074631c 100755 (executable)
@@ -25,6 +25,7 @@
 #include <mali_kbase.h>
 #include <mali_kbase_pm.h>
 
+#if KBASE_PM_EN
 static u64 coarse_demand_get_core_mask(struct kbase_device *kbdev)
 {
        if (kbdev->pm.active_count == 0)
@@ -51,11 +52,11 @@ static void coarse_demand_term(struct kbase_device *kbdev)
        CSTD_UNUSED(kbdev);
 }
 
-/** The @ref kbase_pm_policy structure for the demand power policy.
+/** The @ref struct kbase_pm_policy structure for the coarse demand power policy.
  *
  * This is the static structure that defines the coarse demand power policy's callback and name.
  */
-const kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
+const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
        "coarse_demand",                        /* name */
        coarse_demand_init,                     /* init */
        coarse_demand_term,                     /* term */
@@ -66,3 +67,4 @@ const kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
 };
 
 KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops)
+#endif  /* KBASE_PM_EN */
index fd94294433f756fc7bb32ff8483c53ac70cb537a..0e322924c7ef14eccb6761dfc23d1d2a35651ddd 100755 (executable)
@@ -25,6 +25,8 @@
 #include <mali_kbase.h>
 #include <mali_kbase_pm.h>
 
+#if KBASE_PM_EN
+
 static u64 demand_get_core_mask(struct kbase_device *kbdev)
 {
        u64 desired = kbdev->shader_needed_bitmap | kbdev->shader_inuse_bitmap;
@@ -35,7 +37,7 @@ static u64 demand_get_core_mask(struct kbase_device *kbdev)
        return desired;
 }
 
-static mali_bool demand_get_core_active (struct kbase_device *kbdev)
+static mali_bool demand_get_core_active(struct kbase_device *kbdev)
 {
        if (0 == kbdev->pm.active_count)
                return MALI_FALSE;
@@ -53,11 +55,11 @@ static void demand_term(struct kbase_device *kbdev)
        CSTD_UNUSED(kbdev);
 }
 
-/** The @ref kbase_pm_policy structure for the demand power policy.
+/** The @ref struct kbase_pm_policy structure for the demand power policy.
  *
  * This is the static structure that defines the demand power policy's callback and name.
  */
-const kbase_pm_policy kbase_pm_demand_policy_ops = {
+const struct kbase_pm_policy kbase_pm_demand_policy_ops = {
        "demand",                       /* name */
        demand_init,                    /* init */
        demand_term,                    /* term */
@@ -68,3 +70,4 @@ const kbase_pm_policy kbase_pm_demand_policy_ops = {
 };
 
 KBASE_EXPORT_TEST_API(kbase_pm_demand_policy_ops)
+#endif  /* KBASE_PM_EN */
index d8c9bd8fe28571ca85e754fdcccacaef57b71508..c16f2bc21d13bfbdf7e2f40ff61622d0c5ac3d96 100755 (executable)
  */
 
 #include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
 #include <mali_midg_regmap.h>
 #include <mali_kbase_gator.h>
 #include <mali_kbase_pm.h>
 #include <mali_kbase_config_defaults.h>
 
+#if KBASE_PM_EN
+
 #if MALI_MOCK_TEST
 #define MOCKABLE(function) function##_original
 #else
  * This enumeration is private to the file. Its values are set to allow @ref core_type_to_reg function,
  * which decodes this enumeration, to be simpler and more efficient.
  */
-typedef enum kbasep_pm_action {
+enum kbasep_pm_action {
        ACTION_PRESENT = 0,
        ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
        ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
        ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
        ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
        ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
-} kbasep_pm_action;
+};
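
/* Worked example of the offset arithmetic the core_type_to_reg() helper below
 * relies on, using the values from enum kbase_pm_core_type and the enum above:
 *
 *   core_type_to_reg(KBASE_PM_CORE_SHADER, ACTION_READY)
 *       == SHADER_PRESENT_LO + (SHADER_READY_LO - SHADER_PRESENT_LO)
 *       == SHADER_READY_LO
 *
 * so each (core type, action) pair maps onto the matching *_LO register with a
 * single addition and no lookup table.
 */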
 
 /** Decode a core type and action to a register.
  *
@@ -60,9 +63,9 @@ typedef enum kbasep_pm_action {
  * @return The register offset of the \c _LO register that performs an action of type \c action on a core of type \c
  * core_type.
  */
-static u32 core_type_to_reg(kbase_pm_core_type core_type, kbasep_pm_action action)
+static u32 core_type_to_reg(enum kbase_pm_core_type core_type, enum kbasep_pm_action action)
 {
-       return core_type + action;
+       return (u32)core_type + (u32)action;
 }
 
 /** Invokes an action on a core set
@@ -75,7 +78,7 @@ static u32 core_type_to_reg(kbase_pm_core_type core_type, kbasep_pm_action actio
  * @param cores     A bit mask of cores to perform the action on (low 32 bits)
  * @param action    The action to perform on the cores
  */
-STATIC void kbase_pm_invoke(kbase_device *kbdev, kbase_pm_core_type core_type, u64 cores, kbasep_pm_action action)
+STATIC void kbase_pm_invoke(struct kbase_device *kbdev, enum kbase_pm_core_type core_type, u64 cores, enum kbasep_pm_action action)
 {
        u32 reg;
        u32 lo = cores & 0xFFFFFFFF;
@@ -98,33 +101,33 @@ STATIC void kbase_pm_invoke(kbase_device *kbdev, kbase_pm_core_type core_type, u
        if (cores) {
                if (action == ACTION_PWRON)
                        switch (core_type) {
-                               case KBASE_PM_CORE_SHADER:
-                                       KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u, lo);
-                                       break;
-                               case KBASE_PM_CORE_TILER:
-                                       KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL, NULL, 0u, lo);
-                                       break;
-                               case KBASE_PM_CORE_L2:
-                                       KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL, 0u, lo);
-                                       break;
-                               default:
-                                       /* L3 not handled */
-                                       break;
+                       case KBASE_PM_CORE_SHADER:
+                               KBASE_TRACE_ADD(kbdev, PM_PWRON, NULL, NULL, 0u, lo);
+                               break;
+                       case KBASE_PM_CORE_TILER:
+                               KBASE_TRACE_ADD(kbdev, PM_PWRON_TILER, NULL, NULL, 0u, lo);
+                               break;
+                       case KBASE_PM_CORE_L2:
+                               KBASE_TRACE_ADD(kbdev, PM_PWRON_L2, NULL, NULL, 0u, lo);
+                               break;
+                       default:
+                               /* L3 not handled */
+                               break;
                        }
                else if (action == ACTION_PWROFF)
                        switch (core_type) {
-                               case KBASE_PM_CORE_SHADER:
-                                       KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL, 0u, lo);
-                                       break;
-                               case KBASE_PM_CORE_TILER:
-                                       KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL, NULL, 0u, lo);
-                                       break;
-                               case KBASE_PM_CORE_L2:
-                                       KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL, 0u, lo);
-                                       break;
-                               default:
-                                       /* L3 not handled */
-                                       break;
+                       case KBASE_PM_CORE_SHADER:
+                               KBASE_TRACE_ADD(kbdev, PM_PWROFF, NULL, NULL, 0u, lo);
+                               break;
+                       case KBASE_PM_CORE_TILER:
+                               KBASE_TRACE_ADD(kbdev, PM_PWROFF_TILER, NULL, NULL, 0u, lo);
+                               break;
+                       case KBASE_PM_CORE_L2:
+                               KBASE_TRACE_ADD(kbdev, PM_PWROFF_L2, NULL, NULL, 0u, lo);
+                               break;
+                       default:
+                               /* L3 not handled */
+                               break;
                        }
        }
 
@@ -147,7 +150,7 @@ STATIC void kbase_pm_invoke(kbase_device *kbdev, kbase_pm_core_type core_type, u
  *
  * @return A bit mask specifying the state of the cores
  */
-static u64 kbase_pm_get_state(kbase_device *kbdev, kbase_pm_core_type core_type, kbasep_pm_action action)
+static u64 kbase_pm_get_state(struct kbase_device *kbdev, enum kbase_pm_core_type core_type, enum kbasep_pm_action action)
 {
        u32 reg;
        u32 lo, hi;
@@ -162,7 +165,7 @@ static u64 kbase_pm_get_state(kbase_device *kbdev, kbase_pm_core_type core_type,
        return (((u64) hi) << 32) | ((u64) lo);
 }
 
-void kbasep_pm_read_present_cores(kbase_device *kbdev)
+void kbasep_pm_read_present_cores(struct kbase_device *kbdev)
 {
        kbdev->shader_present_bitmap = kbase_pm_get_state(kbdev, KBASE_PM_CORE_SHADER, ACTION_PRESENT);
        kbdev->tiler_present_bitmap = kbase_pm_get_state(kbdev, KBASE_PM_CORE_TILER, ACTION_PRESENT);
@@ -185,7 +188,7 @@ KBASE_EXPORT_TEST_API(kbasep_pm_read_present_cores)
 
 /** Get the cores that are present
  */
-u64 kbase_pm_get_present_cores(kbase_device *kbdev, kbase_pm_core_type type)
+u64 kbase_pm_get_present_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -211,7 +214,7 @@ KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores)
 
 /** Get the cores that are "active" (busy processing work)
  */
-u64 kbase_pm_get_active_cores(kbase_device *kbdev, kbase_pm_core_type type)
+u64 kbase_pm_get_active_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type)
 {
        return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
 }
@@ -220,7 +223,7 @@ KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores)
 
 /** Get the cores that are transitioning between power states
  */
-u64 kbase_pm_get_trans_cores(kbase_device *kbdev, kbase_pm_core_type type)
+u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type)
 {
        return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
 }
@@ -228,24 +231,25 @@ u64 kbase_pm_get_trans_cores(kbase_device *kbdev, kbase_pm_core_type type)
 KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores)
 /** Get the cores that are powered on
  */
-u64 kbase_pm_get_ready_cores(kbase_device *kbdev, kbase_pm_core_type type)
+u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev, enum kbase_pm_core_type type)
 {
        u64 result;
+
        result = kbase_pm_get_state(kbdev, type, ACTION_READY);
 
        switch (type) {
-               case KBASE_PM_CORE_SHADER:
-                       KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u, (u32) result);
-                       break;
-               case KBASE_PM_CORE_TILER:
-                       KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u, (u32) result);
-                       break;
-               case KBASE_PM_CORE_L2:
-                       KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u, (u32) result);
-                       break;
-               default:
-                       /* NB: L3 not currently traced */
-                       break;
+       case KBASE_PM_CORE_SHADER:
+               KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED, NULL, NULL, 0u, (u32) result);
+               break;
+       case KBASE_PM_CORE_TILER:
+               KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, NULL, 0u, (u32) result);
+               break;
+       case KBASE_PM_CORE_L2:
+               KBASE_TRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, NULL, 0u, (u32) result);
+               break;
+       default:
+               /* NB: L3 not currently traced */
+               break;
        }
 
        return result;
@@ -272,7 +276,7 @@ KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores)
  *
  * @return MALI_TRUE if the desired state has been reached, MALI_FALSE otherwise
  */
-STATIC mali_bool kbase_pm_transition_core_type(kbase_device *kbdev, kbase_pm_core_type type, u64 desired_state,
+STATIC mali_bool kbase_pm_transition_core_type(struct kbase_device *kbdev, enum kbase_pm_core_type type, u64 desired_state,
                                               u64 in_use, u64 * const available, u64 *powering_on)
 {
        u64 present;
@@ -330,7 +334,7 @@ STATIC mali_bool kbase_pm_transition_core_type(kbase_device *kbdev, kbase_pm_cor
         * Mali cores that support the following case:
         *
         * If the SHADER_PWRON or TILER_PWRON registers are written to turn on
-        * a core that is currently transitioning to power off, then this is 
+        * a core that is currently transitioning to power off, then this is
         * remembered and the shader core is automatically powered up again once
         * the original transition completes. Once the automatic power on is
         * complete any job scheduled on the shader core should start.
@@ -407,6 +411,7 @@ mali_bool MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbde
        u64 shader_ready_bitmap;
        u64 shader_transitioning_bitmap;
        u64 l2_available_bitmap;
+       u64 prev_l2_available_bitmap;
 
        KBASE_DEBUG_ASSERT(NULL != kbdev);
        lockdep_assert_held(&kbdev->pm.power_change_lock);
@@ -447,19 +452,17 @@ mali_bool MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbde
        }
 
        desired_l3_state = get_desired_cache_status(kbdev->l3_present_bitmap, desired_l2_state);
-
+       prev_l2_available_bitmap = l2_available_bitmap;
        in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_L3, desired_l3_state, 0, NULL, &kbdev->pm.powering_on_l3_state);
        in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_L2, desired_l2_state, 0, &l2_available_bitmap, &kbdev->pm.powering_on_l2_state);
 
-       if( kbdev->l2_available_bitmap != l2_available_bitmap)
-       {
-               KBASE_TIMELINE_POWER_L2(kbdev,l2_available_bitmap);
+       if (kbdev->l2_available_bitmap != l2_available_bitmap) {
+               KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap);
        }
 
        kbdev->l2_available_bitmap = l2_available_bitmap;
 
        if (in_desired_state) {
-
                in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_TILER, kbdev->pm.desired_tiler_state, 0, &tiler_available_bitmap, &kbdev->pm.powering_on_tiler_state);
                in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_SHADER, kbdev->pm.desired_shader_state, kbdev->shader_inuse_bitmap, &shader_available_bitmap, &kbdev->pm.powering_on_shader_state);
 
@@ -496,8 +499,8 @@ mali_bool MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbde
        /* Determine whether the cores are now available (even if the set of
         * available cores is empty). Note that they can be available even if we've
         * not finished transitioning to the desired state */
-       if ((kbdev->shader_available_bitmap & kbdev->pm.desired_shader_state) == kbdev->pm.desired_shader_state
-               && (kbdev->tiler_available_bitmap & kbdev->pm.desired_tiler_state) == kbdev->pm.desired_tiler_state) {
+       if ((kbdev->shader_available_bitmap & kbdev->pm.desired_shader_state) == kbdev->pm.desired_shader_state &&
+                       (kbdev->tiler_available_bitmap & kbdev->pm.desired_tiler_state) == kbdev->pm.desired_tiler_state) {
                cores_are_available = MALI_TRUE;
 
                KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE, NULL, NULL, 0u, (u32)(kbdev->shader_available_bitmap & kbdev->pm.desired_shader_state));
@@ -529,7 +532,7 @@ mali_bool MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbde
                KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
                wake_up(&kbdev->pm.gpu_in_desired_state_wait);
        }
-       
+
        spin_unlock(&kbdev->pm.gpu_powered_lock);
 
        /* kbase_pm_ca_update_core_status can cause one-level recursion into
@@ -544,15 +547,16 @@ mali_bool MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbde
                kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap, shader_transitioning_bitmap);
        }
 
-       /* The core availability policy is not allowed to keep core group 0 off */
+       /* The core availability policy is not allowed to keep core group 0 turned off (unless it was changing the l2 power state) */
        if (!((shader_ready_bitmap | shader_transitioning_bitmap) & kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
-           !(kbase_pm_ca_get_core_mask(kbdev) & kbdev->gpu_props.props.coherency_info.group[0].core_mask))
+                       (prev_l2_available_bitmap == desired_l2_state) &&
+                       !(kbase_pm_ca_get_core_mask(kbdev) & kbdev->gpu_props.props.coherency_info.group[0].core_mask))
                BUG();
 
-       /* The core availability policy is allowed to keep core group 1 off, 
+       /* The core availability policy is allowed to keep core group 1 off,
         * but all jobs specifically targeting CG1 must fail */
        if (!((shader_ready_bitmap | shader_transitioning_bitmap) & kbdev->gpu_props.props.coherency_info.group[1].core_mask) &&
-           !(kbase_pm_ca_get_core_mask(kbdev) & kbdev->gpu_props.props.coherency_info.group[1].core_mask))
+               !(kbase_pm_ca_get_core_mask(kbdev) & kbdev->gpu_props.props.coherency_info.group[1].core_mask))
                kbdev->pm.cg1_disabled = MALI_TRUE;
        else
                kbdev->pm.cg1_disabled = MALI_FALSE;
@@ -581,7 +585,7 @@ void kbase_pm_check_transitions_sync(struct kbase_device *kbdev)
 }
 KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync)
 
-void kbase_pm_enable_interrupts(kbase_device *kbdev)
+void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
 {
        unsigned long flags;
 
@@ -604,7 +608,7 @@ void kbase_pm_enable_interrupts(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts)
 
-void kbase_pm_disable_interrupts(kbase_device *kbdev)
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
 {
        unsigned long flags;
 
@@ -633,10 +637,11 @@ KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts)
  * 0x0004: PMU VERSION ID (RO) (0x00000000)
  * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
  */
-void kbase_pm_clock_on(kbase_device *kbdev, mali_bool is_resume)
+void kbase_pm_clock_on(struct kbase_device *kbdev, mali_bool is_resume)
 {
        mali_bool reset_required = is_resume;
        unsigned long flags;
+
        KBASE_DEBUG_ASSERT(NULL != kbdev);
        lockdep_assert_held(&kbdev->pm.lock);
 
@@ -675,9 +680,10 @@ void kbase_pm_clock_on(kbase_device *kbdev, mali_bool is_resume)
 
 KBASE_EXPORT_TEST_API(kbase_pm_clock_on)
 
-void kbase_pm_clock_off(kbase_device *kbdev, mali_bool is_suspend)
+void kbase_pm_clock_off(struct kbase_device *kbdev, mali_bool is_suspend)
 {
        unsigned long flags;
+
        KBASE_DEBUG_ASSERT(NULL != kbdev);
        lockdep_assert_held(&kbdev->pm.lock);
 
@@ -714,10 +720,10 @@ KBASE_EXPORT_TEST_API(kbase_pm_clock_off)
 struct kbasep_reset_timeout_data {
        struct hrtimer timer;
        mali_bool timed_out;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 };
 
-void kbase_pm_reset_done(kbase_device *kbdev)
+void kbase_pm_reset_done(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        kbdev->pm.reset_done = MALI_TRUE;
@@ -727,7 +733,7 @@ void kbase_pm_reset_done(kbase_device *kbdev)
 /**
  * Wait for the RESET_COMPLETED IRQ to occur, then reset the waiting state.
  */
-STATIC void kbase_pm_wait_for_reset(kbase_device *kbdev)
+STATIC void kbase_pm_wait_for_reset(struct kbase_device *kbdev)
 {
        lockdep_assert_held(&kbdev->pm.lock);
 
@@ -749,10 +755,9 @@ static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-static void kbase_pm_hw_issues(kbase_device *kbdev)
+static void kbase_pm_hw_issues(struct kbase_device *kbdev)
 {
        u32 value = 0;
-       u32 config_value;
 
        /* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443.
         * and
@@ -769,6 +774,10 @@ static void kbase_pm_hw_issues(kbase_device *kbdev)
        if (DEFAULT_ALTERNATIVE_HWC)
                value |= SC_ALT_COUNTERS;
 
+       /* Use software control of forward pixel kill when needed. See MIDEUR-174. */
+       if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_2121))
+               value |= SC_OVERRIDE_FWD_PIXEL_KILL;
+
        /* Needed due to MIDBASE-2795. ENABLE_TEXGRD_FLAGS. See PRLAM-10797. */
        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10797))
                value |= SC_ENABLE_TEXGRD_FLAGS;
@@ -776,23 +785,28 @@ static void kbase_pm_hw_issues(kbase_device *kbdev)
        if (value != 0)
                kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), value, NULL);
 
+       /* Set tiler clock gate override if required */
+       if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953)) {
+               value = kbase_reg_read(kbdev, GPU_CONTROL_REG(TILER_CONFIG), NULL);
+               value |= TC_CLOCK_GATE_OVERRIDE;
+               kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG), value, NULL);
+       }
+
        /* Limit the GPU bus bandwidth if the platform needs this. */
        value = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL);
 
        /* Limit read ID width for AXI */
-       config_value = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_ARID_LIMIT);
        value &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS);
-       value |= (config_value & 0x3) << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
+       value |= (DEFAULT_ARID_LIMIT & 0x3) << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT;
 
        /* Limit write ID width for AXI */
-       config_value = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_AWID_LIMIT);
        value &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
-       value |= (config_value & 0x3) << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
+       value |= (DEFAULT_AWID_LIMIT & 0x3) << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT;
 
        kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), value, NULL);
 }
 
-mali_error kbase_pm_init_hw(kbase_device *kbdev, mali_bool enable_irqs )
+mali_error kbase_pm_init_hw(struct kbase_device *kbdev, mali_bool enable_irqs)
 {
        unsigned long flags;
        struct kbasep_reset_timeout_data rtdata;
@@ -891,18 +905,25 @@ mali_error kbase_pm_init_hw(kbase_device *kbdev, mali_bool enable_irqs )
        return MALI_ERROR_FUNCTION_FAILED;
 
  out:
-       /* Re-enable interrupts if requested*/
-       if ( enable_irqs )
-       {
+
+       /* If the cycle counter was in use, re-enable it. enable_irqs will only be false when called from kbase_pm_powerup */
+       if (kbdev->pm.gpu_cycle_counter_requests && enable_irqs) {
+               /* enable interrupts as the L2 may have to be powered on */
                kbase_pm_enable_interrupts(kbdev);
-       }
-       /* If cycle counter was in use-re enable it */
-       spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
+               kbase_pm_request_l2_caches(kbdev);
 
-       if (kbdev->pm.gpu_cycle_counter_requests)
-               kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_START, NULL);
+               /* Re-enable the counters if we need to */
+               spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
+               if (kbdev->pm.gpu_cycle_counter_requests)
+                       kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_START, NULL);
+               spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
 
-       spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
+               kbase_pm_release_l2_caches(kbdev);
+               kbase_pm_disable_interrupts(kbdev);
+       }
+
+       if (enable_irqs)
+               kbase_pm_enable_interrupts(kbdev);
 
        kbase_pm_hw_issues(kbdev);
 
@@ -911,17 +932,23 @@ mali_error kbase_pm_init_hw(kbase_device *kbdev, mali_bool enable_irqs )
 
 KBASE_EXPORT_TEST_API(kbase_pm_init_hw)
 
-void kbase_pm_request_gpu_cycle_counter(kbase_device *kbdev)
+/** Increase the count of cycle counter users and turn the cycle counters on if they were previously off
+ *
+ * This function is designed to be called by @ref kbase_pm_request_gpu_cycle_counter or
+ * @ref kbase_pm_request_gpu_cycle_counter_l2_is_on only.
+ *
+ * When this function is called the l2 cache must be on and the l2 cache users count must
+ * have been incremented by a call to (@ref kbase_pm_request_l2_caches or @ref kbase_pm_request_l2_caches_l2_is_on).
+ *
+ * @param kbdev     The kbase device structure of the device
+ *
+ */
+static void kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
 {
        unsigned long flags;
-       KBASE_DEBUG_ASSERT(kbdev != NULL);
-
-       KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
 
        spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
 
-       KBASE_DEBUG_ASSERT(kbdev->pm.gpu_cycle_counter_requests < INT_MAX);
-
        ++kbdev->pm.gpu_cycle_counter_requests;
 
        if (1 == kbdev->pm.gpu_cycle_counter_requests)
@@ -930,11 +957,40 @@ void kbase_pm_request_gpu_cycle_counter(kbase_device *kbdev)
        spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
 }
 
+void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+       KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
+
+       KBASE_DEBUG_ASSERT(kbdev->pm.gpu_cycle_counter_requests < INT_MAX);
+
+       kbase_pm_request_l2_caches(kbdev);
+
+       kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
 KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter)
 
-void kbase_pm_release_gpu_cycle_counter(kbase_device *kbdev)
+void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
+{
+       KBASE_DEBUG_ASSERT(kbdev != NULL);
+
+       KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
+
+       KBASE_DEBUG_ASSERT(kbdev->pm.gpu_cycle_counter_requests < INT_MAX);
+
+       kbase_pm_request_l2_caches_l2_is_on(kbdev);
+
+       kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on)
+
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
 {
        unsigned long flags;
+
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
        spin_lock_irqsave(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
@@ -947,6 +1003,9 @@ void kbase_pm_release_gpu_cycle_counter(kbase_device *kbdev)
                kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
 
        spin_unlock_irqrestore(&kbdev->pm.gpu_cycle_counter_requests_lock, flags);
+
+       kbase_pm_release_l2_caches(kbdev);
 }
 
 KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter)
+#endif /* KBASE_PM_EN */
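
For reference, a minimal usage sketch (not part of the commit) of the reworked cycle-counter API above: the request call now powers the L2 before starting the counters, and the release call drops that L2 reference again. The helper name and the single LO/HI read are illustrative only; real callers live elsewhere in the driver and should guard against the HI word changing between the two reads.

	static u64 example_read_gpu_cycle_count(struct kbase_device *kbdev)
	{
		u32 lo, hi;

		/* Powers the L2 if needed and starts the counters on the first request */
		kbase_pm_request_gpu_cycle_counter(kbdev);

		hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI), NULL);
		lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);

		/* Stops the counters and releases the L2 once the request count hits zero */
		kbase_pm_release_gpu_cycle_counter(kbdev);

		return ((u64)hi << 32) | lo;
	}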
index c02940c1c07567e81229c5389447c1034f86a68b..4eda9663281e156c91b16f3fcbc8c90fec7ea790 100755 (executable)
@@ -24,7 +24,7 @@
 
 #include <mali_kbase.h>
 #include <mali_kbase_pm.h>
-
+#if KBASE_PM_EN
 /* When VSync is being hit aim for utilisation between 70-90% */
 #define KBASE_PM_VSYNC_MIN_UTILISATION          70
 #define KBASE_PM_VSYNC_MAX_UTILISATION          90
    Exceeding this will cause overflow */
 #define KBASE_PM_TIME_SHIFT                    8
 
+#ifdef CONFIG_MALI_MIDGARD_DVFS
 static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
 {
        unsigned long flags;
-       kbase_pm_dvfs_action action;
-       kbasep_pm_metrics_data *metrics;
+       enum kbase_pm_dvfs_action action;
+       struct kbasep_pm_metrics_data *metrics;
 
        KBASE_DEBUG_ASSERT(timer != NULL);
 
-       metrics = container_of(timer, kbasep_pm_metrics_data, timer);
+       metrics = container_of(timer, struct kbasep_pm_metrics_data, timer);
        action = kbase_pm_get_dvfs_action(metrics->kbdev);
 
        spin_lock_irqsave(&metrics->lock, flags);
@@ -59,11 +60,10 @@ static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
 
        return HRTIMER_NORESTART;
 }
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
 
-mali_error kbasep_pm_metrics_init(kbase_device *kbdev)
+mali_error kbasep_pm_metrics_init(struct kbase_device *kbdev)
 {
-       static bool timer_inited = false;
-
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
        kbdev->pm.metrics.kbdev = kbdev;
@@ -77,7 +77,6 @@ mali_error kbasep_pm_metrics_init(kbase_device *kbdev)
        kbdev->pm.metrics.time_busy = 0;
        kbdev->pm.metrics.time_idle = 0;
        kbdev->pm.metrics.gpu_active = MALI_TRUE;
-       kbdev->pm.metrics.timer_active = MALI_TRUE;
        kbdev->pm.metrics.active_cl_ctx[0] = 0;
        kbdev->pm.metrics.active_cl_ctx[1] = 0;
        kbdev->pm.metrics.active_gl_ctx = 0;
@@ -87,13 +86,13 @@ mali_error kbasep_pm_metrics_init(kbase_device *kbdev)
 
        spin_lock_init(&kbdev->pm.metrics.lock);
 
-       if (!timer_inited) {
-               hrtimer_init(&kbdev->pm.metrics.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-               timer_inited = true;
-       }
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+       kbdev->pm.metrics.timer_active = MALI_TRUE;
+       hrtimer_init(&kbdev->pm.metrics.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        kbdev->pm.metrics.timer.function = dvfs_callback;
 
        hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(kbdev->pm.platform_dvfs_frequency), HRTIMER_MODE_REL);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
 
        kbase_pm_register_vsync_callback(kbdev);
 
@@ -102,8 +101,9 @@ mali_error kbasep_pm_metrics_init(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init)
 
-void kbasep_pm_metrics_term(kbase_device *kbdev)
+void kbasep_pm_metrics_term(struct kbase_device *kbdev)
 {
+#ifdef CONFIG_MALI_MIDGARD_DVFS
        unsigned long flags;
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -112,6 +112,7 @@ void kbasep_pm_metrics_term(kbase_device *kbdev)
        spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
 
        hrtimer_cancel(&kbdev->pm.metrics.timer);
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
 
        kbase_pm_unregister_vsync_callback(kbdev);
 }
@@ -119,7 +120,7 @@ void kbasep_pm_metrics_term(kbase_device *kbdev)
 KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term)
 
 /*caller needs to hold kbdev->pm.metrics.lock before calling this function*/
-void kbasep_pm_record_job_status(kbase_device *kbdev)
+void kbasep_pm_record_job_status(struct kbase_device *kbdev)
 {
        ktime_t now;
        ktime_t diff;
@@ -140,7 +141,7 @@ void kbasep_pm_record_job_status(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbasep_pm_record_job_status)
 
-void kbasep_pm_record_gpu_idle(kbase_device *kbdev)
+void kbasep_pm_record_gpu_idle(struct kbase_device *kbdev)
 {
        unsigned long flags;
 
@@ -159,7 +160,7 @@ void kbasep_pm_record_gpu_idle(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbasep_pm_record_gpu_idle)
 
-void kbasep_pm_record_gpu_active(kbase_device *kbdev)
+void kbasep_pm_record_gpu_active(struct kbase_device *kbdev)
 {
        unsigned long flags;
        ktime_t now;
@@ -184,7 +185,7 @@ void kbasep_pm_record_gpu_active(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbasep_pm_record_gpu_active)
 
-void kbase_pm_report_vsync(kbase_device *kbdev, int buffer_updated)
+void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated)
 {
        unsigned long flags;
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -197,11 +198,8 @@ void kbase_pm_report_vsync(kbase_device *kbdev, int buffer_updated)
 KBASE_EXPORT_TEST_API(kbase_pm_report_vsync)
 
 /*caller needs to hold kbdev->pm.metrics.lock before calling this function*/
-int kbase_pm_get_dvfs_utilisation(kbase_device *kbdev, int *util_gl_share, int util_cl_share[2])
+static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev, ktime_t now)
 {
-       int utilisation = 0;
-       int busy;
-       ktime_t now = ktime_get();
        ktime_t diff;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -214,11 +212,52 @@ int kbase_pm_get_dvfs_utilisation(kbase_device *kbdev, int *util_gl_share, int u
                kbdev->pm.metrics.busy_cl[0] += ns_time * kbdev->pm.metrics.active_cl_ctx[0];
                kbdev->pm.metrics.busy_cl[1] += ns_time * kbdev->pm.metrics.active_cl_ctx[1];
                kbdev->pm.metrics.busy_gl += ns_time * kbdev->pm.metrics.active_gl_ctx;
-               kbdev->pm.metrics.time_period_start = now;
        } else {
                kbdev->pm.metrics.time_idle += (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
-               kbdev->pm.metrics.time_period_start = now;
        }
+}
+
+/*caller needs to hold kbdev->pm.metrics.lock before calling this function*/
+static void kbase_pm_get_dvfs_utilisation_reset(struct kbase_device *kbdev, ktime_t now)
+{
+       kbdev->pm.metrics.time_period_start = now;
+       kbdev->pm.metrics.time_idle = 0;
+       kbdev->pm.metrics.time_busy = 0;
+       kbdev->pm.metrics.busy_cl[0] = 0;
+       kbdev->pm.metrics.busy_cl[1] = 0;
+       kbdev->pm.metrics.busy_gl = 0;
+}
+
+void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev, unsigned long *total, unsigned long *busy, bool reset)
+{
+       ktime_t now = ktime_get();
+       unsigned long tmp, flags;
+
+       spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
+       kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
+
+       tmp = kbdev->pm.metrics.busy_gl;
+       tmp += kbdev->pm.metrics.busy_cl[0];
+       tmp += kbdev->pm.metrics.busy_cl[1];
+
+       *busy = tmp;
+       *total = tmp + kbdev->pm.metrics.time_idle;
+
+       if (reset)
+               kbase_pm_get_dvfs_utilisation_reset(kbdev, now);
+       spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
+}
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+
+/*caller needs to hold kbdev->pm.metrics.lock before calling this function*/
+int kbase_pm_get_dvfs_utilisation_old(struct kbase_device *kbdev, int *util_gl_share, int util_cl_share[2])
+{
+       int utilisation;
+       int busy;
+       ktime_t now = ktime_get();
+
+       kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
 
        if (kbdev->pm.metrics.time_idle + kbdev->pm.metrics.time_busy == 0) {
                /* No data - so we return NOP */
@@ -260,30 +299,25 @@ int kbase_pm_get_dvfs_utilisation(kbase_device *kbdev, int *util_gl_share, int u
        }
 
 out:
-
-       kbdev->pm.metrics.time_idle = 0;
-       kbdev->pm.metrics.time_busy = 0;
-       kbdev->pm.metrics.busy_cl[0] = 0;
-       kbdev->pm.metrics.busy_cl[1] = 0;
-       kbdev->pm.metrics.busy_gl = 0;
+       kbase_pm_get_dvfs_utilisation_reset(kbdev, now);
 
        return utilisation;
 }
 
-kbase_pm_dvfs_action kbase_pm_get_dvfs_action(kbase_device *kbdev)
+enum kbase_pm_dvfs_action kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
 {
        unsigned long flags;
        int utilisation, util_gl_share;
        int util_cl_share[2];
-       kbase_pm_dvfs_action action;
+       enum kbase_pm_dvfs_action action;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
        spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
 
-       utilisation = kbase_pm_get_dvfs_utilisation(kbdev, &util_gl_share, util_cl_share);
+       utilisation = kbase_pm_get_dvfs_utilisation_old(kbdev, &util_gl_share, util_cl_share);
 
-       if (utilisation < 0 || util_gl_share < 0 || util_cl_share < 0) {
+       if (utilisation < 0 || util_gl_share < 0 || util_cl_share[0] < 0 || util_cl_share[1] < 0) {
                action = KBASE_PM_DVFS_NOP;
                utilisation = 0;
                util_gl_share = 0;
@@ -329,7 +363,7 @@ out:
 }
 KBASE_EXPORT_TEST_API(kbase_pm_get_dvfs_action)
 
-mali_bool kbase_pm_metrics_is_active(kbase_device *kbdev)
+mali_bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
 {
        mali_bool isactive;
        unsigned long flags;
@@ -343,3 +377,7 @@ mali_bool kbase_pm_metrics_is_active(kbase_device *kbdev)
        return isactive;
 }
 KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active)
+
+#endif /* CONFIG_MALI_MIDGARD_DVFS */
+
+#endif  /* KBASE_PM_EN */
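
As a rough illustration of the split interface above (the internal _calc/_reset helpers plus the public kbase_pm_get_dvfs_utilisation), a governor outside this file could derive a utilisation percentage as sketched below; the helper name is made up for the example:

	static int example_gpu_utilisation_percent(struct kbase_device *kbdev)
	{
		unsigned long total, busy;

		/* Passing reset=true restarts the accumulation window for the next sample */
		kbase_pm_get_dvfs_utilisation(kbdev, &total, &busy, true);

		if (total == 0)
			return 0;

		return (int)((busy * 100) / total);
	}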
index 81dd06b5ed6768d88f90e0d1ea88704d1fb0b514..7823d824f5a32888b0367904681b96013ad8ffbb 100755 (executable)
@@ -25,7 +25,8 @@
 #include <mali_kbase.h>
 #include <mali_kbase_pm.h>
 
-void kbase_pm_register_vsync_callback(kbase_device *kbdev)
+#if KBASE_PM_EN
+void kbase_pm_register_vsync_callback(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -33,7 +34,8 @@ void kbase_pm_register_vsync_callback(kbase_device *kbdev)
        kbdev->pm.metrics.platform_data = NULL;
 }
 
-void kbase_pm_unregister_vsync_callback(kbase_device *kbdev)
+void kbase_pm_unregister_vsync_callback(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 }
+#endif  /* KBASE_PM_EN */
index 7e43512181bd24f96687edcae05a178f898db7c9..fcae1a02100e802dfbb156210b02ee095d95144a 100755 (executable)
 #include <mali_kbase_gator.h>
 #include <mali_kbase_pm.h>
 
-extern const kbase_pm_policy kbase_pm_always_on_policy_ops;
-extern const kbase_pm_policy kbase_pm_coarse_demand_policy_ops;
-extern const kbase_pm_policy kbase_pm_demand_policy_ops;
+#if KBASE_PM_EN
 
-#if MALI_CUSTOMER_RELEASE == 0 
-extern const kbase_pm_policy kbase_pm_fast_start_policy_ops;
-extern const kbase_pm_policy kbase_pm_demand_always_powered_policy_ops;
+extern const struct kbase_pm_policy kbase_pm_always_on_policy_ops;
+extern const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops;
+extern const struct kbase_pm_policy kbase_pm_demand_policy_ops;
+
+#if !MALI_CUSTOMER_RELEASE
+extern const struct kbase_pm_policy kbase_pm_fast_start_policy_ops;
+extern const struct kbase_pm_policy kbase_pm_demand_always_powered_policy_ops;
 #endif
 
-static const kbase_pm_policy *const policy_list[] = {
+static const struct kbase_pm_policy *const policy_list[] = {
 #ifdef CONFIG_MALI_NO_MALI
        &kbase_pm_always_on_policy_ops,
        &kbase_pm_demand_policy_ops,
        &kbase_pm_coarse_demand_policy_ops,
-#if MALI_CUSTOMER_RELEASE == 0 
+#if !MALI_CUSTOMER_RELEASE
        &kbase_pm_demand_always_powered_policy_ops,
        &kbase_pm_fast_start_policy_ops,
 #endif
@@ -47,7 +49,7 @@ static const kbase_pm_policy *const policy_list[] = {
        &kbase_pm_demand_policy_ops,
        &kbase_pm_always_on_policy_ops,
        &kbase_pm_coarse_demand_policy_ops,
-#if MALI_CUSTOMER_RELEASE == 0        
+#if !MALI_CUSTOMER_RELEASE
        &kbase_pm_demand_always_powered_policy_ops,
        &kbase_pm_fast_start_policy_ops,
 #endif
@@ -61,8 +63,7 @@ static const kbase_pm_policy *const policy_list[] = {
 
 
 /* Function IDs for looking up Timeline Trace codes in kbase_pm_change_state_trace_code */
-typedef enum
-{
+enum kbase_pm_func_id {
        KBASE_PM_FUNC_ID_REQUEST_CORES_START,
        KBASE_PM_FUNC_ID_REQUEST_CORES_END,
        KBASE_PM_FUNC_ID_RELEASE_CORES_START,
@@ -74,12 +75,11 @@ typedef enum
 
        /* Must be the last */
        KBASE_PM_FUNC_ID_COUNT
-} kbase_pm_func_id;
+};
 
 
 /* State changes during request/unrequest/release-ing cores */
-enum
-{
+enum {
        KBASE_PM_CHANGE_STATE_SHADER = (1u << 0),
        KBASE_PM_CHANGE_STATE_TILER  = (1u << 1),
 
@@ -92,8 +92,7 @@ typedef u32 kbase_pm_change_state;
 
 #ifdef CONFIG_MALI_TRACE_TIMELINE
 /* Timeline Trace code lookups for each function */
-static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT][KBASE_PM_CHANGE_STATE_COUNT] =
-{
+static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT][KBASE_PM_CHANGE_STATE_COUNT] = {
        /* kbase_pm_request_cores */
        [KBASE_PM_FUNC_ID_REQUEST_CORES_START][0] = 0,
        [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
@@ -129,11 +128,12 @@ static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT][KBASE_PM_CHA
                SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END
 };
 
-STATIC INLINE void kbase_timeline_pm_cores_func(kbase_device *kbdev,
-                                                kbase_pm_func_id func_id,
-                                                kbase_pm_change_state state)
+STATIC INLINE void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
+               enum kbase_pm_func_id func_id,
+               kbase_pm_change_state state)
 {
        int trace_code;
+
        KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT);
        KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) == state);
 
@@ -142,9 +142,8 @@ STATIC INLINE void kbase_timeline_pm_cores_func(kbase_device *kbdev,
 }
 
 #else /* CONFIG_MALI_TRACE_TIMELINE */
-STATIC INLINE void kbase_timeline_pm_cores_func(kbase_device *kbdev,
-                                                kbase_pm_func_id func_id,
-                                                kbase_pm_change_state state)
+STATIC INLINE void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
+               enum kbase_pm_func_id func_id, kbase_pm_change_state state)
 {
 }
 
@@ -152,9 +151,9 @@ STATIC INLINE void kbase_timeline_pm_cores_func(kbase_device *kbdev,
 
 static enum hrtimer_restart kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
 {
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
 
-       kbdev = container_of(timer, kbase_device, pm.gpu_poweroff_timer);
+       kbdev = container_of(timer, struct kbase_device, pm.gpu_poweroff_timer);
 
        /* It is safe for this call to do nothing if the work item is already queued.
         * The worker function will read the must up-to-date state of kbdev->pm.gpu_poweroff_pending
@@ -189,7 +188,7 @@ static enum hrtimer_restart kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *t
 
                                        KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
                                        cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
-                                       KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);               
+                                       KBASE_TIMELINE_PM_CHECKTRANS(kbdev, SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);
 
                                        /* Don't need 'cores_are_available', because we don't return anything */
                                        CSTD_UNUSED(cores_are_available);
@@ -207,10 +206,10 @@ static enum hrtimer_restart kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *t
 static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
 {
        unsigned long flags;
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        mali_bool do_poweroff = MALI_FALSE;
 
-       kbdev = container_of(data, kbase_device, pm.gpu_poweroff_work);
+       kbdev = container_of(data, struct kbase_device, pm.gpu_poweroff_work);
 
        mutex_lock(&kbdev->pm.lock);
 
@@ -246,7 +245,7 @@ static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
        mutex_unlock(&kbdev->pm.lock);
 }
 
-mali_error kbase_pm_policy_init(kbase_device *kbdev)
+mali_error kbase_pm_policy_init(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -270,12 +269,12 @@ mali_error kbase_pm_policy_init(kbase_device *kbdev)
        return MALI_ERROR_NONE;
 }
 
-void kbase_pm_policy_term(kbase_device *kbdev)
+void kbase_pm_policy_term(struct kbase_device *kbdev)
 {
        kbdev->pm.pm_current_policy->term(kbdev);
 }
 
-void kbase_pm_cancel_deferred_poweroff(kbase_device *kbdev)
+void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev)
 {
        unsigned long flags;
 
@@ -294,7 +293,7 @@ void kbase_pm_cancel_deferred_poweroff(kbase_device *kbdev)
        spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
 }
 
-void kbase_pm_update_active(kbase_device *kbdev)
+void kbase_pm_update_active(struct kbase_device *kbdev)
 {
        unsigned long flags;
        mali_bool active;
@@ -352,7 +351,7 @@ void kbase_pm_update_active(kbase_device *kbdev)
        }
 }
 
-void kbase_pm_update_cores_state_nolock(kbase_device *kbdev)
+void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
 {
        u64 desired_bitmap;
        mali_bool cores_are_available;
@@ -375,7 +374,6 @@ void kbase_pm_update_cores_state_nolock(kbase_device *kbdev)
        /* Are any cores being powered on? */
        if (~kbdev->pm.desired_shader_state & desired_bitmap ||
            kbdev->pm.ca_in_transition != MALI_FALSE) {
-
                /* Check if we are powering off any cores before updating shader state */
                if (kbdev->pm.desired_shader_state & ~desired_bitmap) {
                        /* Start timer to power off cores */
@@ -409,7 +407,7 @@ void kbase_pm_update_cores_state_nolock(kbase_device *kbdev)
        CSTD_UNUSED(cores_are_available);
 }
 
-void kbase_pm_update_cores_state(kbase_device *kbdev)
+void kbase_pm_update_cores_state(struct kbase_device *kbdev)
 {
        unsigned long flags;
 
@@ -420,7 +418,7 @@ void kbase_pm_update_cores_state(kbase_device *kbdev)
        spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
 }
 
-int kbase_pm_list_policies(const kbase_pm_policy * const **list)
+int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
 {
        if (!list)
                return POLICY_COUNT;
@@ -432,7 +430,7 @@ int kbase_pm_list_policies(const kbase_pm_policy * const **list)
 
 KBASE_EXPORT_TEST_API(kbase_pm_list_policies)
 
-const kbase_pm_policy *kbase_pm_get_policy(kbase_device *kbdev)
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
 {
        KBASE_DEBUG_ASSERT(kbdev != NULL);
 
@@ -441,9 +439,9 @@ const kbase_pm_policy *kbase_pm_get_policy(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbase_pm_get_policy)
 
-void kbase_pm_set_policy(kbase_device *kbdev, const kbase_pm_policy *new_policy)
+void kbase_pm_set_policy(struct kbase_device *kbdev, const struct kbase_pm_policy *new_policy)
 {
-       const kbase_pm_policy *old_policy;
+       const struct kbase_pm_policy *old_policy;
        unsigned long flags;
 
        KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -490,14 +488,14 @@ void kbase_pm_set_policy(kbase_device *kbdev, const kbase_pm_policy *new_policy)
 KBASE_EXPORT_TEST_API(kbase_pm_set_policy)
 
 /** Check whether a state change has finished, and trace it as completed */
-STATIC void kbase_pm_trace_check_and_finish_state_change(kbase_device *kbdev)
+STATIC void kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev)
 {
-       if ((kbdev->shader_available_bitmap & kbdev->pm.desired_shader_state) == kbdev->pm.desired_shader_state
-               && (kbdev->tiler_available_bitmap & kbdev->pm.desired_tiler_state) == kbdev->pm.desired_tiler_state)
+       if ((kbdev->shader_available_bitmap & kbdev->pm.desired_shader_state) == kbdev->pm.desired_shader_state &&
+               (kbdev->tiler_available_bitmap & kbdev->pm.desired_tiler_state) == kbdev->pm.desired_tiler_state)
                kbase_timeline_pm_check_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
 }
 
-void kbase_pm_request_cores(kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores)
+void kbase_pm_request_cores(struct kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores)
 {
        unsigned long flags;
        u64 cores;
@@ -514,8 +512,8 @@ void kbase_pm_request_cores(kbase_device *kbdev, mali_bool tiler_required, u64 s
                u64 bit = 1ULL << bitnum;
 
                /* It should be almost impossible for this to overflow. It would require 2^32 atoms
-                * to request a particular core, which would require 2^24 contexts to submit. This 
-                * would require an amount of memory that is impossible on a 32-bit system and 
+                * to request a particular core, which would require 2^24 contexts to submit. This
+                * would require an amount of memory that is impossible on a 32-bit system and
                 * extremely unlikely on a 64-bit system. */
                int cnt = ++kbdev->shader_needed_cnt[bitnum];
 
@@ -533,7 +531,7 @@ void kbase_pm_request_cores(kbase_device *kbdev, mali_bool tiler_required, u64 s
                KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt != 0);
 
                /* For tiler jobs, we must make sure that core 0 is not turned off if it's already on.
-                * However, it's safe for core 0 to be left off and turned on later whilst a tiler job
+                * However, it's safe for core 0 to be left off and turned on later whilst a tiler job
                 * is running. Hence, we don't need to update the cores state immediately. Also,
                 * attempts to turn off cores will always check the tiler_needed/inuse state first anyway.
                 *
@@ -559,7 +557,7 @@ void kbase_pm_request_cores(kbase_device *kbdev, mali_bool tiler_required, u64 s
 
 KBASE_EXPORT_TEST_API(kbase_pm_request_cores)
 
-void kbase_pm_unrequest_cores(kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores)
+void kbase_pm_unrequest_cores(struct kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores)
 {
        unsigned long flags;
 
@@ -614,7 +612,7 @@ void kbase_pm_unrequest_cores(kbase_device *kbdev, mali_bool tiler_required, u64
 
 KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores)
 
-kbase_pm_cores_ready kbase_pm_register_inuse_cores(kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores)
+enum kbase_pm_cores_ready kbase_pm_register_inuse_cores(struct kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores)
 {
        unsigned long flags;
        u64 prev_shader_needed; /* Just for tracing */
@@ -693,7 +691,7 @@ kbase_pm_cores_ready kbase_pm_register_inuse_cores(kbase_device *kbdev, mali_boo
 
 KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores)
 
-void kbase_pm_release_cores(kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores)
+void kbase_pm_release_cores(struct kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores)
 {
        unsigned long flags;
        kbase_pm_change_state change_gpu_state = 0u;
@@ -756,18 +754,21 @@ void kbase_pm_request_cores_sync(struct kbase_device *kbdev, mali_bool tiler_req
 
 KBASE_EXPORT_TEST_API(kbase_pm_request_cores_sync)
 
-void kbase_pm_request_l2_caches(kbase_device *kbdev)
+void kbase_pm_request_l2_caches(struct kbase_device *kbdev)
 {
        unsigned long flags;
        u32 prior_l2_users_count;
+
        spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
 
        prior_l2_users_count = kbdev->l2_users_count++;
 
        KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0);
 
-       if (!prior_l2_users_count)
-               kbase_pm_update_cores_state_nolock(kbdev);
+       /* If the GPU is reset while the l2 is on, l2 will be off but prior_l2_users_count will be > 0.
+        * l2_available_bitmap will have been set to 0 by kbase_pm_init_hw, though. */
+       if (!prior_l2_users_count || !kbdev->l2_available_bitmap)
+               kbase_pm_check_transitions_nolock(kbdev);
 
        spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
        wait_event(kbdev->pm.l2_powered_wait, kbdev->pm.l2_powered == 1);
@@ -778,7 +779,19 @@ void kbase_pm_request_l2_caches(kbase_device *kbdev)
 
 KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches)
 
-void kbase_pm_release_l2_caches(kbase_device *kbdev)
+void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+
+       kbdev->l2_users_count++;
+
+       spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+}
+
+KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on)
+
+void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
 {
        unsigned long flags;
        spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
@@ -788,7 +801,7 @@ void kbase_pm_release_l2_caches(kbase_device *kbdev)
        --kbdev->l2_users_count;
 
        if (!kbdev->l2_users_count) {
-               kbase_pm_update_cores_state_nolock(kbdev);
+               kbase_pm_check_transitions_nolock(kbdev);
                /* Trace that any state change completed immediately */
                kbase_pm_trace_check_and_finish_state_change(kbdev);
        }
@@ -797,4 +810,4 @@ void kbase_pm_release_l2_caches(kbase_device *kbdev)
 }
 
 KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches)
-
+#endif /* KBASE_PM_EN */
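
A minimal sketch (not part of the commit) of how the policy list/get/set API above is typically driven, e.g. from a sysfs handler. It assumes the policy structure's name field, which is not shown in this hunk:

	static int example_set_policy_by_name(struct kbase_device *kbdev, const char *name)
	{
		const struct kbase_pm_policy *const *policies;
		int i, count;

		count = kbase_pm_list_policies(&policies);

		for (i = 0; i < count; i++) {
			if (strcmp(policies[i]->name, name) == 0) {
				kbase_pm_set_policy(kbdev, policies[i]);
				return 0;
			}
		}

		return -EINVAL;
	}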
index 007cdde946052b361c0287a36f654cb37d791200..4a8cc44b6308fc6cedc78e96d071b00eea0c7e1c 100755 (executable)
 #define _KBASE_PM_POLICY_H_
 
 /** List of policy IDs */
-typedef enum kbase_pm_policy_id {
+enum kbase_pm_policy_id {
        KBASE_PM_POLICY_ID_DEMAND = 1,
        KBASE_PM_POLICY_ID_ALWAYS_ON,
        KBASE_PM_POLICY_ID_COARSE_DEMAND,
-#if MALI_CUSTOMER_RELEASE == 0
+#if !MALI_CUSTOMER_RELEASE
        KBASE_PM_POLICY_ID_DEMAND_ALWAYS_POWERED,
        KBASE_PM_POLICY_ID_FAST_START
 #endif
-} kbase_pm_policy_id;
+};
 
 typedef u32 kbase_pm_policy_flags;
 
@@ -54,13 +54,13 @@ typedef struct kbase_pm_policy {
         *
         * @param kbdev     The kbase device structure for the device (must be a valid pointer)
         */
-       void (*init) (struct kbase_device *kbdev);
+       void (*init)(struct kbase_device *kbdev);
 
        /** Function called when the policy is unselected.
         *
         * @param kbdev     The kbase device structure for the device (must be a valid pointer)
         */
-       void (*term) (struct kbase_device *kbdev);
+       void (*term)(struct kbase_device *kbdev);
 
        /** Function called to get the current shader core mask
         *
@@ -69,7 +69,7 @@ typedef struct kbase_pm_policy {
         * @param kbdev     The kbase device structure for the device (must be a valid pointer)
         *
         * @return     The mask of shader cores to be powered */
-       u64 (*get_core_mask) (struct kbase_device *kbdev);
+       u64 (*get_core_mask)(struct kbase_device *kbdev);
 
        /** Function called to get the current overall GPU power state
         *
@@ -88,7 +88,7 @@ typedef struct kbase_pm_policy {
        /** Field indicating an ID for this policy. This is not necessarily the
         * same as its index in the list returned by kbase_pm_list_policies().
         * It is used purely for debugging. */
-       kbase_pm_policy_id id;
+       enum kbase_pm_policy_id id;
 } kbase_pm_policy;
 
 /** Initialize power policy framework
@@ -128,14 +128,14 @@ void kbase_pm_update_cores(struct kbase_device *kbdev);
  *
  * @return The current policy
  */
-const kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev);
+const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev);
 
 /** Change the policy to the one specified.
  *
  * @param kbdev     The kbase device structure for the device (must be a valid pointer)
  * @param policy    The policy to change to (valid pointer returned from @ref kbase_pm_list_policies)
  */
-void kbase_pm_set_policy(struct kbase_device *kbdev, const kbase_pm_policy *policy);
+void kbase_pm_set_policy(struct kbase_device *kbdev, const struct kbase_pm_policy *policy);
 
 /** Retrieve a static list of the available policies.
  * @param[out]  policies    An array pointer to take the list of policies. This may be NULL.
@@ -143,14 +143,14 @@ void kbase_pm_set_policy(struct kbase_device *kbdev, const kbase_pm_policy *poli
  *
  * @return The number of policies
  */
-int kbase_pm_list_policies(const kbase_pm_policy * const **policies);
+int kbase_pm_list_policies(const struct kbase_pm_policy * const **policies);
 
 
-typedef enum kbase_pm_cores_ready {
+enum kbase_pm_cores_ready {
        KBASE_CORES_NOT_READY = 0,
        KBASE_NEW_AFFINITY = 1,
        KBASE_CORES_READY = 2
-} kbase_pm_cores_ready;
+};
 
 
 /** Synchronous variant of kbase_pm_request_cores()
@@ -221,7 +221,7 @@ void kbase_pm_unrequest_cores(struct kbase_device *kbdev, mali_bool tiler_requir
  *
  * @return MALI_TRUE if the job can be submitted to the hardware or MALI_FALSE if the job is not ready to run.
  */
-mali_bool kbase_pm_register_inuse_cores(struct kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores);
+enum kbase_pm_cores_ready kbase_pm_register_inuse_cores(struct kbase_device *kbdev, mali_bool tiler_required, u64 shader_cores);
 
 /** Release cores after a job has run.
  *
@@ -252,6 +252,14 @@ void kbase_pm_release_cores(struct kbase_device *kbdev, mali_bool tiler_required
  */
 void kbase_pm_request_l2_caches(struct kbase_device *kbdev);
 
+/** Increment the count of l2 users but do not attempt to power on the l2.
+ *  It is the caller's responsibility to ensure that the l2 is already powered up
+ *  and to eventually call @ref kbase_pm_release_l2_caches.
+ *
+ * @param kbdev    The kbase device structure for the device (must be a valid pointer)
+ */
+void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev);
+
 /** Release the use of l2 caches for all core groups and allow the power manager to
  *  power them down when necessary.
  *
diff --git a/drivers/gpu/arm/midgard/mali_kbase_power_actor.c b/drivers/gpu/arm/midgard/mali_kbase_power_actor.c
new file mode 100755 (executable)
index 0000000..5d07da2
--- /dev/null
@@ -0,0 +1,268 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#include <mali_kbase.h>
+#include <mali_kbase_config_defaults.h>
+
+#include <linux/devfreq_cooling.h>
+#include <linux/power_actor.h>
+#include <linux/thermal.h>
+
+#include "mali_kbase_power_actor.h"
+
+
+static u32 mali_pa_get_req_power(struct power_actor *actor, struct thermal_zone_device *zone)
+{
+       struct mali_power_actor *mali_actor = actor->data;
+       struct kbase_device *kbdev = mali_actor->kbdev;
+       struct devfreq_dev_status stat;
+       unsigned long power, temperature;
+       int err;
+       struct dev_pm_opp *opp;
+       unsigned long voltage;
+       unsigned long freq;
+
+
+       err = kbdev->devfreq->profile->get_dev_status(kbdev->dev, &stat);
+       if (err) {
+               dev_err(kbdev->dev, "Failed to get devfreq status (%d)\n", err);
+               return 0;
+       }
+
+       freq = stat.current_frequency;
+
+       rcu_read_lock();
+       opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
+       if (IS_ERR_OR_NULL(opp)) {
+               rcu_read_unlock();
+               return 0;
+       }
+
+       voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
+       rcu_read_unlock();
+
+       power = mali_actor->ops->get_dynamic_power(freq);
+       power = (power * stat.busy_time) / stat.total_time;
+
+       temperature = zone->temperature;
+
+       /* Assume all cores are always powered */
+       power += mali_actor->ops->get_static_power(voltage, temperature);
+
+       dev_dbg(kbdev->dev, "get req power = %lu\n", power);
+
+       return (u32)power;
+}
+
+static u32 mali_pa_get_max_power(struct power_actor *actor, struct thermal_zone_device *zone)
+{
+       struct mali_power_actor *mali_actor = actor->data;
+       struct kbase_device *kbdev = mali_actor->kbdev;
+       struct dev_pm_opp *opp;
+       unsigned long voltage, temperature;
+       unsigned long freq = ULONG_MAX;
+       u32 power;
+
+       rcu_read_lock();
+       opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
+       if (IS_ERR_OR_NULL(opp)) {
+               rcu_read_unlock();
+               dev_err(kbdev->dev, "Failed to get OPP for max freq\n");
+               return 0;
+       }
+       voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
+       rcu_read_unlock();
+
+       temperature = zone->temperature;
+
+       power = mali_actor->ops->get_static_power(voltage, temperature)
+                       + mali_actor->ops->get_dynamic_power(freq);
+
+       dev_dbg(kbdev->dev, "get max power = %u\n", power);
+
+       return power;
+}
+
+static int mali_pa_set_power(struct power_actor *actor, struct thermal_zone_device *zone, u32 power)
+{
+       struct mali_power_actor *mali_actor = actor->data;
+       struct kbase_device *kbdev = mali_actor->kbdev;
+       struct thermal_cooling_device *cdev;
+       struct devfreq_dev_status stat;
+       unsigned long freq, state;
+       unsigned long static_power, normalized_power;
+       unsigned long voltage, temperature;
+       struct dev_pm_opp *opp;
+       int err, i;
+
+       dev_dbg(kbdev->dev, "Setting max power %u\n", power);
+
+       err = kbdev->devfreq->profile->get_dev_status(kbdev->dev, &stat);
+       if (err) {
+               dev_err(kbdev->dev, "Failed to get devfreq status (%d)\n", err);
+               return err;
+       }
+
+       freq = stat.current_frequency;
+
+       rcu_read_lock();
+       opp = dev_pm_opp_find_freq_exact(kbdev->dev, freq, true);
+       if (IS_ERR_OR_NULL(opp)) {
+               rcu_read_unlock();
+               return -ENOENT;
+       }
+       voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
+       rcu_read_unlock();
+
+       temperature = zone->temperature;
+
+       static_power = mali_actor->ops->get_static_power(voltage, temperature);
+
+       if (power < static_power) {
+               normalized_power = 0;
+       } else {
+               unsigned long dyn_power = power - static_power;
+
+               if (!stat.busy_time)
+                       normalized_power = dyn_power;
+               else
+                       normalized_power = (dyn_power * stat.total_time) / stat.busy_time;
+       }
+
+       /* Find target frequency. Use the lowest OPP if allocated power is too
+        * low. */
+       freq = mali_actor->dyn_table[0].freq;
+       for (i = 1; i < mali_actor->dyn_table_count; i++) {
+               if (mali_actor->dyn_table[i].power > normalized_power)
+                       break;
+               else
+                       freq = mali_actor->dyn_table[i].freq;
+       }
+
+       state = devfreq_cooling_get_level(kbdev->devfreq, freq);
+       if (state == THERMAL_CSTATE_INVALID) {
+               dev_err(kbdev->dev,
+                       "Failed to lookup cooling level for freq %ld\n", freq);
+               return -EINVAL;
+       }
+
+       cdev = kbdev->devfreq_cooling->cdev;
+       err = cdev->ops->set_cur_state(cdev, state);
+
+       dev_dbg(kbdev->dev,
+               "Max power set to %u using frequency %ld (cooling level %ld) (%d)\n",
+               power, freq, state, err);
+
+       return err;
+}
+
+static struct power_actor_ops mali_pa_ops = {
+       .get_req_power = mali_pa_get_req_power,
+       .get_max_power = mali_pa_get_max_power,
+       .set_power = mali_pa_set_power,
+};
+
+int mali_pa_init(struct kbase_device *kbdev)
+{
+       struct power_actor *actor;
+       struct mali_power_actor *mali_actor;
+       struct mali_pa_model_ops *callbacks;
+       struct mali_pa_power_table *table;
+       unsigned long freq;
+       int i, num_opps;
+
+       callbacks = (void *)kbasep_get_config_value(kbdev, kbdev->config_attributes,
+                               KBASE_CONFIG_ATTR_POWER_MODEL_CALLBACKS);
+       if (!callbacks)
+               return -ENODEV;
+
+       mali_actor = kzalloc(sizeof(*mali_actor), GFP_KERNEL);
+       if (!mali_actor)
+               return -ENOMEM;
+
+       mali_actor->ops = callbacks;
+       mali_actor->kbdev = kbdev;
+
+       rcu_read_lock();
+       num_opps = dev_pm_opp_get_opp_count(kbdev->dev);
+       rcu_read_unlock();
+
+       table = kcalloc(num_opps, sizeof(table[0]), GFP_KERNEL);
+       if (!table) {
+               kfree(mali_actor);
+               return -ENOMEM;
+       }
+
+       rcu_read_lock();
+       for (i = 0, freq = 0; i < num_opps; i++, freq++) {
+               unsigned long power_static, power_dyn, voltage;
+               struct dev_pm_opp *opp;
+
+               opp = dev_pm_opp_find_freq_ceil(kbdev->dev, &freq);
+               if (IS_ERR(opp))
+                       break;
+
+               voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
+
+               table[i].freq = freq;
+
+               power_dyn = callbacks->get_dynamic_power(freq);
+               power_static = callbacks->get_static_power(voltage, 85000);
+
+               dev_info(kbdev->dev, "Power table: %lu MHz @ %lu mV: %lu + %lu = %lu mW\n",
+                               freq / 1000000, voltage,
+                               power_dyn, power_static, power_dyn + power_static);
+
+               table[i].power = power_dyn;
+       }
+       rcu_read_unlock();
+
+       if (i != num_opps)
+               dev_warn(kbdev->dev, "Power actor: Unable to enumerate all OPPs (%d != %d)\n",
+                               i, num_opps);
+
+       mali_actor->dyn_table = table;
+       mali_actor->dyn_table_count = i;
+
+       actor = power_actor_register(&mali_pa_ops, mali_actor);
+       if (IS_ERR_OR_NULL(actor)) {
+               kfree(mali_actor->dyn_table);
+               kfree(mali_actor);
+               return PTR_ERR(actor);
+       }
+
+       kbdev->power_actor = actor;
+
+       dev_info(kbdev->dev, "Initialized power actor\n");
+
+       return 0;
+}
+
+void mali_pa_term(struct kbase_device *kbdev)
+{
+       struct mali_power_actor *mali_actor;
+
+       if (kbdev->power_actor) {
+               mali_actor = kbdev->power_actor->data;
+
+               power_actor_unregister(kbdev->power_actor);
+               kbdev->power_actor = NULL;
+
+               kfree(mali_actor->dyn_table);
+               kfree(mali_actor);
+       }
+}
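
To make the arithmetic in mali_pa_get_req_power() and mali_pa_set_power() concrete, here is a worked example with invented numbers: suppose the static model reports 200 mW at the current voltage and temperature, the dynamic model reports 600 mW at the current OPP, and devfreq reports busy_time/total_time = 50%. The requested power is then 600 * 0.5 + 200 = 500 mW. If the thermal framework only grants 350 mW, the dynamic budget is 350 - 200 = 150 mW; set_power normalises that back to full load as 150 / 0.5 = 300 mW, then walks dyn_table and picks the highest frequency whose dynamic power does not exceed 300 mW, finally mapping that frequency to a devfreq cooling level.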
diff --git a/drivers/gpu/arm/midgard/mali_kbase_power_actor.h b/drivers/gpu/arm/midgard/mali_kbase_power_actor.h
new file mode 100755 (executable)
index 0000000..aea13b2
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+
+#ifndef _KBASE_POWER_ACTOR_H_
+#define _KBASE_POWER_ACTOR_H_
+
+#include <mali_kbase.h>
+
+#include <linux/pm_opp.h>
+
+struct mali_pa_model_ops {
+       unsigned long (*get_static_power)(unsigned long voltage, unsigned long temperature);
+       unsigned long (*get_dynamic_power)(unsigned long freq);
+};
+
+struct mali_pa_power_table {
+       unsigned long freq;
+       unsigned long power;
+};
+
+struct mali_power_actor {
+       struct kbase_device *kbdev;
+       struct mali_pa_model_ops *ops;
+       struct mali_pa_power_table *dyn_table;
+       int dyn_table_count;
+};
+
+int mali_pa_init(struct kbase_device *kbdev);
+void mali_pa_term(struct kbase_device *kbdev);
+
+
+#endif
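
The model callbacks declared above are supplied by platform code through the KBASE_CONFIG_ATTR_POWER_MODEL_CALLBACKS attribute that mali_pa_init() looks up. A hypothetical, deliberately crude implementation is sketched below; the coefficients are invented and a real platform would use characterised values:

	/* voltage in mV, temperature in millidegrees C, result in mW (toy model:
	 * leakage grows with voltage and temperature) */
	static unsigned long example_get_static_power(unsigned long voltage, unsigned long temperature)
	{
		return (voltage / 5) + (temperature / 1000);
	}

	/* freq in Hz, result in mW: roughly 1 mW per MHz as a placeholder */
	static unsigned long example_get_dynamic_power(unsigned long freq)
	{
		return freq / 1000000;
	}

	static struct mali_pa_model_ops example_power_model_ops = {
		.get_static_power = example_get_static_power,
		.get_dynamic_power = example_get_dynamic_power,
	};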
index 1f4ac3cf2395f3a0599c0be4b60f3f0f487fd078..004b2e8e38a469f71bccf498fc7e8aad057a0ad2 100755 (executable)
  * Replay soft job handlers
  */
 
+#include <linux/dma-mapping.h>
 #include <mali_kbase_config.h>
 #include <mali_kbase.h>
 #include <mali_kbase_mem.h>
-#include <mali_kbase_debug.h>
 
 #define JOB_NOT_STARTED 0
-
 #define JOB_TYPE_MASK      0xfe
 #define JOB_TYPE_NULL      (1 << 1)
 #define JOB_TYPE_VERTEX    (5 << 1)
 #define JOB_FLAG_PERFORM_JOB_BARRIER (1 << 8)
 
 #define JOB_HEADER_32_FBD_OFFSET (31*4)
+#define JOB_HEADER_64_FBD_OFFSET (44*4)
 
 #define FBD_POINTER_MASK (~0x3f)
 
 #define SFBD_TILER_OFFSET (48*4)
 
-#define MFBD_TILER_FLAGS_OFFSET (15*4)
-#define MFBD_TILER_OFFSET       (16*4)
+#define MFBD_TILER_OFFSET       (14*4)
 
 #define FBD_HIERARCHY_WEIGHTS 8
 #define FBD_HIERARCHY_MASK_MASK 0x1fff
 
 #define JOB_HEADER_ID_MAX                 0xffff
 
-typedef struct job_head
-{
+#define JOB_SOURCE_ID(status)          (((status) >> 16) & 0xFFFF)
+#define JOB_POLYGON_LIST               (0x03)
+
+struct job_head {
        u32 status;
        u32 not_complete_index;
        u64 fault_addr;
        u16 flags;
        u16 index;
        u16 dependencies[2];
-       union
-       {
+       union {
                u64 _64;
                u32 _32;
        } next;
        u32 x[2];
-       union
-       {
+       union {
                u64 _64;
                u32 _32;
        } fragment_fbd;
-} job_head;
+};
 
-static void dump_job_head(kbase_context *kctx, char *head_str, job_head *job)
+static void dump_job_head(struct kbase_context *kctx, char *head_str,
+               struct job_head *job)
 {
 #ifdef CONFIG_MALI_DEBUG
-       struct device *dev = kctx->kbdev->dev;
-
-       dev_dbg(dev, "%s\n", head_str);
-       dev_dbg(dev, "addr               = %p\n"
-                                       "status             = %x\n"
-                                       "not_complete_index = %x\n"
-                                       "fault_addr         = %llx\n"
-                                       "flags              = %x\n"
-                                       "index              = %x\n"
-                                       "dependencies       = %x,%x\n",
-                                                                          job,
-                                                                  job->status,
-                                                      job->not_complete_index,
-                                                              job->fault_addr,
-                                                                   job->flags,
-                                                                   job->index,
-                                                         job->dependencies[0],
-                                                        job->dependencies[1]);
+       dev_dbg(kctx->kbdev->dev, "%s\n", head_str);
+       dev_dbg(kctx->kbdev->dev, "addr               = %p\n"
+                       "status             = %x\n"
+                       "not_complete_index = %x\n"
+                       "fault_addr         = %llx\n"
+                       "flags              = %x\n"
+                       "index              = %x\n"
+                       "dependencies       = %x,%x\n",
+                       job, job->status, job->not_complete_index,
+                       job->fault_addr, job->flags, job->index,
+                       job->dependencies[0],
+                       job->dependencies[1]);
 
        if (job->flags & JOB_FLAG_DESC_SIZE)
-               dev_dbg(dev, "next               = %llx\n", job->next._64);
+               dev_dbg(kctx->kbdev->dev, "next               = %llx\n",
+                               job->next._64);
        else
-               dev_dbg(dev, "next               = %x\n", job->next._32);
+               dev_dbg(kctx->kbdev->dev, "next               = %x\n",
+                               job->next._32);
 #endif
 }
 
-
-static void *kbasep_map_page(kbase_context *kctx, mali_addr64 gpu_addr,
-                                                               u64 *phys_addr)
+struct kbasep_map_struct {
+       mali_addr64 gpu_addr;
+       struct kbase_mem_phy_alloc *alloc;
+       struct page **pages;
+       void *addr;
+       size_t size;
+       mali_bool is_cached;
+};
+
+static void *kbasep_map(struct kbase_context *kctx, mali_addr64 gpu_addr,
+               size_t size, struct kbasep_map_struct *map)
 {
-       void *cpu_addr = NULL;
-       u64 page_index;
-       kbase_va_region *region;
+       struct kbase_va_region *region;
+       unsigned long page_index;
+       unsigned int offset = gpu_addr & ~PAGE_MASK;
+       size_t page_count = PFN_UP(offset + size);
        phys_addr_t *page_array;
+       struct page **pages;
+       void *cpu_addr = NULL;
+       pgprot_t prot;
+       size_t i;
+
+       if (!size || !map)
+               return NULL;
 
-       region = kbase_region_tracker_find_region_enclosing_address(kctx,
-                                                                    gpu_addr);
+       /* check if page_count calculation will wrap */
+       if (size > ((size_t)-1 / PAGE_SIZE))
+               return NULL;
+
+       region = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
        if (!region || (region->flags & KBASE_REG_FREE))
                return NULL;
 
        page_index = (gpu_addr >> PAGE_SHIFT) - region->start_pfn;
-       if (page_index >= kbase_reg_current_backed_size(region))
+
+       /* check if page_index + page_count will wrap */
+       if (-1UL - page_count < page_index)
+               return NULL;
+
+       if (page_index + page_count > kbase_reg_current_backed_size(region))
                return NULL;
 
        page_array = kbase_get_phy_pages(region);
        if (!page_array)
                return NULL;
 
-       cpu_addr = kmap_atomic(pfn_to_page(PFN_DOWN(page_array[page_index])));
-       if (!cpu_addr)
+       pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+       if (!pages)
                return NULL;
 
-       if (phys_addr)
-               *phys_addr = page_array[page_index];
-
-       return cpu_addr + (gpu_addr & ~PAGE_MASK);
-}
+       for (i = 0; i < page_count; i++)
+               pages[i] = pfn_to_page(PFN_DOWN(page_array[page_index + i]));
 
-static void *kbasep_map_page_sync(kbase_context *kctx, mali_addr64 gpu_addr,
-                                                               u64 *phys_addr)
-{
-       void *cpu_addr = kbasep_map_page(kctx, gpu_addr, phys_addr);
+       prot = PAGE_KERNEL;
+       if (!(region->flags & KBASE_REG_CPU_CACHED)) {
+               /* Map uncached */
+               prot = pgprot_writecombine(prot);
+       }
 
+       cpu_addr = vmap(pages, page_count, VM_MAP, prot);
        if (!cpu_addr)
-               return NULL;
+               goto vmap_failed;
+
+       map->gpu_addr = gpu_addr;
+       map->alloc = kbase_mem_phy_alloc_get(region->alloc);
+       map->pages = pages;
+       map->addr = (void *)((uintptr_t)cpu_addr + offset);
+       map->size = size;
+       map->is_cached = (region->flags & KBASE_REG_CPU_CACHED) != 0;
+
+       if (map->is_cached) {
+               /* Sync first page */
+               size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+               phys_addr_t pa = page_to_phys(map->pages[0]) + offset;
+
+               kbase_sync_single(kctx, pa, sz, dma_sync_single_for_cpu);
+
+               /* Sync middle pages (if any) */
+               for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+                       pa = page_to_phys(map->pages[i]);
+                       kbase_sync_single(kctx, pa, PAGE_SIZE,
+                                       dma_sync_single_for_cpu);
+               }
+
+               /* Sync last page (if any) */
+               if (page_count > 1) {
+                       pa = page_to_phys(map->pages[page_count - 1]);
+                       sz = ((offset + size - 1) & ~PAGE_MASK) + 1;
+                       kbase_sync_single(kctx, pa, sz,
+                                       dma_sync_single_for_cpu);
+               }
+       }
 
-       kbase_sync_to_cpu(*phys_addr,
-                                (void *)((uintptr_t)cpu_addr & PAGE_MASK),
-                                                                   PAGE_SIZE);
+       return map->addr;
 
-       return cpu_addr;
-}
+vmap_failed:
+       kfree(pages);
 
-static void kbasep_unmap_page(void *cpu_addr)
-{
-       kunmap_atomic((void *)((uintptr_t)cpu_addr & PAGE_MASK));
+       return NULL;
 }
 
-static void kbasep_unmap_page_sync(void *cpu_addr, u64 phys_addr)
+static void kbasep_unmap(struct kbase_context *kctx,
+               struct kbasep_map_struct *map)
 {
-       kbase_sync_to_memory(phys_addr,
-                                (void *)((uintptr_t)cpu_addr & PAGE_MASK),
-                                                                   PAGE_SIZE);
+       void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK);
 
-       kunmap_atomic((void *)((uintptr_t)cpu_addr & PAGE_MASK));
+       vunmap(addr);
+
+       if (map->is_cached) {
+               off_t offset = (uintptr_t)map->addr & ~PAGE_MASK;
+               size_t size = map->size;
+               size_t page_count = PFN_UP(offset + size);
+               size_t i;
+
+               /* Sync first page */
+               size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+               phys_addr_t pa = page_to_phys(map->pages[0]) + offset;
+
+               kbase_sync_single(kctx, pa, sz, dma_sync_single_for_device);
+
+               /* Sync middle pages (if any) */
+               for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+                       pa = page_to_phys(map->pages[i]);
+                       kbase_sync_single(kctx, pa, PAGE_SIZE,
+                                       dma_sync_single_for_device);
+               }
+
+               /* Sync last page (if any) */
+               if (page_count > 1) {
+                       pa = page_to_phys(map->pages[page_count - 1]);
+                       sz = ((offset + size - 1) & ~PAGE_MASK) + 1;
+                       kbase_sync_single(kctx, pa, sz,
+                                       dma_sync_single_for_device);
+               }
+       }
+
+       kfree(map->pages);
+
+       map->gpu_addr = 0;
+       map->alloc = kbase_mem_phy_alloc_put(map->alloc);
+       map->pages = NULL;
+       map->addr = NULL;
+       map->size = 0;
+       map->is_cached = MALI_FALSE;
 }
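
The rewritten mapping helpers above replace the old per-page kmap_atomic()/kunmap_atomic() pair with a vmap() over every page backing the requested range; for CPU-cached regions they then sync the first page from the sub-page offset, any whole middle pages, and only the used tail of the last page. The size arithmetic is easy to get wrong, so here is a minimal standalone sketch of just that split; PG, sync_range() and sync_one() are invented stand-ins (the driver uses PAGE_SIZE and kbase_sync_single()), and only the logic is taken from the code above.

#include <stddef.h>
#include <stdio.h>

#define PG 4096u	/* stand-in for PAGE_SIZE */

/* Hypothetical per-page hook; the driver calls kbase_sync_single() here. */
static void sync_one(size_t page, size_t off, size_t len)
{
	printf("page %zu: sync %zu bytes from offset %zu\n", page, len, off);
}

/* Mirrors the first/middle/last split used for cached mappings above. */
static void sync_range(size_t offset, size_t size)
{
	size_t page_count = (offset + size + PG - 1) / PG;	/* PFN_UP(offset + size) */
	size_t first = (PG - offset) < size ? (PG - offset) : size;
	size_t i;

	sync_one(0, offset, first);				/* first page */
	for (i = 1; page_count > 2 && i < page_count - 1; i++)
		sync_one(i, 0, PG);				/* whole middle pages */
	if (page_count > 1)					/* used tail of last page */
		sync_one(page_count - 1, 0, ((offset + size - 1) & (PG - 1)) + 1);
}

int main(void)
{
	sync_range(100, 10000);	/* spans three 4 KiB pages: 3996 + 4096 + 1908 bytes */
	return 0;
}

The same split runs twice in the driver, with dma_sync_single_for_cpu on map and dma_sync_single_for_device on unmap, so only the sync direction differs between kbasep_map() and kbasep_unmap().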
 
-static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
-                                          mali_addr64 fbd_address,
-                                          mali_addr64 tiler_heap_free,
-                                          u16 hierarchy_mask,
-                                          u32 default_weight)
+static mali_error kbasep_replay_reset_sfbd(struct kbase_context *kctx,
+               mali_addr64 fbd_address, mali_addr64 tiler_heap_free,
+               u16 hierarchy_mask, u32 default_weight)
 {
-       u64 phys_addr;
-       struct
-       {
+       struct {
                u32 padding_1[1];
                u32 flags;
                u64 padding_2[2];
@@ -182,22 +259,23 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
                u32 padding[8];
                u32 weights[FBD_HIERARCHY_WEIGHTS];
        } *fbd_tiler;
-       struct device *dev = kctx->kbdev->dev;
+       struct kbasep_map_struct map;
 
-       dev_dbg(dev, "fbd_address: %llx\n", fbd_address);
+       dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
 
-       fbd_tiler = kbasep_map_page_sync(kctx, fbd_address + SFBD_TILER_OFFSET,
-                                                                  &phys_addr);
+       fbd_tiler = kbasep_map(kctx, fbd_address + SFBD_TILER_OFFSET,
+                       sizeof(*fbd_tiler), &map);
        if (!fbd_tiler) {
-               dev_err(dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
+               dev_err(kctx->kbdev->dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
                return MALI_ERROR_FUNCTION_FAILED;
        }
+
 #ifdef CONFIG_MALI_DEBUG
-       dev_dbg(dev, "FBD tiler:\n"
-                               "flags = %x\n"
-                               "heap_free_address = %llx\n",
-                                                             fbd_tiler->flags,
-                                                fbd_tiler->heap_free_address);
+       dev_dbg(kctx->kbdev->dev,
+               "FBD tiler:\n"
+               "flags = %x\n"
+               "heap_free_address = %llx\n",
+               fbd_tiler->flags, fbd_tiler->heap_free_address);
 #endif
        if (hierarchy_mask) {
                u32 weights[HIERARCHY_WEIGHTS];
@@ -215,12 +293,12 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
                }
 
 
-               dev_dbg(dev,
-                             "Old hierarchy mask=%x  New hierarchy mask=%x\n",
-                                          old_hierarchy_mask, hierarchy_mask);
+               dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x  New hierarchy mask=%x\n",
+                               old_hierarchy_mask, hierarchy_mask);
+
                for (i = 0; i < HIERARCHY_WEIGHTS; i++)
-                       dev_dbg(dev, " Hierarchy weight %02d: %08x\n",
-                                                               i, weights[i]);
+                       dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+                                       i, weights[i]);
 
                j = 0;
 
@@ -228,9 +306,8 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
                        if (hierarchy_mask & (1 << i)) {
                                KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
 
-                               dev_dbg(dev,
-                               " Writing hierarchy level %02d (%08x) to %d\n",
-                                                            i, weights[i], j);
+                               dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n",
+                                               i, weights[i], j);
 
                                fbd_tiler->weights[j++] = weights[i];
                        }
@@ -244,67 +321,48 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
 
        fbd_tiler->heap_free_address = tiler_heap_free;
 
-       dev_dbg(dev, "heap_free_address=%llx flags=%x\n",
-                              fbd_tiler->heap_free_address, fbd_tiler->flags);
+       dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n",
+                       fbd_tiler->heap_free_address, fbd_tiler->flags);
 
-       kbasep_unmap_page_sync(fbd_tiler, phys_addr);
+       kbasep_unmap(kctx, &map);
 
        return MALI_ERROR_NONE;
 }
 
-static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
-                                          mali_addr64 fbd_address,
-                                          mali_addr64 tiler_heap_free,
-                                          u16 hierarchy_mask,
-                                          u32 default_weight)
+static mali_error kbasep_replay_reset_mfbd(struct kbase_context *kctx,
+               mali_addr64 fbd_address, mali_addr64 tiler_heap_free,
+               u16 hierarchy_mask, u32 default_weight)
 {
-       u64 phys_addr, phys_addr_flags;
-       struct
-       {
+       struct kbasep_map_struct map;
+       struct {
+               u32 padding_0;
+               u32 flags;
                u64 padding_1[2];
                u64 heap_free_address;
                u64 padding_2;
                u32 weights[FBD_HIERARCHY_WEIGHTS];
        } *fbd_tiler;
-       u32 *fbd_tiler_flags;
-       mali_bool flags_different_page;
-       struct device *dev = kctx->kbdev->dev;
 
-       dev_dbg(dev, "fbd_address: %llx\n", fbd_address);
-
-       fbd_tiler = kbasep_map_page_sync(kctx, fbd_address + MFBD_TILER_OFFSET,
-                                                                  &phys_addr);
-       if (((fbd_address + MFBD_TILER_OFFSET) & PAGE_MASK) !=
-           ((fbd_address + MFBD_TILER_FLAGS_OFFSET) & PAGE_MASK)) {
-               flags_different_page = MALI_TRUE;
-               fbd_tiler_flags = kbasep_map_page_sync(kctx,
-                                        fbd_address + MFBD_TILER_FLAGS_OFFSET,
-                                                            &phys_addr_flags);
-       } else {
-               flags_different_page = MALI_FALSE;
-               fbd_tiler_flags = (u32 *)((uintptr_t)fbd_tiler -
-                                 MFBD_TILER_OFFSET + MFBD_TILER_FLAGS_OFFSET);
-       }
-
-       if (!fbd_tiler || !fbd_tiler_flags) {
-               dev_err(dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
-
-               if (fbd_tiler_flags && flags_different_page)
-                       kbasep_unmap_page_sync(fbd_tiler_flags,
-                                                             phys_addr_flags);
-               if (fbd_tiler)
-                       kbasep_unmap_page_sync(fbd_tiler, phys_addr);
+       dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
 
+       fbd_tiler = kbasep_map(kctx, fbd_address + MFBD_TILER_OFFSET,
+                       sizeof(*fbd_tiler), &map);
+       if (!fbd_tiler) {
+               dev_err(kctx->kbdev->dev,
+                              "kbasep_replay_reset_fbd: failed to map fbd\n");
                return MALI_ERROR_FUNCTION_FAILED;
        }
+
 #ifdef CONFIG_MALI_DEBUG
-       dev_dbg(dev, "FBD tiler:\n"
-                               "heap_free_address = %llx\n",
-                                fbd_tiler->heap_free_address);
+       dev_dbg(kctx->kbdev->dev, "FBD tiler:\n"
+                       "flags = %x\n"
+                       "heap_free_address = %llx\n",
+                       fbd_tiler->flags,
+                       fbd_tiler->heap_free_address);
 #endif
        if (hierarchy_mask) {
                u32 weights[HIERARCHY_WEIGHTS];
-               u16 old_hierarchy_mask = (*fbd_tiler_flags) &
+               u16 old_hierarchy_mask = (fbd_tiler->flags) &
                                                       FBD_HIERARCHY_MASK_MASK;
                int i, j = 0;
 
@@ -312,18 +370,18 @@ static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
                        if (old_hierarchy_mask & (1 << i)) {
                                KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
                                weights[i] = fbd_tiler->weights[j++];
-                       }
-                       else
+                       } else {
                                weights[i] = default_weight;
+                       }
                }
 
 
-               dev_dbg(dev,
-                             "Old hierarchy mask=%x  New hierarchy mask=%x\n",
-                                          old_hierarchy_mask, hierarchy_mask);
+               dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x  New hierarchy mask=%x\n",
+                               old_hierarchy_mask, hierarchy_mask);
+
                for (i = 0; i < HIERARCHY_WEIGHTS; i++)
-                       dev_dbg(dev, " Hierarchy weight %02d: %08x\n",
-                                                               i, weights[i]);
+                       dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
+                                       i, weights[i]);
 
                j = 0;
 
@@ -331,7 +389,7 @@ static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
                        if (hierarchy_mask & (1 << i)) {
                                KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
 
-                               dev_dbg(dev,
+                               dev_dbg(kctx->kbdev->dev,
                                " Writing hierarchy level %02d (%08x) to %d\n",
                                                             i, weights[i], j);
 
@@ -342,15 +400,12 @@ static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
                for (; j < FBD_HIERARCHY_WEIGHTS; j++)
                        fbd_tiler->weights[j] = 0;
 
-               *fbd_tiler_flags = hierarchy_mask | (1 << 16);
+               fbd_tiler->flags = hierarchy_mask | (1 << 16);
        }
 
        fbd_tiler->heap_free_address = tiler_heap_free;
 
-       if (flags_different_page)
-               kbasep_unmap_page_sync(fbd_tiler_flags, phys_addr_flags);
-
-       kbasep_unmap_page_sync(fbd_tiler, phys_addr);
+       kbasep_unmap(kctx, &map);
 
        return MALI_ERROR_NONE;
 }
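
Both FBD reset paths above (SFBD and MFBD) re-pack the tiler hierarchy weights the same way: the packed weight array is first expanded according to the old hierarchy mask, with missing levels taking the default weight, then re-packed in the order of the new mask, zero-filling the remaining slots. A standalone sketch of that transformation, assuming a 13-bit mask; repack_weights() and MAX_LEVELS are invented names, not driver symbols.

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVELS 13	/* stand-in for the 13-bit FBD hierarchy mask */

/* Expand the packed weights by old_mask, then re-pack them in new_mask order. */
static void repack_weights(uint16_t old_mask, uint16_t new_mask,
			   const uint32_t *old_packed, uint32_t default_weight,
			   uint32_t *new_packed)
{
	uint32_t expanded[MAX_LEVELS];
	int i, j;

	for (i = 0, j = 0; i < MAX_LEVELS; i++)
		expanded[i] = (old_mask & (1 << i)) ? old_packed[j++]
						    : default_weight;

	for (i = 0, j = 0; i < MAX_LEVELS; i++)
		if (new_mask & (1 << i))
			new_packed[j++] = expanded[i];

	for (; j < MAX_LEVELS; j++)
		new_packed[j] = 0;	/* the driver zero-fills unused slots */
}

int main(void)
{
	uint32_t old_packed[] = { 10, 20 };	/* weights for the bits set in old_mask */
	uint32_t new_packed[MAX_LEVELS];

	repack_weights(0x5 /* levels 0,2 */, 0x6 /* levels 1,2 */,
		       old_packed, 99, new_packed);
	printf("%u %u\n", (unsigned)new_packed[0], (unsigned)new_packed[1]);	/* 99 20 */
	return 0;
}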
@@ -373,34 +428,43 @@ static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
  *
  * @return MALI_ERROR_NONE on success, error code on failure
  */
-static mali_error kbasep_replay_reset_tiler_job(kbase_context *kctx,
-                                               mali_addr64 job_header,
-                                               mali_addr64 tiler_heap_free,
-                                               u16 hierarchy_mask,
-                                               u32 default_weight,
-                                               mali_bool job_64)
+static mali_error kbasep_replay_reset_tiler_job(struct kbase_context *kctx,
+               mali_addr64 job_header, mali_addr64 tiler_heap_free,
+               u16 hierarchy_mask, u32 default_weight, mali_bool job_64)
 {
+       struct kbasep_map_struct map;
        mali_addr64 fbd_address;
 
        if (job_64) {
-               dev_err(kctx->kbdev->dev,
-                                     "64-bit job descriptor not supported\n");
-               return MALI_ERROR_FUNCTION_FAILED;
+               u64 *job_ext;
+
+               job_ext = kbasep_map(kctx,
+                               job_header + JOB_HEADER_64_FBD_OFFSET,
+                               sizeof(*job_ext), &map);
+
+               if (!job_ext) {
+                       dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
+                       return MALI_ERROR_FUNCTION_FAILED;
+               }
+
+               fbd_address = *job_ext;
+
+               kbasep_unmap(kctx, &map);
        } else {
-               u32 *job_ext;   
+               u32 *job_ext;
+
+               job_ext = kbasep_map(kctx,
+                               job_header + JOB_HEADER_32_FBD_OFFSET,
+                               sizeof(*job_ext), &map);
 
-               job_ext = kbasep_map_page(kctx,
-                                        job_header + JOB_HEADER_32_FBD_OFFSET,
-                                                                        NULL);
                if (!job_ext) {
-                       dev_err(kctx->kbdev->dev,
-                         "kbasep_replay_reset_tiler_job: failed to map jc\n");
+                       dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
                        return MALI_ERROR_FUNCTION_FAILED;
                }
 
                fbd_address = *job_ext;
 
-               kbasep_unmap_page(job_ext);
+               kbasep_unmap(kctx, &map);
        }
 
        if (fbd_address & FBD_TYPE) {
@@ -447,33 +511,28 @@ static mali_error kbasep_replay_reset_tiler_job(kbase_context *kctx,
  *
  * @return MALI_ERROR_NONE on success, error code on failure
  */
-static mali_error kbasep_replay_reset_job(kbase_context *kctx,
-                                               mali_addr64 *job_header,
-                                               mali_addr64 prev_jc,
-                                               mali_addr64 tiler_heap_free,
-                                               u16 hierarchy_mask,
-                                               u32 default_weight,
-                                               u16 hw_job_id_offset,
-                                               mali_bool first_in_chain,
-                                               mali_bool fragment_chain)
+static mali_error kbasep_replay_reset_job(struct kbase_context *kctx,
+               mali_addr64 *job_header, mali_addr64 prev_jc,
+               mali_addr64 tiler_heap_free, u16 hierarchy_mask,
+               u32 default_weight, u16 hw_job_id_offset,
+               mali_bool first_in_chain, mali_bool fragment_chain)
 {
-       job_head *job;
-       u64 phys_addr;
+       struct job_head *job;
        mali_addr64 new_job_header;
-       struct device *dev = kctx->kbdev->dev;
+       struct kbasep_map_struct map;
 
-       job = kbasep_map_page_sync(kctx, *job_header, &phys_addr);
+       job = kbasep_map(kctx, *job_header, sizeof(*job), &map);
        if (!job) {
-               dev_err(dev, "kbasep_replay_parse_jc: failed to map jc\n");
+               dev_err(kctx->kbdev->dev,
+                                "kbasep_replay_parse_jc: failed to map jc\n");
                return MALI_ERROR_FUNCTION_FAILED;
        }
 
        dump_job_head(kctx, "Job header:", job);
 
        if (job->status == JOB_NOT_STARTED && !fragment_chain) {
-               dev_err(dev, "Job already not started\n");
-               kbasep_unmap_page_sync(job, phys_addr);
-               return MALI_ERROR_FUNCTION_FAILED;
+               dev_err(kctx->kbdev->dev, "Job already not started\n");
+               goto out_unmap;
        }
        job->status = JOB_NOT_STARTED;
 
@@ -481,9 +540,8 @@ static mali_error kbasep_replay_reset_job(kbase_context *kctx,
                job->flags = (job->flags & ~JOB_TYPE_MASK) | JOB_TYPE_NULL;
 
        if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_FUSED) {
-               dev_err(dev, "Fused jobs can not be replayed\n");
-               kbasep_unmap_page_sync(job, phys_addr);
-               return MALI_ERROR_FUNCTION_FAILED;
+               dev_err(kctx->kbdev->dev, "Fused jobs can not be replayed\n");
+               goto out_unmap;
        }
 
        if (first_in_chain)
@@ -492,9 +550,9 @@ static mali_error kbasep_replay_reset_job(kbase_context *kctx,
        if ((job->dependencies[0] + hw_job_id_offset) > JOB_HEADER_ID_MAX ||
            (job->dependencies[1] + hw_job_id_offset) > JOB_HEADER_ID_MAX ||
            (job->index + hw_job_id_offset) > JOB_HEADER_ID_MAX) {
-               dev_err(dev, "Job indicies/dependencies out of valid range\n");
-               kbasep_unmap_page_sync(job, phys_addr);
-               return MALI_ERROR_FUNCTION_FAILED;
+               dev_err(kctx->kbdev->dev,
+                            "Job indices/dependencies out of valid range\n");
+               goto out_unmap;
        }
 
        if (job->dependencies[0])
@@ -516,51 +574,47 @@ static mali_error kbasep_replay_reset_job(kbase_context *kctx,
        dump_job_head(kctx, "Updated to:", job);
 
        if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_TILER) {
-               kbasep_unmap_page_sync(job, phys_addr);
+               mali_bool job_64 = (job->flags & JOB_FLAG_DESC_SIZE) != 0;
+
                if (kbasep_replay_reset_tiler_job(kctx, *job_header,
-                                       tiler_heap_free, hierarchy_mask, 
-                                       default_weight,
-                                       job->flags & JOB_FLAG_DESC_SIZE) !=
-                                                       MALI_ERROR_NONE)
-                       return MALI_ERROR_FUNCTION_FAILED;
+                               tiler_heap_free, hierarchy_mask,
+                               default_weight, job_64) != MALI_ERROR_NONE)
+                       goto out_unmap;
 
        } else if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_FRAGMENT) {
                u64 fbd_address;
 
-               if (job->flags & JOB_FLAG_DESC_SIZE) {
-                       kbasep_unmap_page_sync(job, phys_addr);
-                       dev_err(dev, "64-bit job descriptor not supported\n");
-                       return MALI_ERROR_FUNCTION_FAILED;
-               } else {
+               if (job->flags & JOB_FLAG_DESC_SIZE)
+                       fbd_address = job->fragment_fbd._64;
+               else
                        fbd_address = (u64)job->fragment_fbd._32;
-               }
-
-               kbasep_unmap_page_sync(job, phys_addr);
 
                if (fbd_address & FBD_TYPE) {
                        if (kbasep_replay_reset_mfbd(kctx,
-                                               fbd_address & FBD_POINTER_MASK,
-                                               tiler_heap_free,
-                                               hierarchy_mask,
-                                               default_weight) !=
-                                                              MALI_ERROR_NONE)
-                               return MALI_ERROR_FUNCTION_FAILED;
+                                       fbd_address & FBD_POINTER_MASK,
+                                       tiler_heap_free,
+                                       hierarchy_mask,
+                                       default_weight) != MALI_ERROR_NONE)
+                               goto out_unmap;
                } else {
                        if (kbasep_replay_reset_sfbd(kctx,
-                                               fbd_address & FBD_POINTER_MASK,
-                                               tiler_heap_free,
-                                               hierarchy_mask,
-                                               default_weight) !=
-                                                              MALI_ERROR_NONE)
-                               return MALI_ERROR_FUNCTION_FAILED;
+                                       fbd_address & FBD_POINTER_MASK,
+                                       tiler_heap_free,
+                                       hierarchy_mask,
+                                       default_weight) != MALI_ERROR_NONE)
+                               goto out_unmap;
                }
-       } else {
-               kbasep_unmap_page_sync(job, phys_addr);
        }
 
+       kbasep_unmap(kctx, &map);
+
        *job_header = new_job_header;
 
        return MALI_ERROR_NONE;
+
+out_unmap:
+       kbasep_unmap(kctx, &map);
+       return MALI_ERROR_FUNCTION_FAILED;
 }
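
Beyond the switch to the new mapping helpers, the error handling in kbasep_replay_reset_job() is consolidated: every failure path now funnels through the single out_unmap label instead of repeating the unmap-and-return sequence. A generic sketch of that single-exit cleanup idiom, with invented names (process_resource, out_free) rather than driver code.

#include <stdio.h>
#include <stdlib.h>

/* Single-exit cleanup via goto, mirroring the out_unmap label above. */
static int process_resource(size_t len)
{
	int ret = -1;
	char *buf = malloc(len);

	if (!buf)
		return -1;

	if (len < 16)			/* a validation step fails ...      */
		goto out_free;		/* ... jump to the one cleanup path */

	buf[0] = 0;			/* the real work would happen here  */
	ret = 0;

out_free:
	free(buf);
	return ret;
}

int main(void)
{
	printf("%d %d\n", process_resource(8), process_resource(64));	/* -1 0 */
	return 0;
}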
 
 /**
@@ -572,18 +626,17 @@ static mali_error kbasep_replay_reset_job(kbase_context *kctx,
  *
  * @return MALI_ERROR_NONE on success, error code on failure
  */
-static mali_error kbasep_replay_find_hw_job_id(kbase_context *kctx,
-                                               mali_addr64 jc,
-                                               u16 *hw_job_id)
+static mali_error kbasep_replay_find_hw_job_id(struct kbase_context *kctx,
+               mali_addr64 jc, u16 *hw_job_id)
 {
        while (jc) {
-               job_head *job;
-               u64 phys_addr;
+               struct job_head *job;
+               struct kbasep_map_struct map;
 
                dev_dbg(kctx->kbdev->dev,
                        "kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
 
-               job = kbasep_map_page_sync(kctx, jc, &phys_addr);
+               job = kbasep_map(kctx, jc, sizeof(*job), &map);
                if (!job) {
                        dev_err(kctx->kbdev->dev, "failed to map jc\n");
 
@@ -598,7 +651,7 @@ static mali_error kbasep_replay_find_hw_job_id(kbase_context *kctx,
                else
                        jc = job->next._32;
 
-               kbasep_unmap_page_sync(job, phys_addr);
+               kbasep_unmap(kctx, &map);
        }
 
        return MALI_ERROR_NONE;
@@ -626,39 +679,32 @@ static mali_error kbasep_replay_find_hw_job_id(kbase_context *kctx,
  *
  * @return MALI_ERROR_NONE on success, error code otherwise
  */
-static mali_error kbasep_replay_parse_jc(kbase_context *kctx,
-                                               mali_addr64 jc,
-                                               mali_addr64 prev_jc,
-                                               mali_addr64 tiler_heap_free,
-                                               u16 hierarchy_mask,
-                                               u32 default_weight,
-                                               u16 hw_job_id_offset,
-                                               mali_bool fragment_chain)
+static mali_error kbasep_replay_parse_jc(struct kbase_context *kctx,
+               mali_addr64 jc, mali_addr64 prev_jc,
+               mali_addr64 tiler_heap_free, u16 hierarchy_mask,
+               u32 default_weight, u16 hw_job_id_offset,
+               mali_bool fragment_chain)
 {
        mali_bool first_in_chain = MALI_TRUE;
        int nr_jobs = 0;
 
-       dev_dbg(kctx->kbdev->dev,
-                             "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
-                                                        jc, hw_job_id_offset);
+       dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
+                       jc, hw_job_id_offset);
 
        while (jc) {
-               dev_dbg(kctx->kbdev->dev,
-                                  "kbasep_replay_parse_jc: parsing jc=%llx\n",
-                                                                          jc);
+               dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc);
 
                if (kbasep_replay_reset_job(kctx, &jc, prev_jc,
                                tiler_heap_free, hierarchy_mask,
                                default_weight, hw_job_id_offset,
-                               first_in_chain, fragment_chain) != 
-                                                            MALI_ERROR_NONE)
+                               first_in_chain, fragment_chain) != MALI_ERROR_NONE)
                        return MALI_ERROR_FUNCTION_FAILED;
 
                first_in_chain = MALI_FALSE;
 
                nr_jobs++;
                if (fragment_chain &&
-                               nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
+                   nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
                        dev_err(kctx->kbdev->dev,
                                "Exceeded maximum number of jobs in fragment chain\n");
                        return MALI_ERROR_FUNCTION_FAILED;
@@ -677,11 +723,11 @@ static mali_error kbasep_replay_parse_jc(kbase_context *kctx,
  * @param[in] katom     The atom to be reset
  * @param[in] dep_atom  The dependency to be attached to the atom
  */
-static void kbasep_replay_reset_softjob(kbase_jd_atom *katom,
-                                                      kbase_jd_atom *dep_atom)
+static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom,
+               struct kbase_jd_atom *dep_atom)
 {
        katom->status = KBASE_JD_ATOM_STATE_QUEUED;
-       kbase_jd_katom_dep_set(&katom->dep[0],dep_atom, BASE_JD_DEP_TYPE_DATA);
+       kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
        list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
 }
 
@@ -696,9 +742,9 @@ static void kbasep_replay_reset_softjob(kbase_jd_atom *katom,
  * @param[in] kctx      Context pointer
  * @return An atom ID, or -1 on failure
  */
-static int kbasep_allocate_katom(kbase_context *kctx)
+static int kbasep_allocate_katom(struct kbase_context *kctx)
 {
-       kbase_jd_context *jctx = &kctx->jctx;
+       struct kbase_jd_context *jctx = &kctx->jctx;
        int i;
 
        for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
@@ -724,24 +770,24 @@ static int kbasep_allocate_katom(kbase_context *kctx)
  * @param[in] kctx      Context pointer
  * @param[in] atom_id   ID of atom to release
  */
-static void kbasep_release_katom(kbase_context *kctx, int atom_id)
+static void kbasep_release_katom(struct kbase_context *kctx, int atom_id)
 {
-       kbase_jd_context *jctx = &kctx->jctx;
+       struct kbase_jd_context *jctx = &kctx->jctx;
 
-       dev_dbg(kctx->kbdev->dev,
-                                   "kbasep_release_katom: Released atom %d\n",
-                                                                     atom_id);
+       dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n",
+                       atom_id);
 
        while (!list_empty(&jctx->atoms[atom_id].dep_head[0]))
                list_del(jctx->atoms[atom_id].dep_head[0].next);
+
        while (!list_empty(&jctx->atoms[atom_id].dep_head[1]))
                list_del(jctx->atoms[atom_id].dep_head[1].next);
 
        jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED;
 }
 
-static void kbasep_replay_create_atom(kbase_context *kctx,
-                                     base_jd_atom_v2 *atom,
+static void kbasep_replay_create_atom(struct kbase_context *kctx,
+                                     struct base_jd_atom_v2 *atom,
                                      int atom_nr,
                                      int prio)
 {
@@ -778,10 +824,9 @@ static void kbasep_replay_create_atom(kbase_context *kctx,
  *                         job)
  * @return MALI_ERROR_NONE on success, error code on failure
  */
-static mali_error kbasep_replay_create_atoms(kbase_context *kctx,
-                                            base_jd_atom_v2 *t_atom,
-                                            base_jd_atom_v2 *f_atom,
-                                            int prio)
+static mali_error kbasep_replay_create_atoms(struct kbase_context *kctx,
+               struct base_jd_atom_v2 *t_atom,
+               struct base_jd_atom_v2 *f_atom, int prio)
 {
        int t_atom_nr, f_atom_nr;
 
@@ -807,7 +852,7 @@ static mali_error kbasep_replay_create_atoms(kbase_context *kctx,
 }
 
 #ifdef CONFIG_MALI_DEBUG
-static void payload_dump(kbase_context *kctx, base_jd_replay_payload *payload)
+static void payload_dump(struct kbase_context *kctx, base_jd_replay_payload *payload)
 {
        mali_addr64 next;
 
@@ -815,19 +860,20 @@ static void payload_dump(kbase_context *kctx, base_jd_replay_payload *payload)
        next = payload->tiler_jc_list;
 
        while (next) {
-               base_jd_replay_jc *jc_struct = kbasep_map_page(kctx, next, NULL);
+               struct kbasep_map_struct map;
+               base_jd_replay_jc *jc_struct;
+
+               jc_struct = kbasep_map(kctx, next, sizeof(*jc_struct), &map);
 
                if (!jc_struct)
                        return;
 
-               dev_dbg(kctx->kbdev->dev,
-                                         "* jc_struct=%p jc=%llx next=%llx\n",
-                                                                    jc_struct,
-                                                                jc_struct->jc,
-                                                             jc_struct->next);
+               dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n",
+                               jc_struct, jc_struct->jc, jc_struct->next);
+
                next = jc_struct->next;
 
-               kbasep_unmap_page(jc_struct);
+               kbasep_unmap(kctx, &map);
        }
 }
 #endif
@@ -843,45 +889,42 @@ static void payload_dump(kbase_context *kctx, base_jd_replay_payload *payload)
  * @param[in] f_atom       Atom to use for fragment jobs
  * @return  MALI_ERROR_NONE on success, error code on failure
  */
-static mali_error kbasep_replay_parse_payload(kbase_context *kctx, 
-                                             kbase_jd_atom *replay_atom,
-                                             base_jd_atom_v2 *t_atom,
-                                             base_jd_atom_v2 *f_atom)
+static mali_error kbasep_replay_parse_payload(struct kbase_context *kctx,
+                                             struct kbase_jd_atom *replay_atom,
+                                             struct base_jd_atom_v2 *t_atom,
+                                             struct base_jd_atom_v2 *f_atom)
 {
        base_jd_replay_payload *payload;
        mali_addr64 next;
        mali_addr64 prev_jc = 0;
        u16 hw_job_id_offset = 0;
        mali_error ret = MALI_ERROR_FUNCTION_FAILED;
-       u64 phys_addr;
-       struct device *dev = kctx->kbdev->dev;
+       struct kbasep_map_struct map;
 
-       dev_dbg(dev,
-                       "kbasep_replay_parse_payload: replay_atom->jc = %llx  "
-                       "sizeof(payload) = %d\n",
-                                            replay_atom->jc, sizeof(payload));
+       dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(payload) = %zu\n",
+                       replay_atom->jc, sizeof(payload));
 
        kbase_gpu_vm_lock(kctx);
 
-       payload = kbasep_map_page_sync(kctx, replay_atom->jc, &phys_addr);
+       payload = kbasep_map(kctx, replay_atom->jc, sizeof(*payload), &map);
 
        if (!payload) {
                kbase_gpu_vm_unlock(kctx);
-               dev_err(dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
+               dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
                return MALI_ERROR_FUNCTION_FAILED;
        }
 
 #ifdef CONFIG_MALI_DEBUG
-       dev_dbg(dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
-       dev_dbg(dev, "Payload structure:\n"
-                                       "tiler_jc_list            = %llx\n"
-                                       "fragment_jc              = %llx\n"
-                                       "tiler_heap_free          = %llx\n"
-                                       "fragment_hierarchy_mask  = %x\n"
-                                       "tiler_hierarchy_mask     = %x\n"
-                                       "hierarchy_default_weight = %x\n"
-                                       "tiler_core_req           = %x\n"
-                                       "fragment_core_req        = %x\n",
+       dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
+       dev_dbg(kctx->kbdev->dev, "Payload structure:\n"
+                                 "tiler_jc_list            = %llx\n"
+                                 "fragment_jc              = %llx\n"
+                                 "tiler_heap_free          = %llx\n"
+                                 "fragment_hierarchy_mask  = %x\n"
+                                 "tiler_hierarchy_mask     = %x\n"
+                                 "hierarchy_default_weight = %x\n"
+                                 "tiler_core_req           = %x\n"
+                                 "fragment_core_req        = %x\n",
                                                        payload->tiler_jc_list,
                                                          payload->fragment_jc,
                                                      payload->tiler_heap_free,
@@ -903,23 +946,26 @@ static mali_error kbasep_replay_parse_payload(kbase_context *kctx,
                              ~BASE_JD_REQ_COHERENT_GROUP) != BASE_JD_REQ_FS ||
             t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES ||
             f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
-               dev_err(dev, "Invalid core requirements\n");
+               dev_err(kctx->kbdev->dev, "Invalid core requirements\n");
                goto out;
        }
-       
+
        /* Process tiler job chains */
        next = payload->tiler_jc_list;
        if (!next) {
-               dev_err(dev, "Invalid tiler JC list\n");
+               dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n");
                goto out;
        }
 
        while (next) {
-               base_jd_replay_jc *jc_struct = kbasep_map_page(kctx, next, NULL);
+               base_jd_replay_jc *jc_struct;
+               struct kbasep_map_struct jc_map;
                mali_addr64 jc;
 
+               jc_struct = kbasep_map(kctx, next, sizeof(*jc_struct), &jc_map);
+
                if (!jc_struct) {
-                       dev_err(dev, "Failed to map jc struct\n");
+                       dev_err(kctx->kbdev->dev, "Failed to map jc struct\n");
                        goto out;
                }
 
@@ -928,21 +974,21 @@ static mali_error kbasep_replay_parse_payload(kbase_context *kctx,
                if (next)
                        jc_struct->jc = 0;
 
-               kbasep_unmap_page(jc_struct);
+               kbasep_unmap(kctx, &jc_map);
 
                if (jc) {
                        u16 max_hw_job_id = 0;
 
                        if (kbasep_replay_find_hw_job_id(kctx, jc,
-                                           &max_hw_job_id) != MALI_ERROR_NONE)
+                                       &max_hw_job_id) != MALI_ERROR_NONE)
                                goto out;
 
                        if (kbasep_replay_parse_jc(kctx, jc, prev_jc,
-                                            payload->tiler_heap_free,
-                                            payload->tiler_hierarchy_mask,
-                                            payload->hierarchy_default_weight,
-                                            hw_job_id_offset, MALI_FALSE) !=
-                                                            MALI_ERROR_NONE) {
+                                       payload->tiler_heap_free,
+                                       payload->tiler_hierarchy_mask,
+                                       payload->hierarchy_default_weight,
+                                       hw_job_id_offset, MALI_FALSE) !=
+                                       MALI_ERROR_NONE) {
                                goto out;
                        }
 
@@ -956,78 +1002,50 @@ static mali_error kbasep_replay_parse_payload(kbase_context *kctx,
        /* Process fragment job chain */
        f_atom->jc = payload->fragment_jc;
        if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0,
-                                        payload->tiler_heap_free,
-                                        payload->fragment_hierarchy_mask,
-                                        payload->hierarchy_default_weight, 0,
-                                              MALI_TRUE) != MALI_ERROR_NONE) {
+                       payload->tiler_heap_free,
+                       payload->fragment_hierarchy_mask,
+                       payload->hierarchy_default_weight, 0,
+                       MALI_TRUE) != MALI_ERROR_NONE) {
                goto out;
        }
 
        if (!t_atom->jc || !f_atom->jc) {
-               dev_err(dev, "Invalid payload\n");
+               dev_err(kctx->kbdev->dev, "Invalid payload\n");
                goto out;
        }
 
-       dev_dbg(dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
-                                                      t_atom->jc, f_atom->jc);
+       dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
+                       t_atom->jc, f_atom->jc);
        ret = MALI_ERROR_NONE;
 
-out:   
-       kbasep_unmap_page_sync(payload, phys_addr);
+out:
+       kbasep_unmap(kctx, &map);
 
        kbase_gpu_vm_unlock(kctx);
 
        return ret;
 }
 
-/**
- * @brief Process a replay job
- *
- * Called from kbase_process_soft_job.
- *
- * On exit, if the job has completed, katom->event_code will have been updated.
- * If the job has not completed, and is replaying jobs, then the atom status
- * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
- *
- * @param[in] katom  The atom to be processed
- * @return           MALI_REPLAY_STATUS_COMPLETE  if the atom has completed
- *                   MALI_REPLAY_STATUS_REPLAYING if the atom is replaying jobs
- *                   Set MALI_REPLAY_FLAG_JS_RESCHED if 
- *                   kbasep_js_try_schedule_head_ctx required
- */
-int kbase_replay_process(kbase_jd_atom *katom)
+static void kbase_replay_process_worker(struct work_struct *data)
 {
-       kbase_context *kctx = katom->kctx;
-       kbase_jd_context *jctx = &kctx->jctx;
-       mali_bool need_to_try_schedule_context = MALI_FALSE;
-       base_jd_atom_v2 t_atom, f_atom;
-       kbase_jd_atom *t_katom, *f_katom;
-       struct device *dev = kctx->kbdev->dev;
+       struct kbase_jd_atom *katom;
+       struct kbase_context *kctx;
+       struct kbase_jd_context *jctx;
+       bool need_to_try_schedule_context = false;
 
-       if (katom->event_code == BASE_JD_EVENT_DONE) {
-               dev_dbg(dev, "Previous job succeeded - not replaying\n");
-               return MALI_REPLAY_STATUS_COMPLETE;
-       }
+       struct base_jd_atom_v2 t_atom, f_atom;
+       struct kbase_jd_atom *t_katom, *f_katom;
 
-       if (jctx->sched_info.ctx.is_dying) {
-               dev_dbg(dev, "Not replaying; context is dying\n");
-               return MALI_REPLAY_STATUS_COMPLETE;
-       }
+       katom = container_of(data, struct kbase_jd_atom, work);
+       kctx = katom->kctx;
+       jctx = &kctx->jctx;
 
-       dev_warn(dev, "Replaying jobs retry=%d\n", katom->retry_count);
-
-       katom->retry_count++;
-       if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
-               dev_err(dev, "Replay exceeded limit - failing jobs\n");
-               /* katom->event_code is already set to the failure code of the
-                  previous job */
-               return MALI_REPLAY_STATUS_COMPLETE;
-       }
+       mutex_lock(&jctx->lock);
 
        if (kbasep_replay_create_atoms(kctx, &t_atom, &f_atom,
                                       katom->nice_prio) != MALI_ERROR_NONE) {
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
-               return MALI_REPLAY_STATUS_COMPLETE;
+               goto out;
        }
 
        t_katom = &jctx->atoms[t_atom.atom_number];
@@ -1038,32 +1056,226 @@ int kbase_replay_process(kbase_jd_atom *katom)
                kbasep_release_katom(kctx, t_atom.atom_number);
                kbasep_release_katom(kctx, f_atom.atom_number);
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
-               return MALI_REPLAY_STATUS_COMPLETE;
+               goto out;
        }
 
        kbasep_replay_reset_softjob(katom, f_katom);
 
        need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom);
        if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
-               dev_err(dev, "Replay failed to submit atom\n");
+               dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
                kbasep_release_katom(kctx, f_atom.atom_number);
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
-               katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
-               return MALI_REPLAY_STATUS_COMPLETE;
+               goto out;
        }
        need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom);
        if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
-               dev_err(dev, "Replay failed to submit atom\n");
+               dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
-               katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
-               return MALI_REPLAY_STATUS_COMPLETE;
+               goto out;
        }
 
        katom->event_code = BASE_JD_EVENT_DONE;
 
+out:
+       if (katom->event_code != BASE_JD_EVENT_DONE)
+               need_to_try_schedule_context |= jd_done_nolock(katom);
+
        if (need_to_try_schedule_context)
-               return MALI_REPLAY_STATUS_REPLAYING | 
-                                               MALI_REPLAY_FLAG_JS_RESCHED;
-       return MALI_REPLAY_STATUS_REPLAYING;
+               kbasep_js_try_schedule_head_ctx(kctx->kbdev);
+       mutex_unlock(&jctx->lock);
+}
+
+/**
+ * @brief Check job replay fault
+ *
+ * This reads the job payload, checks the fault type and source, then decides
+ * whether replay is required.
+ *
+ * @param[in] katom       The atom to be processed
+ * @return  true if a replay is required, false otherwise.
+ */
+static bool kbase_replay_fault_check(struct kbase_jd_atom *katom)
+{
+       struct kbase_context *kctx = katom->kctx;
+       struct device *dev = kctx->kbdev->dev;
+       base_jd_replay_payload *payload;
+       mali_addr64 job_header;
+       mali_addr64 job_loop_detect;
+       struct job_head *job;
+       struct kbasep_map_struct job_map;
+       struct kbasep_map_struct map;
+       bool err = false;
+
+       /* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or
+        * BASE_JD_EVENT_TERMINATED.
+        */
+       if ((BASE_JD_EVENT_TERMINATED      == katom->event_code) ||
+           (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code)) {
+               return true;
+       } else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) {
+               /* No replay for faults of type other than
+                * BASE_JD_EVENT_DATA_INVALID_FAULT.
+                */
+               return false;
+       }
+
+       /* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc
+        * to find out whether the source of exception is POLYGON_LIST. Replay
+        * is required if the source of fault is POLYGON_LIST.
+        */
+       kbase_gpu_vm_lock(kctx);
+
+       payload = kbasep_map(kctx, katom->jc, sizeof(*payload), &map);
+       if (!payload) {
+               kbase_gpu_vm_unlock(kctx);
+               dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n");
+               return false;
+       }
+
+#ifdef CONFIG_MALI_DEBUG
+       dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload);
+       dev_dbg(dev, "\nPayload structure:\n"
+                    "fragment_jc              = 0x%llx\n"
+                    "fragment_hierarchy_mask  = 0x%x\n"
+                    "fragment_core_req        = 0x%x\n",
+                    payload->fragment_jc,
+                    payload->fragment_hierarchy_mask,
+                    payload->fragment_core_req);
+#endif
+
+       /* Process fragment job chain */
+       job_header      = (mali_addr64) payload->fragment_jc;
+       job_loop_detect = job_header;
+       while (job_header) {
+               job = kbasep_map(kctx, job_header, sizeof(*job), &job_map);
+               if (!job) {
+                       dev_err(dev, "failed to map jc\n");
+                       /* unmap payload */
+                       kbasep_unmap(kctx, &map);
+                       kbase_gpu_vm_unlock(kctx);
+                       return false;
+               }
+
+
+#ifdef CONFIG_MALI_DEBUG
+               dev_dbg(dev, "\njob_head structure:\n"
+                            "Source ID:0x%x Access:0x%x Exception:0x%x\n"
+                            "at job addr               = %p\n"
+                            "not_complete_index        = 0x%x\n"
+                            "fault_addr                = 0x%llx\n"
+                            "flags                     = 0x%x\n"
+                            "index                     = 0x%x\n"
+                            "dependencies              = 0x%x,0x%x\n",
+                            JOB_SOURCE_ID(job->status),
+                            ((job->status >> 8) & 0x3),
+                            (job->status  & 0xFF),
+                            job,
+                            job->not_complete_index,
+                            job->fault_addr,
+                            job->flags,
+                            job->index,
+                            job->dependencies[0],
+                            job->dependencies[1]);
+#endif
+
+               /* Replay only when the polygon list reader caused the
+                * DATA_INVALID_FAULT */
+               if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) &&
+                   (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->status))) {
+                       err = true;
+                       kbasep_unmap(kctx, &job_map);
+                       break;
+               }
+
+               /* Move on to next fragment job in the list */
+               if (job->flags & JOB_FLAG_DESC_SIZE)
+                       job_header = job->next._64;
+               else
+                       job_header = job->next._32;
+
+               kbasep_unmap(kctx, &job_map);
+
+               /* Job chain loop detected */
+               if (job_header == job_loop_detect)
+                       break;
+       }
+
+       /* unmap payload */
+       kbasep_unmap(kctx, &map);
+       kbase_gpu_vm_unlock(kctx);
+
+       return err;
 }
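
kbase_replay_fault_check() walks the fragment job chain read back from GPU memory, so it guards against a malformed list with a simple check: remember the first descriptor and stop if following the next pointers ever returns to it. A self-contained illustration of that guard, using an invented node type rather than the driver's job_head.

#include <stddef.h>
#include <stdio.h>

struct node {			/* invented stand-in for a job descriptor */
	struct node *next;
};

/* Walk a list but bail out if it loops back to the starting element,
 * mirroring the job_header == job_loop_detect check above. */
static size_t walk_until_loop(struct node *head)
{
	struct node *cur = head;
	size_t visited = 0;

	while (cur) {
		visited++;
		cur = cur->next;
		if (cur == head)	/* chain loop detected */
			break;
	}
	return visited;
}

int main(void)
{
	struct node a, b, c;

	a.next = &b;
	b.next = &c;
	c.next = &a;		/* malformed chain that loops back to the head */

	printf("%zu\n", walk_until_loop(&a));	/* 3 */
	return 0;
}

Like the check in the driver, this only catches chains that loop straight back to the first element; it is not a general cycle detector.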
 
+
+/**
+ * @brief Process a replay job
+ *
+ * Called from kbase_process_soft_job.
+ *
+ * On exit, if the job has completed, katom->event_code will have been updated.
+ * If the job has not completed, and is replaying jobs, then the atom status
+ * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
+ *
+ * @param[in] katom  The atom to be processed
+ * @return           false if the atom has completed
+ *                   true if the atom is replaying jobs
+ */
+bool kbase_replay_process(struct kbase_jd_atom *katom)
+{
+       struct kbase_context *kctx = katom->kctx;
+       struct kbase_jd_context *jctx = &kctx->jctx;
+
+       if (katom->event_code == BASE_JD_EVENT_DONE) {
+               dev_dbg(kctx->kbdev->dev, "Previous job succeeded - not replaying\n");
+
+               if (katom->retry_count)
+                       kbase_disjoint_state_down(kctx->kbdev);
+
+               return false;
+       }
+
+       if (jctx->sched_info.ctx.is_dying) {
+               dev_dbg(kctx->kbdev->dev, "Not replaying; context is dying\n");
+
+               if (katom->retry_count)
+                       kbase_disjoint_state_down(kctx->kbdev);
+
+               return false;
+       }
+
+       /* Check job exception type and source before replaying. */
+       if (false == kbase_replay_fault_check(katom)) {
+               dev_dbg(kctx->kbdev->dev,
+                       "Replay cancelled on event %x\n", katom->event_code);
+               /* katom->event_code is already set to the failure code of the
+                * previous job.
+                */
+               return false;
+       }
+
+       dev_warn(kctx->kbdev->dev, "Replaying jobs retry=%d\n",
+                       katom->retry_count);
+
+       katom->retry_count++;
+
+       if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
+               dev_err(kctx->kbdev->dev, "Replay exceeded limit - failing jobs\n");
+
+               kbase_disjoint_state_down(kctx->kbdev);
+
+               /* katom->event_code is already set to the failure code
+                * of the previous job. */
+               return false;
+       }
+
+       /* Only enter the disjoint state once for the duration of the replay */
+       if (katom->retry_count == 1)
+               kbase_disjoint_state_up(kctx->kbdev);
+
+       INIT_WORK(&katom->work, kbase_replay_process_worker);
+       queue_work(kctx->event_workq, &katom->work);
+
+       return true;
+}
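
kbase_replay_process() no longer submits the replacement atoms inline: it bumps the retry count, tracks the disjoint state, and defers the real work to kbase_replay_process_worker() on kctx->event_workq, recovering the atom in the worker via container_of(). A minimal, self-contained module sketch of that defer-to-worker pattern; the replay_like_* names are invented for the example, and schedule_work() stands in for the driver's queue_work() on its own workqueue.

#include <linux/module.h>
#include <linux/workqueue.h>

/* Invented names throughout; only the pattern matches the driver code. */
struct replay_like_item {
	int retry_count;
	struct work_struct work;
};

static struct replay_like_item item;

static void replay_like_worker(struct work_struct *data)
{
	/* Recover the containing object, as kbase_replay_process_worker() does. */
	struct replay_like_item *it =
		container_of(data, struct replay_like_item, work);

	pr_info("replaying, retry=%d\n", it->retry_count);
}

static int __init replay_like_init(void)
{
	item.retry_count = 1;
	INIT_WORK(&item.work, replay_like_worker);
	/* The driver queues on kctx->event_workq; the system queue works for a demo. */
	schedule_work(&item.work);
	return 0;
}

static void __exit replay_like_exit(void)
{
	flush_work(&item.work);
}

module_init(replay_like_init);
module_exit(replay_like_exit);
MODULE_LICENSE("GPL");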
index babde223891afea3de95c281af9fbe3e5b581199..e7916374ffefc8dc28a76a42a6e5d4d6ded57e46 100755 (executable)
@@ -45,7 +45,7 @@ static inline mali_bool kbasep_am_i_root(void)
  * kbase_security_has_capability - see mali_kbase_caps.h for description.
  */
 
-mali_bool kbase_security_has_capability(kbase_context *kctx, kbase_security_capability cap, u32 flags)
+mali_bool kbase_security_has_capability(struct kbase_context *kctx, enum kbase_security_capability cap, u32 flags)
 {
        /* Assume failure */
        mali_bool access_allowed = MALI_FALSE;
index 783e2810d5b7115be283323e50a8c2e8c249c5d8..c8d4e53ae3b0b03ce648e7fbed70a5933231b7a6 100755 (executable)
 #define KBASE_SEC_FLAG_MASK    (KBASE_SEC_FLAG_AUDIT)  /* Mask of all valid flag bits */
 
 /* List of unique capabilities that have security access privileges */
-typedef enum {
+enum kbase_security_capability {
        /* Instrumentation Counters access privilege */
        KBASE_SEC_INSTR_HW_COUNTERS_COLLECT = 1,
        KBASE_SEC_MODIFY_PRIORITY
            /* Add additional access privileges here */
-} kbase_security_capability;
+};
 
 /**
  * kbase_security_has_capability - determine whether a task has a particular effective capability
@@ -47,6 +47,6 @@ typedef enum {
  * @return MALI_TRUE if success (capability is allowed), MALI_FALSE otherwise.
  */
 
-mali_bool kbase_security_has_capability(kbase_context *kctx, kbase_security_capability cap, u32 flags);
+mali_bool kbase_security_has_capability(struct kbase_context *kctx, enum kbase_security_capability cap, u32 flags);
 
 #endif                         /* _KBASE_SECURITY_H_ */
index 0324f9d502a9e9a008a2d788a450b56e88145e6c..7d1db94e0dfc57595ccefa52134b135d946488ba 100755 (executable)
@@ -19,6 +19,7 @@
 
 #include <mali_kbase.h>
 
+#include <linux/dma-mapping.h>
 #ifdef CONFIG_SYNC
 #include "sync.h"
 #include <linux/syscalls.h>
  * executed within the driver rather than being handed over to the GPU.
  */
 
-static int kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
+static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
 {
-       kbase_va_region *reg;
+       struct kbase_va_region *reg;
        phys_addr_t addr = 0;
        u64 pfn;
        u32 offset;
        char *page;
        struct timespec ts;
-       base_dump_cpu_gpu_counters data;
+       struct base_dump_cpu_gpu_counters data;
        u64 system_time;
        u64 cycle_counter;
        mali_addr64 jc = katom->jc;
-       kbase_context *kctx = katom->kctx;
+       struct kbase_context *kctx = katom->kctx;
        int pm_active_err;
 
        u32 hi1, hi2;
@@ -60,7 +61,7 @@ static int kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
         * long chain of dependencies */
        pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
        if (pm_active_err) {
-               kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
+               struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
 
                /* We're suspended - queue this on the list of suspended jobs
                 * Use dep_item[1], because dep_item[0] is in use for 'waiting_soft_jobs' */
@@ -126,8 +127,15 @@ static int kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
        if (!page)
                return 0;
 
+       dma_sync_single_for_cpu(katom->kctx->kbdev->dev,
+                       page_private(pfn_to_page(PFN_DOWN(addr))) +
+                       offset, sizeof(data),
+                       DMA_BIDIRECTIONAL);
        memcpy(page + offset, &data, sizeof(data));
-       kbase_sync_to_cpu(addr + offset, page + offset, sizeof(data));
+       dma_sync_single_for_device(katom->kctx->kbdev->dev,
+                       page_private(pfn_to_page(PFN_DOWN(addr))) +
+                       offset, sizeof(data),
+                       DMA_BIDIRECTIONAL);
        kunmap(pfn_to_page(PFN_DOWN(addr)));
 
        /* Atom was fine - mark it as done */
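
The hunk above swaps the driver-private kbase_sync_to_cpu() helper for the stock streaming-DMA calls: the CPU claims the cache lines before writing the counter dump and hands them back to the device afterwards. A self-contained sketch of that bracket, where only dma_sync_single_for_cpu()/dma_sync_single_for_device() are the real kernel API and the wrapper, its parameters and the origin of the DMA handle are assumptions:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Sketch only: CPU write into a buffer that the device also accesses.
 * 'dma_handle' is assumed to come from an earlier dma_map_single(). */
static void cpu_write_shared_buffer(struct device *dev, dma_addr_t dma_handle,
				    void *cpu_va, const void *src, size_t len)
{
	/* Take CPU ownership so the write lands in a coherent view. */
	dma_sync_single_for_cpu(dev, dma_handle, len, DMA_BIDIRECTIONAL);

	memcpy(cpu_va, src, len);

	/* Return ownership so the device observes the new contents. */
	dma_sync_single_for_device(dev, dma_handle, len, DMA_BIDIRECTIONAL);
}
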
@@ -142,9 +150,9 @@ static int kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
  *
  * @param katom     The atom to complete
  */
-static void complete_soft_job(kbase_jd_atom *katom)
+static void complete_soft_job(struct kbase_jd_atom *katom)
 {
-       kbase_context *kctx = katom->kctx;
+       struct kbase_context *kctx = katom->kctx;
 
        mutex_lock(&kctx->jctx.lock);
        list_del(&katom->dep_item[0]);
@@ -154,7 +162,7 @@ static void complete_soft_job(kbase_jd_atom *katom)
        mutex_unlock(&kctx->jctx.lock);
 }
 
-static base_jd_event_code kbase_fence_trigger(kbase_jd_atom *katom, int result)
+static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
 {
        struct sync_pt *pt;
        struct sync_timeline *timeline;
@@ -181,10 +189,10 @@ static base_jd_event_code kbase_fence_trigger(kbase_jd_atom *katom, int result)
 
 static void kbase_fence_wait_worker(struct work_struct *data)
 {
-       kbase_jd_atom *katom;
-       kbase_context *kctx;
+       struct kbase_jd_atom *katom;
+       struct kbase_context *kctx;
 
-       katom = container_of(data, kbase_jd_atom, work);
+       katom = container_of(data, struct kbase_jd_atom, work);
        kctx = katom->kctx;
 
        complete_soft_job(katom);
@@ -192,8 +200,8 @@ static void kbase_fence_wait_worker(struct work_struct *data)
 
 static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
 {
-       kbase_jd_atom *katom = container_of(waiter, kbase_jd_atom, sync_waiter);
-       kbase_context *kctx;
+       struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
+       struct kbase_context *kctx;
 
        KBASE_DEBUG_ASSERT(NULL != katom);
 
@@ -205,9 +213,7 @@ static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fenc
         * If negative then cancel this atom and its dependencies.
         */
        if (fence->status < 0)
-       {
                katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
-       }
 
        /* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue
         *
@@ -220,7 +226,7 @@ static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fenc
        queue_work(kctx->jctx.job_done_wq, &katom->work);
 }
 
-static int kbase_fence_wait(kbase_jd_atom *katom)
+static int kbase_fence_wait(struct kbase_jd_atom *katom)
 {
        int ret;
 
@@ -249,7 +255,7 @@ static int kbase_fence_wait(kbase_jd_atom *katom)
        return 1;
 }
 
-static void kbase_fence_cancel_wait(kbase_jd_atom *katom)
+static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
 {
        if(!katom)
        {
@@ -266,12 +272,12 @@ static void kbase_fence_cancel_wait(kbase_jd_atom *katom)
                */
                goto finish_softjob;
        }
-       if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0)
-       {
+
+       if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
                /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
                return;
        }
-       
+
        /* Wait was cancelled - zap the atoms */
 finish_softjob:
        katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
@@ -280,14 +286,13 @@ finish_softjob:
 
        if (jd_done_nolock(katom))
                kbasep_js_try_schedule_head_ctx(katom->kctx->kbdev);
-       return;
 
+       return;
 }
 #endif /* CONFIG_SYNC */
 
-int kbase_process_soft_job(kbase_jd_atom *katom)
+int kbase_process_soft_job(struct kbase_jd_atom *katom)
 {
-       int status;
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                return kbase_dump_cpu_gpu_time(katom);
@@ -303,17 +308,14 @@ int kbase_process_soft_job(kbase_jd_atom *katom)
                return kbase_fence_wait(katom);
 #endif                         /* CONFIG_SYNC */
        case BASE_JD_REQ_SOFT_REPLAY:
-               status = kbase_replay_process(katom);
-               if (status & MALI_REPLAY_FLAG_JS_RESCHED)
-                       pr_err("replay called from kbase_process_soft_job - missing resched!\n");
-               return status & MALI_REPLAY_STATUS_MASK;
+               return kbase_replay_process(katom);
        }
 
        /* Atom is complete */
        return 0;
 }
 
-void kbase_cancel_soft_job(kbase_jd_atom *katom)
+void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
 {
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
 #ifdef CONFIG_SYNC
@@ -327,20 +329,21 @@ void kbase_cancel_soft_job(kbase_jd_atom *katom)
        }
 }
 
-mali_error kbase_prepare_soft_job(kbase_jd_atom *katom)
+mali_error kbase_prepare_soft_job(struct kbase_jd_atom *katom)
 {
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
                {
-                       if(0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
+                       if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
                                return MALI_ERROR_FUNCTION_FAILED;
                }
                break;
 #ifdef CONFIG_SYNC
        case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
                {
-                       base_fence fence;
+                       struct base_fence fence;
                        int fd;
+
                        if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
                                return MALI_ERROR_FUNCTION_FAILED;
 
@@ -365,7 +368,8 @@ mali_error kbase_prepare_soft_job(kbase_jd_atom *katom)
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                {
-                       base_fence fence;
+                       struct base_fence fence;
+
                        if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
                                return MALI_ERROR_FUNCTION_FAILED;
 
@@ -385,7 +389,7 @@ mali_error kbase_prepare_soft_job(kbase_jd_atom *katom)
        return MALI_ERROR_NONE;
 }
 
-void kbase_finish_soft_job(kbase_jd_atom *katom)
+void kbase_finish_soft_job(struct kbase_jd_atom *katom)
 {
        switch (katom->core_req & BASEP_JD_REQ_ATOM_TYPE) {
        case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
@@ -402,7 +406,7 @@ void kbase_finish_soft_job(kbase_jd_atom *katom)
                break;
        case BASE_JD_REQ_SOFT_FENCE_WAIT:
                /* Release the reference to the fence object */
-               if(katom->fence){
+               if(katom->fence) {
                        sync_fence_put(katom->fence);
                        katom->fence = NULL;
                }
@@ -411,13 +415,14 @@ void kbase_finish_soft_job(kbase_jd_atom *katom)
        }
 }
 
-void kbase_resume_suspended_soft_jobs(kbase_device *kbdev)
+void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
 {
        LIST_HEAD(local_suspended_soft_jobs);
-       kbase_jd_atom *tmp_iter;
-       kbase_jd_atom *katom_iter;
-       kbasep_js_device_data *js_devdata;
+       struct kbase_jd_atom *tmp_iter;
+       struct kbase_jd_atom *katom_iter;
+       struct kbasep_js_device_data *js_devdata;
        mali_bool resched = MALI_FALSE;
+
        KBASE_DEBUG_ASSERT(kbdev);
 
        js_devdata = &kbdev->js_data;
@@ -431,7 +436,8 @@ void kbase_resume_suspended_soft_jobs(kbase_device *kbdev)
         * be re-added to the old list, but this is unlikely */
        list_for_each_entry_safe(katom_iter, tmp_iter, &local_suspended_soft_jobs, dep_item[1])
        {
-               kbase_context *kctx = katom_iter->kctx;
+               struct kbase_context *kctx = katom_iter->kctx;
+
                mutex_lock(&kctx->jctx.lock);
 
                /* Remove from the global list */
@@ -445,7 +451,7 @@ void kbase_resume_suspended_soft_jobs(kbase_device *kbdev)
                } else {
                        /* The job has not completed */
                        KBASE_DEBUG_ASSERT((katom_iter->core_req & BASEP_JD_REQ_ATOM_TYPE)
-                                                               != BASE_JD_REQ_SOFT_REPLAY);
+                                       != BASE_JD_REQ_SOFT_REPLAY);
                        list_add_tail(&katom_iter->dep_item[0], &kctx->waiting_soft_jobs);
                }
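
The resume loop above relies on list_for_each_entry_safe(), which caches the next node before the body runs, so each soft job can be unlinked (and possibly re-queued elsewhere) while the walk is in progress. A small self-contained illustration of the same idiom, with every name below made up for the example:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_job {
	struct list_head link;
	int id;
};

/* Sketch only: draining a list while deleting entries mid-walk. */
static void drain_jobs(struct list_head *pending)
{
	struct demo_job *job, *tmp;

	list_for_each_entry_safe(job, tmp, pending, link) {
		list_del(&job->link);	/* safe: 'tmp' already points past it */
		kfree(job);
	}
}
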
 
index f8db35e31c1f8d983b9bd2b64926cb770c190998..506b397176f6986bdfde2ab13adbe88d30f18c70 100755 (executable)
@@ -35,7 +35,7 @@ struct mali_sync_timeline {
 
 struct mali_sync_pt {
        struct sync_pt pt;
-       u32 order;
+       int order;
        int result;
 };
 
@@ -63,7 +63,6 @@ static struct sync_pt *timeline_dup(struct sync_pt *pt)
        new_mpt->result = mpt->result;
 
        return new_pt;
-
 }
 
 static int timeline_has_signaled(struct sync_pt *pt)
@@ -72,12 +71,10 @@ static int timeline_has_signaled(struct sync_pt *pt)
        struct mali_sync_timeline *mtl = to_mali_sync_timeline(pt->parent);
        int result = mpt->result;
 
-       long diff = atomic_read(&mtl->signalled) - mpt->order;
+       int diff = atomic_read(&mtl->signalled) - mpt->order;
 
        if (diff >= 0)
-       {
                return result < 0 ?  result : 1;
-       }
        else
                return 0;
 }
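
timeline_has_signaled() orders sync points by subtracting sequence counters in their own width; because kernels of this era are built with -fno-strict-overflow, the signed subtraction wraps, and the sign of the difference stays meaningful across counter overflow. Narrowing the difference from long to int keeps the arithmetic in the counters' width now that 'order' is an int. A hypothetical helper showing the same test:

/* Sketch only: wrap-safe "has 'latest' reached 'order'?" for 32-bit
 * sequence counters, mirroring the diff computation above. */
static int seq_reached(int latest, int order)
{
	int diff = latest - order;	/* wraps in 32 bits */

	return diff >= 0;
}

For example, with latest = INT_MIN + 5 and order = INT_MAX - 10 the wrapped difference is 16, so the point is reported as signalled even though a plain comparison of the raw values would say the opposite.
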
@@ -87,7 +84,7 @@ static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
        struct mali_sync_pt *ma = container_of(a, struct mali_sync_pt, pt);
        struct mali_sync_pt *mb = container_of(b, struct mali_sync_pt, pt);
 
-       long diff = ma->order - mb->order;
+       int diff = ma->order - mb->order;
 
        if (diff < 0)
                return -1;
@@ -97,16 +94,18 @@ static int timeline_compare(struct sync_pt *a, struct sync_pt *b)
                return 1;
 }
 
-static void timeline_value_str(struct sync_timeline *timeline, char * str,
+static void timeline_value_str(struct sync_timeline *timeline, char *str,
                               int size)
 {
        struct mali_sync_timeline *mtl = to_mali_sync_timeline(timeline);
+
        snprintf(str, size, "%d", atomic_read(&mtl->signalled));
 }
 
 static void pt_value_str(struct sync_pt *pt, char *str, int size)
 {
        struct mali_sync_pt *mpt = to_mali_sync_pt(pt);
+
        snprintf(str, size, "%d(%d)", mpt->order, mpt->result);
 }
 
@@ -117,10 +116,6 @@ static struct sync_timeline_ops mali_timeline_ops = {
        .compare = timeline_compare,
        .timeline_value_str = timeline_value_str,
        .pt_value_str       = pt_value_str,
-#if 0
-       .free_pt = timeline_free_pt,
-       .release_obj = timeline_release_obj
-#endif
 };
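
With the dead #if 0 entries gone, mali_timeline_ops is just the callback table the Android staging sync framework consults for driver-specific sync points; in that framework .has_signaled is expected to return 1 when signalled, 0 while pending and a negative error code on failure, and .compare returns -1/0/1 for ordering (details recalled from the staging sync framework of this era, so treat them as an assumption rather than something this patch states). A rough sketch of how such a table is turned into a timeline, which kbase_sync_timeline_alloc() elsewhere in this file is expected to do equivalently:

/* Sketch only: allocating a driver timeline from the ops table above;
 * the size argument reserves room for struct mali_sync_timeline. */
struct sync_timeline *tl = sync_timeline_create(&mali_timeline_ops,
				sizeof(struct mali_sync_timeline),
				"mali-demo");
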
 
 int kbase_sync_timeline_is_ours(struct sync_timeline *timeline)
@@ -171,7 +166,6 @@ void kbase_sync_signal_pt(struct sync_pt *pt, int result)
        mpt->result = result;
 
        do {
-
                signalled = atomic_read(&mtl->signalled);
 
                diff = signalled - mpt->order;
index 53a936f6b05c5ad87353d526e3f551a7e4f7915c..c823226e151a0bb1bac2773ff87af606b07e41a9 100755 (executable)
@@ -38,6 +38,7 @@
 static int kbase_stream_close(struct inode *inode, struct file *file)
 {
        struct sync_timeline *tl;
+
        tl = (struct sync_timeline *)file->private_data;
        BUG_ON(!tl);
        sync_timeline_destroy(tl);
@@ -52,6 +53,7 @@ static const struct file_operations stream_fops = {
 mali_error kbase_stream_create(const char *name, int *const out_fd)
 {
        struct sync_timeline *tl;
+
        BUG_ON(!out_fd);
 
        tl = kbase_sync_timeline_alloc(name);
@@ -143,6 +145,7 @@ int kbase_stream_create_fence(int tl_fd)
 mali_error kbase_fence_validate(int fd)
 {
        struct sync_fence *fence;
+
        fence = sync_fence_fdget(fd);
        if (NULL != fence) {
                sync_fence_put(fence);
index 2e5b7443356df9a7334255d239e1bb5a99803489..e492ff2ca4815baf332fb4666b0605511e6362b5 100755 (executable)
@@ -68,105 +68,135 @@ int dummy_array[] = {
 /*
  * Core events
  */
-       KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY),   /* no info_val, no gpu_addr, no atom */
-       KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM),      /* no info_val, no gpu_addr, no atom */
-       KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ),       /* info_val == GPU_IRQ_STATUS register */
-       KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR), /* info_val == bits cleared */
-       KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE),  /* info_val == GPU_IRQ_STATUS register */
+       /* no info_val, no gpu_addr, no atom */
+       KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_DESTROY),
+       /* no info_val, no gpu_addr, no atom */
+       KBASE_TRACE_CODE_MAKE_CODE(CORE_CTX_HWINSTR_TERM),
+       /* info_val == GPU_IRQ_STATUS register */
+       KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ),
+       /* info_val == bits cleared */
+       KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_CLEAR),
+       /* info_val == GPU_IRQ_STATUS register */
+       KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_IRQ_DONE),
        KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET),
        KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_HARD_RESET),
        KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_CLEAR),
-       KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE),     /* GPU addr==dump address */
+       /* GPU addr==dump address */
+       KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_PRFCNT_SAMPLE),
        KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_CLEAN_INV_CACHES),
-
 /*
  * Job Slot management events
  */
-       KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ),     /* info_val==irq rawstat at start */
+       /* info_val==irq rawstat at start */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ),
+       /* info_val==jobs processed */
        KBASE_TRACE_CODE_MAKE_CODE(JM_IRQ_END),
-                                       /* info_val==jobs processed */
 /* In the following:
  *
  * - ctx is set if a corresponding job found (NULL otherwise, e.g. some soft-stop cases)
  * - uatom==kernel-side mapped uatom address (for correlation with user-side)
  */
-       KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE),        /* info_val==exit code; gpu_addr==chain gpuaddr */
+       /* info_val==exit code; gpu_addr==chain gpuaddr */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_JOB_DONE),
+       /* gpu_addr==JS_HEAD_NEXT written, info_val==lower 32 bits of affinity */
        KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT),
-                                       /* gpu_addr==JSn_HEAD_NEXT written, info_val==lower 32 bits of affinity */
-/* gpu_addr is as follows:
- * - If JSn_STATUS active after soft-stop, val==gpu addr written to JSn_HEAD on submit
- * - otherwise gpu_addr==0 */
+       /* gpu_addr is as follows:
+        * - If JS_STATUS active after soft-stop, val==gpu addr written to
+        *   JS_HEAD on submit
+        * - otherwise gpu_addr==0 */
        KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP),
        KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_0),
        KBASE_TRACE_CODE_MAKE_CODE(JM_SOFTSTOP_1),
-       KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP),        /* gpu_addr==JSn_HEAD read */
-       KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0),      /* gpu_addr==JSn_HEAD read */
-       KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1),      /* gpu_addr==JSn_HEAD read */
-       KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD),     /* gpu_addr==JSn_TAIL read */
+       /* gpu_addr==JS_HEAD read */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP),
+       /* gpu_addr==JS_HEAD read */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_0),
+       /* gpu_addr==JS_HEAD read */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_HARDSTOP_1),
+       /* gpu_addr==JS_TAIL read */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_UPDATE_HEAD),
 /* gpu_addr is as follows:
- * - If JSn_STATUS active before soft-stop, val==JSn_HEAD
- * - otherwise gpu_addr==0 */
-       KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD),      /* gpu_addr==JSn_HEAD read */
+ * - If JS_STATUS active before soft-stop, val==JS_HEAD
+ * - otherwise gpu_addr==0
+ */
+       /* gpu_addr==JS_HEAD read */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_CHECK_HEAD),
        KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS),
        KBASE_TRACE_CODE_MAKE_CODE(JM_FLUSH_WORKQS_DONE),
-       KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED),       /* info_val == is_scheduled */
+       /* info_val == is_scheduled */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_NON_SCHEDULED),
+       /* info_val == is_scheduled */
        KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_SCHEDULED),
-                                               /* info_val == is_scheduled */
        KBASE_TRACE_CODE_MAKE_CODE(JM_ZAP_DONE),
-       KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP),  /* info_val == nr jobs submitted */
-       KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT),      /* gpu_addr==JSn_HEAD_NEXT last written */
+       /* info_val == nr jobs submitted */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_SOFT_OR_HARD_STOP),
+       /* gpu_addr==JS_HEAD_NEXT last written */
+       KBASE_TRACE_CODE_MAKE_CODE(JM_SLOT_EVICT),
        KBASE_TRACE_CODE_MAKE_CODE(JM_SUBMIT_AFTER_RESET),
        KBASE_TRACE_CODE_MAKE_CODE(JM_BEGIN_RESET_WORKER),
        KBASE_TRACE_CODE_MAKE_CODE(JM_END_RESET_WORKER),
 /*
  * Job dispatch events
  */
-       KBASE_TRACE_CODE_MAKE_CODE(JD_DONE),/* gpu_addr==value to write into JSn_HEAD */
-       KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER),     /* gpu_addr==value to write into JSn_HEAD */
+       /* gpu_addr==value to write into JS_HEAD */
+       KBASE_TRACE_CODE_MAKE_CODE(JD_DONE),
+       /* gpu_addr==value to write into JS_HEAD */
+       KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER),
+       /* gpu_addr==value to write into JS_HEAD */
        KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_WORKER_END),
-                                               /* gpu_addr==value to write into JSn_HEAD */
+       /* gpu_addr==value to write into JS_HEAD */
        KBASE_TRACE_CODE_MAKE_CODE(JD_DONE_TRY_RUN_NEXT_JOB),
-                                                       /* gpu_addr==value to write into JSn_HEAD */
-       KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT),     /* gpu_addr==0, info_val==0, uatom==0 */
+       /* gpu_addr==0, info_val==0, uatom==0 */
+       KBASE_TRACE_CODE_MAKE_CODE(JD_ZAP_CONTEXT),
+       /* gpu_addr==value to write into JS_HEAD */
        KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL),
-                                       /* gpu_addr==value to write into JSn_HEAD */
+       /* gpu_addr==value to write into JS_HEAD */
        KBASE_TRACE_CODE_MAKE_CODE(JD_CANCEL_WORKER),
-                                               /* gpu_addr==value to write into JSn_HEAD */
 /*
  * Scheduler Core events
  */
        KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX_NOLOCK),
-       KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB), /* gpu_addr==value to write into JSn_HEAD */
-       KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB),      /* gpu_addr==last value written/would be written to JSn_HEAD */
+       /* gpu_addr==value to write into JS_HEAD */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_ADD_JOB),
+       /* gpu_addr==last value written/would be written to JS_HEAD */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_REMOVE_JOB),
        KBASE_TRACE_CODE_MAKE_CODE(JS_RETAIN_CTX),
        KBASE_TRACE_CODE_MAKE_CODE(JS_RELEASE_CTX),
        KBASE_TRACE_CODE_MAKE_CODE(JS_TRY_SCHEDULE_HEAD_CTX),
-       KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB),       /* gpu_addr==value to write into JSn_HEAD */
+       /* gpu_addr==value to write into JS_HEAD */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_TRY_RUN_NEXT_JOB),
+       /* gpu_addr==value to write into JS_HEAD */
        KBASE_TRACE_CODE_MAKE_CODE(JS_JOB_DONE_RETRY_NEEDED),
-                                                       /* gpu_addr==value to write into JSn_HEAD */
+       /* kctx is the one being evicted, info_val == kctx to put in  */
        KBASE_TRACE_CODE_MAKE_CODE(JS_FAST_START_EVICTS_CTX),
-                                                       /* kctx is the one being evicted, info_val == kctx to put in  */
        KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_SUBMIT_TO_BLOCKED),
-       KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT),        /* info_val == lower 32 bits of affinity */
+       /* info_val == lower 32 bits of affinity */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_AFFINITY_CURRENT),
+       /* info_val == lower 32 bits of affinity */
        KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_CORES_FAILED),
-                                                               /* info_val == lower 32 bits of affinity */
+       /* info_val == lower 32 bits of affinity */
        KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_INUSE_FAILED),
-                                                               /* info_val == lower 32 bits of affinity */
-       KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED),      /* info_val == lower 32 bits of rechecked affinity */
-       KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED),     /* info_val == lower 32 bits of rechecked affinity */
+       /* info_val == lower 32 bits of rechecked affinity */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED),
+       /* info_val == lower 32 bits of rechecked affinity */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED),
+       /* info_val == lower 32 bits of affinity */
        KBASE_TRACE_CODE_MAKE_CODE(JS_CORE_REF_AFFINITY_WOULD_VIOLATE),
-                                                               /* info_val == lower 32 bits of affinity */
-       KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX),     /* info_val == the ctx attribute now on ctx */
+       /* info_val == the ctx attribute now on ctx */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_CTX),
+       /* info_val == the ctx attribute now on runpool */
        KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_ON_RUNPOOL),
-                                                       /* info_val == the ctx attribute now on runpool */
-       KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX),/* info_val == the ctx attribute now off ctx */
-       KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL),        /* info_val == the ctx attribute now off runpool */
+       /* info_val == the ctx attribute now off ctx */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_CTX),
+       /* info_val == the ctx attribute now off runpool */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_CTX_ATTR_NOW_OFF_RUNPOOL),
 /*
  * Scheduler Policy events
  */
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_INIT_CTX),
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TERM_CTX),
-       KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX),    /* info_val == whether it was evicted */
+       /* info_val == whether it was evicted */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TRY_EVICT_CTX),
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_FOREACH_CTX_JOBS),
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_CTX),
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_HEAD_CTX),
@@ -174,7 +204,8 @@ int dummy_array[] = {
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_RUNPOOL_REMOVE_CTX),
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB),
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_DEQUEUE_JOB_IRQ),
-       KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB),      /* gpu_addr==JSn_HEAD to write if the job were run */
+       /* gpu_addr==JS_HEAD to write if the job were run */
+       KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_ENQUEUE_JOB),
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_START),
        KBASE_TRACE_CODE_MAKE_CODE(JS_POLICY_TIMER_END),
 /*
@@ -215,16 +246,17 @@ int dummy_array[] = {
        KBASE_TRACE_CODE_MAKE_CODE(PM_CONTEXT_IDLE),
        KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_ON),
        KBASE_TRACE_CODE_MAKE_CODE(PM_GPU_OFF),
-       KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY),      /* info_val == policy number, or -1 for "Already changing" */
+       /* info_val == policy number, or -1 for "Already changing" */
+       KBASE_TRACE_CODE_MAKE_CODE(PM_SET_POLICY),
        KBASE_TRACE_CODE_MAKE_CODE(PM_CA_SET_POLICY),
-
-       KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT),     /* info_val == policy number */
-       KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM),     /* info_val == policy number */
+       /* info_val == policy number */
+       KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_INIT),
+       /* info_val == policy number */
+       KBASE_TRACE_CODE_MAKE_CODE(PM_CURRENT_POLICY_TERM),
 /* Unused code just to make it easier to not have a comma at the end.
  * All other codes MUST come before this */
        KBASE_TRACE_CODE_MAKE_CODE(DUMMY)
 
-
 #if 0 /* Dummy section to avoid breaking formatting */
 };
 #endif
index 0968025359c10036da6429af4c39fb0e1e58c8cc..49f5035a913e31e921abdad1171178565d1cdf98 100755 (executable)
@@ -95,7 +95,7 @@ static const struct file_operations kbasep_trace_timeline_debugfs_fops = {
        .release = seq_release_private,
 };
 
-mali_error kbasep_trace_timeline_debugfs_init(kbase_device *kbdev)
+mali_error kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev)
 {
        kbdev->timeline.dentry = debugfs_create_file("mali_timeline_defs",
                        S_IRUGO, kbdev->mali_debugfs_directory, NULL,
@@ -106,17 +106,17 @@ mali_error kbasep_trace_timeline_debugfs_init(kbase_device *kbdev)
        return MALI_ERROR_NONE;
 }
 
-void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev)
+void kbasep_trace_timeline_debugfs_term(struct kbase_device *kbdev)
 {
        debugfs_remove(kbdev->timeline.dentry);
 }
 
-void kbase_timeline_job_slot_submit(kbase_device *kbdev, kbase_context *kctx,
-                                    kbase_jd_atom *katom, int js)
+void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+                                    struct kbase_jd_atom *katom, int js)
 {
        lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
 
-       if(kbdev->timeline.slot_atoms_submitted[js] > 0) {
+       if (kbdev->timeline.slot_atoms_submitted[js] > 0) {
                KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 1);
        } else {
                base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
@@ -128,8 +128,8 @@ void kbase_timeline_job_slot_submit(kbase_device *kbdev, kbase_context *kctx,
        KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
 }
 
-void kbase_timeline_job_slot_done(kbase_device *kbdev, kbase_context *kctx,
-                                  kbase_jd_atom *katom, int js,
+void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+                                  struct kbase_jd_atom *katom, int js,
                                   kbasep_js_atom_done_code done_code)
 {
        lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
@@ -137,19 +137,19 @@ void kbase_timeline_job_slot_done(kbase_device *kbdev, kbase_context *kctx,
        if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) {
                KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 0);
        } else {
-               /* Job finished in JSn_HEAD */
+               /* Job finished in JS_HEAD */
                base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
                KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 0);
                KBASE_TIMELINE_JOB_STOP(kctx, js, atom_number);
-               /* see if we need to trace the job in JSn_NEXT moving to JSn_HEAD */
+               /* see if we need to trace the job in JS_NEXT moving to JS_HEAD */
                if (kbdev->timeline.slot_atoms_submitted[js] > 1) {
                        /* Tag events with next_katom's kctx */
-                       kbase_jm_slot *slot = &kbdev->jm_slots[js];
-                       kbase_jd_atom *next_katom;
-                       kbase_context *next_kctx;
+                       struct kbase_jm_slot *slot = &kbdev->jm_slots[js];
+                       struct kbase_jd_atom *next_katom;
+                       struct kbase_context *next_kctx;
                        KBASE_DEBUG_ASSERT(kbasep_jm_nr_jobs_submitted(slot) > 0);
 
-                       /* Peek the next atom - note that the atom in JSn_HEAD will already
+                       /* Peek the next atom - note that the atom in JS_HEAD will already
                         * have been dequeued */
                        next_katom = kbasep_jm_peek_idx_submit_slot(slot, 0);
                        next_kctx = next_katom->kctx;
@@ -164,7 +164,7 @@ void kbase_timeline_job_slot_done(kbase_device *kbdev, kbase_context *kctx,
        KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
 }
 
-void kbase_timeline_pm_send_event(kbase_device *kbdev, kbase_timeline_pm_event event_sent)
+void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
 {
        int uid = 0;
        int old_uid;
@@ -178,14 +178,14 @@ void kbase_timeline_pm_send_event(kbase_device *kbdev, kbase_timeline_pm_event e
                uid = atomic_inc_return(&kbdev->timeline.pm_event_uid_counter);
 
        /* Try to use this UID */
-       if ( old_uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event_sent], old_uid, uid))
+       if (old_uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event_sent], old_uid, uid))
                /* If it changed, raced with another producer: we've lost this UID */
                uid = 0;
 
        KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_sent, uid);
 }
 
-void kbase_timeline_pm_check_handle_event(kbase_device *kbdev, kbase_timeline_pm_event event)
+void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
 {
        int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);
 
@@ -198,7 +198,7 @@ void kbase_timeline_pm_check_handle_event(kbase_device *kbdev, kbase_timeline_pm
        }
 }
 
-void kbase_timeline_pm_handle_event(kbase_device *kbdev, kbase_timeline_pm_event event)
+void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
 {
        int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);
 
@@ -209,7 +209,7 @@ void kbase_timeline_pm_handle_event(kbase_device *kbdev, kbase_timeline_pm_event
        KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
 }
 
-void kbase_timeline_pm_l2_transition_start(kbase_device *kbdev)
+void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
 {
        lockdep_assert_held(&kbdev->pm.power_change_lock);
        /* Simply log the start of the transition */
@@ -217,11 +217,11 @@ void kbase_timeline_pm_l2_transition_start(kbase_device *kbdev)
        KBASE_TIMELINE_POWERING_L2(kbdev);
 }
 
-void kbase_timeline_pm_l2_transition_done(kbase_device *kbdev)
+void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
 {
        lockdep_assert_held(&kbdev->pm.power_change_lock);
        /* Simply log the end of the transition */
-       if( MALI_FALSE != kbdev->timeline.l2_transitioning )
+       if (MALI_FALSE != kbdev->timeline.l2_transitioning)
        {
                kbdev->timeline.l2_transitioning = MALI_FALSE;
                KBASE_TIMELINE_POWERED_L2(kbdev);
index fc2a383ad1dfc24d3615aa4fd48100d484b53e92..63cacff66e1c36c4fe8729a7b5a47ddb5305de08 100755 (executable)
@@ -30,9 +30,9 @@ typedef enum
 } kbase_trace_timeline_code;
 
 /** Initialize Timeline DebugFS entries */
-mali_error kbasep_trace_timeline_debugfs_init(kbase_device *kbdev);
+mali_error kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
 /** Terminate Timeline DebugFS entries */
-void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
+void kbasep_trace_timeline_debugfs_term(struct kbase_device *kbdev);
 
 /* mali_timeline.h defines kernel tracepoints used by the KBASE_TIMELINE
  * functions.
@@ -64,7 +64,6 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
                                         atom_id);                         \
        } while (0)
 
-
 /* Trace number of atoms submitted to job slot js
  *
  * NOTE: This uses a different tracepoint to the head/next/soft-stop actions,
@@ -84,7 +83,7 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
        } while (0)
 
 
-/* Trace atoms present in JSn_NEXT */
+/* Trace atoms present in JS_NEXT */
 #define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count)                             \
        do                                                                          \
        {                                                                           \
@@ -96,7 +95,7 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
                                                    js, count);                     \
        } while (0)
 
-/* Trace atoms present in JSn_HEAD */
+/* Trace atoms present in JS_HEAD */
 #define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count)                             \
        do                                                                          \
        {                                                                           \
@@ -116,7 +115,7 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
                getnstimeofday(&ts);                                                \
                trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec,          \
                                                    SW_SET_GPU_SLOT_STOPPING,       \
-                                                   (kctx)?(int)kctx->timeline.owner_tgid:0, \
+                                                   (kctx) ? (int)kctx->timeline.owner_tgid : 0, \
                                                    js, count);                     \
        } while (0)
 
@@ -163,7 +162,7 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
                trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec,         \
                                                             SW_SET_GPU_POWER_L2_ACTIVE,    \
                                                             hweight64(bitmap));            \
-       }while(0)
+       } while (0)
 
 /* Trace state of L2 cache*/
 #define KBASE_TIMELINE_POWERING_L2(kbdev)                                   \
@@ -174,7 +173,7 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
                trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec,              \
                                                    SW_FLOW_GPU_POWER_L2_POWERING,  \
                                                    1);                             \
-       }while(0)
+       } while (0)
 
 #define KBASE_TIMELINE_POWERED_L2(kbdev)                                    \
        do                                                                      \
@@ -184,7 +183,7 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
                trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec,          \
                                                    SW_FLOW_GPU_POWER_L2_ACTIVE,    \
                                                     1);                            \
-       }while(0)
+       } while (0)
 
 /* Trace kbase_pm_send_event message send */
 #define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) \
@@ -209,7 +208,7 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
        } while (0)
 
 
-/* Trace atom_id starting in JSn_HEAD */
+/* Trace atom_id starting in JS_HEAD */
 #define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number)     \
        do                                                                  \
        {                                                                   \
@@ -221,7 +220,7 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
                                              js, _consumerof_atom_number);     \
        } while (0)
 
-/* Trace atom_id stopping on JSn_HEAD */
+/* Trace atom_id stopping on JS_HEAD */
 #define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) \
        do                                                                  \
        {                                                                   \
@@ -245,6 +244,17 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
                                                  1);     \
        } while (0)
 
+/* Trace number of contexts active */
+#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count)                           \
+       do                                                                    \
+       {                                                                     \
+               struct timespec ts;                                           \
+               getnstimeofday(&ts);                                          \
+               trace_mali_timeline_context_active(ts.tv_sec, ts.tv_nsec,     \
+                                                  count);                    \
+       } while (0)
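
The new KBASE_TIMELINE_CONTEXT_ACTIVE macro follows the usual do { ... } while (0) idiom so that its several statements expand as a single one. A minimal illustration of why that matters, where the branch condition and counter are made up for the example:

/* Sketch only: the wrapper keeps the macro legal in an un-braced branch;
 * without it the 'else' would pair with the wrong 'if' or not compile. */
if (nr_active_ctx > 0)
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, nr_active_ctx);
else
	dev_dbg(kbdev->dev, "no active contexts\n");
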
+
+
 /* NOTE: kbase_timeline_pm_cores_func() is in mali_kbase_pm_policy.c */
 
 /**
@@ -252,15 +262,15 @@ void kbasep_trace_timeline_debugfs_term(kbase_device *kbdev);
  *
  * The caller must be holding kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_timeline_job_slot_submit(kbase_device *kbdev, kbase_context *kctx,
-                                    kbase_jd_atom *katom, int js);
+void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+                                    struct kbase_jd_atom *katom, int js);
 
 /**
  * Trace that an atom has done on a job slot
  *
  * 'Done' in this sense can occur either because:
- * - the atom in JSn_HEAD finished
- * - the atom in JSn_NEXT was evicted
+ * - the atom in JS_HEAD finished
+ * - the atom in JS_NEXT was evicted
  *
  * Whether the atom finished or was evicted is passed in @a done_code
  *
@@ -271,26 +281,26 @@ void kbase_timeline_job_slot_submit(kbase_device *kbdev, kbase_context *kctx,
  *
  * The caller must be holding kbasep_js_device_data::runpool_irq::lock
  */
-void kbase_timeline_job_slot_done(kbase_device *kbdev, kbase_context *kctx,
-                                  kbase_jd_atom *katom, int js,
+void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+                                  struct kbase_jd_atom *katom, int js,
                                   kbasep_js_atom_done_code done_code);
 
 
 /** Trace a pm event starting */
-void kbase_timeline_pm_send_event(kbase_device *kbdev,
-                                  kbase_timeline_pm_event event_sent);
+void kbase_timeline_pm_send_event(struct kbase_device *kbdev,
+                                  enum kbase_timeline_pm_event event_sent);
 
 /** Trace a pm event finishing */
-void kbase_timeline_pm_check_handle_event(kbase_device *kbdev, kbase_timeline_pm_event event);
+void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event);
 
 /** Check whether a pm event was present, and if so trace finishing it */
-void kbase_timeline_pm_handle_event(kbase_device *kbdev, kbase_timeline_pm_event event);
+void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event);
 
 /** Trace L2 power-up start */
-void kbase_timeline_pm_l2_transition_start(kbase_device *kbdev);
+void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev);
 
 /** Trace L2 power-up done */
-void kbase_timeline_pm_l2_transition_done(kbase_device *kbdev);
+void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev);
 
 #else
 
@@ -328,37 +338,39 @@ void kbase_timeline_pm_l2_transition_done(kbase_device *kbdev);
 
 #define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) CSTD_NOP()
 
-static INLINE void kbase_timeline_job_slot_submit(kbase_device *kbdev, kbase_context *kctx,
-                                    kbase_jd_atom *katom, int js)
+#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) CSTD_NOP()
+
+static INLINE void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
+                                    struct kbase_jd_atom *katom, int js)
 {
        lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
 }
 
-static INLINE void kbase_timeline_job_slot_done(kbase_device *kbdev, kbase_context *kctx,
-                                    kbase_jd_atom *katom, int js,
+static INLINE void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
+                                    struct kbase_jd_atom *katom, int js,
                                     kbasep_js_atom_done_code done_code)
 {
        lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
 }
 
-static INLINE void kbase_timeline_pm_send_event(kbase_device *kbdev, kbase_timeline_pm_event event_sent)
+static INLINE void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
 {
 }
 
-static INLINE void kbase_timeline_pm_check_handle_event(kbase_device *kbdev, kbase_timeline_pm_event event)
+static INLINE void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
 {
 }
 
-static INLINE void kbase_timeline_pm_handle_event(kbase_device *kbdev, kbase_timeline_pm_event event)
+static INLINE void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
 {
 }
 
-static INLINE void kbase_timeline_pm_l2_transition_start(kbase_device *kbdev)
+static INLINE void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
 {
 
 }
 
-static INLINE void kbase_timeline_pm_l2_transition_done(kbase_device *kbdev)
+static INLINE void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
 {
 
 }
index 2795c0b70c00f3f9b79661c82ad488c9153b6621..04d414cd0db95f9a619461f0d6f0d488cc92aed4 100755 (executable)
@@ -92,6 +92,8 @@
        KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_POWERING,  "SW: GPU L2 powering",             "%d,%d", "_tgid,_writerof_l2_transitioning"),
        KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_ACTIVE,    "SW: GPU L2 powering done",        "%d,%d", "_tgid,_finalconsumerof_l2_transitioning"),
 
+       KBASE_TIMELINE_TRACE_CODE(SW_SET_CONTEXT_ACTIVE,          "SW: Context Active",              "%d,%d",    "_tgid,_value_active"),
+
        /*
         * BEGIN: Significant SW Functions that call kbase_pm_check_transitions_nolock()
         */
index f1dd105d8be0212b3094c50880631268b20c2004..37923fd658d7e3aeca4ee8d665babe059c5deccb 100755 (executable)
 
 #include "mali_kbase_gpuprops_types.h"
 
-#define BASE_UK_VERSION_MAJOR 7
+#define BASE_UK_VERSION_MAJOR 8
 #define BASE_UK_VERSION_MINOR 0
 
-typedef struct kbase_uk_mem_alloc {
-       uk_header header;
+struct kbase_uk_mem_alloc {
+       union uk_header header;
        /* IN */
        u64 va_pages;
        u64 commit_pages;
@@ -61,77 +61,73 @@ typedef struct kbase_uk_mem_alloc {
        u64 gpu_va;
        u16 va_alignment;
        u8  padding[6];
-} kbase_uk_mem_alloc;
+};
 
-typedef struct kbase_uk_mem_free {
-       uk_header header;
+struct kbase_uk_mem_free {
+       union uk_header header;
        /* IN */
        mali_addr64 gpu_addr;
        /* OUT */
-} kbase_uk_mem_free;
+};
 
-/* used by both aliasing and importing */
-#define KBASE_MEM_NEED_MMAP         (1UL << BASE_MEM_FLAGS_NR_BITS)
-
-typedef struct kbase_uk_mem_alias {
-       uk_header header;
+struct kbase_uk_mem_alias {
+       union uk_header header;
        /* IN/OUT */
        u64 flags;
        /* IN */
        u64 stride;
        u64 nents;
-       kbase_pointer ai;
+       union kbase_pointer ai;
        /* OUT */
        u64         gpu_va;
        u64         va_pages;
-} kbase_uk_mem_alias;
+};
 
-typedef struct kbase_uk_mem_import {
-       uk_header header;
+struct kbase_uk_mem_import {
+       union uk_header header;
        /* IN */
-       kbase_pointer phandle;
+       union kbase_pointer phandle;
        u32 type;
        u32 padding;
        /* IN/OUT */
-#define KBASE_MEM_IMPORT_HAVE_PAGES   (1UL << (BASE_MEM_FLAGS_NR_BITS + 1))
        u64         flags;
        /* OUT */
        mali_addr64 gpu_va;
        u64         va_pages;
-} kbase_uk_mem_import;
+};
 
-typedef struct kbase_uk_mem_flags_change {
-       uk_header header;
+struct kbase_uk_mem_flags_change {
+       union uk_header header;
        /* IN */
        mali_addr64 gpu_va;
        u64 flags;
        u64 mask;
-} kbase_uk_mem_flags_change;
+};
 
-typedef struct kbase_uk_job_submit {
-       uk_header header;
+struct kbase_uk_job_submit {
+       union uk_header header;
        /* IN */
-       kbase_pointer addr;
+       union kbase_pointer addr;
        u32 nr_atoms;
        u32 stride;             /* bytes between atoms, i.e. sizeof(base_jd_atom_v2) */
        /* OUT */
-} kbase_uk_job_submit;
+};
 
-typedef struct kbase_uk_post_term {
-       uk_header header;
-} kbase_uk_post_term;
+struct kbase_uk_post_term {
+       union uk_header header;
+};
 
-typedef struct kbase_uk_sync_now {
-       uk_header header;
+struct kbase_uk_sync_now {
+       union uk_header header;
 
        /* IN */
-       base_syncset sset;
+       struct base_syncset sset;
 
        /* OUT */
-} kbase_uk_sync_now;
+};
 
-typedef struct kbase_uk_hwcnt_setup {
-       uk_header header;
+struct kbase_uk_hwcnt_setup {
+       union uk_header header;
 
        /* IN */
        mali_addr64 dump_buffer;
@@ -142,51 +138,51 @@ typedef struct kbase_uk_hwcnt_setup {
        u32 mmu_l2_bm;
        u32 padding;
        /* OUT */
-} kbase_uk_hwcnt_setup;
+};
 
-typedef struct kbase_uk_hwcnt_dump {
-       uk_header header;
-} kbase_uk_hwcnt_dump;
+struct kbase_uk_hwcnt_dump {
+       union uk_header header;
+};
 
-typedef struct kbase_uk_hwcnt_clear {
-       uk_header header;
-} kbase_uk_hwcnt_clear;
+struct kbase_uk_hwcnt_clear {
+       union uk_header header;
+};
 
-typedef struct kbase_uk_fence_validate {
-       uk_header header;
+struct kbase_uk_fence_validate {
+       union uk_header header;
        /* IN */
        s32 fd;
        u32 padding;
        /* OUT */
-} kbase_uk_fence_validate;
+};
 
-typedef struct kbase_uk_stream_create {
-       uk_header header;
+struct kbase_uk_stream_create {
+       union uk_header header;
        /* IN */
        char name[32];
        /* OUT */
        s32 fd;
        u32 padding;
-} kbase_uk_stream_create;
+};
 
-typedef struct kbase_uk_cpuprops {
-       uk_header header;
+struct kbase_uk_cpuprops {
+       union uk_header header;
 
        /* IN */
        struct base_cpu_props props;
        /* OUT */
-} kbase_uk_cpuprops;
+};
 
-typedef struct kbase_uk_gpuprops {
-       uk_header header;
+struct kbase_uk_gpuprops {
+       union uk_header header;
 
        /* IN */
        struct mali_base_gpu_props props;
        /* OUT */
-} kbase_uk_gpuprops;
+};
 
-typedef struct kbase_uk_mem_query {
-       uk_header header;
+struct kbase_uk_mem_query {
+       union uk_header header;
        /* IN */
        mali_addr64 gpu_addr;
 #define KBASE_MEM_QUERY_COMMIT_SIZE  1
@@ -195,44 +191,51 @@ typedef struct kbase_uk_mem_query {
        u64         query;
        /* OUT */
        u64         value;
-} kbase_uk_mem_query;
+};
        
-typedef struct kbase_uk_mem_commit {
-       uk_header header;
+struct kbase_uk_mem_commit {
+       union uk_header header;
        /* IN */
        mali_addr64 gpu_addr;
        u64         pages;
        /* OUT */
        u32 result_subcode;
        u32 padding;
-} kbase_uk_mem_commit;
+};
 
-typedef struct kbase_uk_find_cpu_offset {
-       uk_header header;
+struct kbase_uk_find_cpu_offset {
+       union uk_header header;
        /* IN */
        mali_addr64 gpu_addr;
        u64 cpu_addr;
        u64 size;
        /* OUT */
        mali_size64 offset;
-} kbase_uk_find_cpu_offset;
+};
 
 #define KBASE_GET_VERSION_BUFFER_SIZE 64
-typedef struct kbase_uk_get_ddk_version {
-       uk_header header;
+struct kbase_uk_get_ddk_version {
+       union uk_header header;
        /* OUT */
        char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE];
        u32 version_string_size;
        u32 padding;
        u32 rk_version;
-} kbase_uk_get_ddk_version;
+};
+
+struct kbase_uk_disjoint_query {
+       union uk_header header;
+       /* OUT */
+       u32 counter;
+       u32 padding;
+};
 
-typedef struct kbase_uk_set_flags {
-       uk_header header;
+struct kbase_uk_set_flags {
+       union uk_header header;
        /* IN */
        u32 create_flags;
        u32 padding;
-} kbase_uk_set_flags;
+};
 
 #if MALI_UNIT_TEST
 #define TEST_ADDR_COUNT 4
@@ -240,58 +243,64 @@ typedef struct kbase_uk_set_flags {
 typedef struct kbase_exported_test_data {
        mali_addr64 test_addr[TEST_ADDR_COUNT];         /**< memory address */
        u32 test_addr_pages[TEST_ADDR_COUNT];           /**<  memory size in pages */
-       kbase_pointer kctx;                             /**<  base context created by process */
-       kbase_pointer mm;                               /**< pointer to process address space */
+       union kbase_pointer kctx;                               /**<  base context created by process */
+       union kbase_pointer mm;                         /**< pointer to process address space */
        u8 buffer1[KBASE_TEST_BUFFER_SIZE];   /**<  unit test defined parameter */
        u8 buffer2[KBASE_TEST_BUFFER_SIZE];   /**<  unit test defined parameter */
 } kbase_exported_test_data;
 
-typedef struct kbase_uk_set_test_data {
-       uk_header header;
+struct kbase_uk_set_test_data {
+       union uk_header header;
        /* IN */
-       kbase_exported_test_data test_data;
-} kbase_uk_set_test_data;
+       struct kbase_exported_test_data test_data;
+};
 
 #endif                         /* MALI_UNIT_TEST */
 
 #ifdef SUPPORT_MALI_ERROR_INJECT
-typedef struct kbase_uk_error_params {
-       uk_header header;
+struct kbase_uk_error_params {
+       union uk_header header;
        /* IN */
-       kbase_error_params params;
-} kbase_uk_error_params;
+       struct kbase_error_params params;
+};
 #endif                         /* SUPPORT_MALI_ERROR_INJECT */
 
 #ifdef SUPPORT_MALI_NO_MALI
-typedef struct kbase_uk_model_control_params {
-       uk_header header;
+struct kbase_uk_model_control_params {
+       union uk_header header;
        /* IN */
-       kbase_model_control_params params;
-} kbase_uk_model_control_params;
+       struct kbase_model_control_params params;
+};
 #endif                         /* SUPPORT_MALI_NO_MALI */
 
 #define KBASE_MAXIMUM_EXT_RESOURCES       255
 
-typedef struct kbase_uk_ext_buff_kds_data {
-       uk_header header;
-       kbase_pointer external_resource;
-       kbase_pointer file_descriptor;
+struct kbase_uk_ext_buff_kds_data {
+       union uk_header header;
+       union kbase_pointer external_resource;
+       union kbase_pointer file_descriptor;
        u32 num_res;            /* limited to KBASE_MAXIMUM_EXT_RESOURCES */
        u32 padding;
-} kbase_uk_ext_buff_kds_data;
+};
 
-typedef struct kbase_uk_keep_gpu_powered {
-       uk_header header;
+struct kbase_uk_keep_gpu_powered {
+       union uk_header header;
        u32       enabled;
        u32       padding;
-} kbase_uk_keep_gpu_powered;
+};
 
-typedef struct kbase_uk_profiling_controls {
-       uk_header header;
+struct kbase_uk_profiling_controls {
+       union uk_header header;
        u32 profiling_controls[FBDUMP_CONTROL_MAX];
-} kbase_uk_profiling_controls;
+};
+
+struct kbase_uk_debugfs_mem_profile_add {
+       union uk_header header;
+       u32 len;
+       union kbase_pointer buf;
+};
 
-typedef enum kbase_uk_function_id {
+enum kbase_uk_function_id {
        KBASE_FUNC_MEM_ALLOC = (UK_FUNC_ID + 0),
        KBASE_FUNC_MEM_IMPORT,
        KBASE_FUNC_MEM_COMMIT,
@@ -300,6 +309,10 @@ typedef enum kbase_uk_function_id {
        KBASE_FUNC_MEM_FLAGS_CHANGE,
        KBASE_FUNC_MEM_ALIAS,
 
+#ifdef BASE_LEGACY_UK6_SUPPORT
+       KBASE_FUNC_JOB_SUBMIT_UK6 = (UK_FUNC_ID + 7),
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+
        KBASE_FUNC_SYNC  = (UK_FUNC_ID + 8),
 
        KBASE_FUNC_POST_TERM,
@@ -327,11 +340,13 @@ typedef enum kbase_uk_function_id {
        KBASE_FUNC_STREAM_CREATE,
        KBASE_FUNC_GET_PROFILING_CONTROLS,
        KBASE_FUNC_SET_PROFILING_CONTROLS, /* to be used only for testing
-                                          * purposes, otherwise these controls
-                                          * are set through gator API */
-       KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 27)
+                                           * purposes, otherwise these controls
+                                           * are set through gator API */
 
-} kbase_uk_function_id;
+       KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD,
+       KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 28),
+       KBASE_FUNC_DISJOINT_QUERY
 
+};
 
 #endif                         /* _KBASE_UKU_H_ */
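
The interface version moves to 8.0 above because new functions are added and KBASE_FUNC_JOB_SUBMIT is re-pinned from UK_FUNC_ID + 27 to UK_FUNC_ID + 28; only selected enumerators carry explicit values, and the entries between them take consecutive values automatically. A standalone illustration of that numbering style, with all names invented:

/* Sketch only: explicit anchors keep ABI-visible values stable while the
 * unpinned entries auto-increment from the previous one. */
enum demo_func_id {
	DEMO_FUNC_ALLOC = 512,		/* pinned: 512 */
	DEMO_FUNC_FREE,			/* 513 */
	DEMO_FUNC_LEGACY_SUBMIT = 519,	/* kept at its old slot */
	DEMO_FUNC_SYNC = 520,		/* pinned: 520 */
	DEMO_FUNC_SUBMIT = 540,		/* appended at a new slot */
	DEMO_FUNC_QUERY,		/* 541 */
};
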
index c11c678e3b188f5af3061ecccf45b634476bb410..d159872306c5ccbf6059b9b62718819c191af5a7 100755 (executable)
@@ -22,6 +22,7 @@
 mali_bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry)
 {
        struct list_head *pos = base->next;
+
        while (pos != base) {
                if (pos == entry)
                        return MALI_TRUE;
diff --git a/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h
new file mode 100755 (executable)
index 0000000..2025059
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+
+#if !defined(_TRACE_MALI_KBASE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_KBASE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(mali_slot_template,
+       TP_PROTO(int jobslot, unsigned int info_val),
+       TP_ARGS(jobslot, info_val),
+       TP_STRUCT__entry(
+               __field(unsigned int, jobslot)
+               __field(unsigned int, info_val)
+       ),
+       TP_fast_assign(
+               __entry->jobslot = jobslot;
+               __entry->info_val = info_val;
+       ),
+       TP_printk("jobslot=%u info=%u", __entry->jobslot, __entry->info_val)
+);
+
+#define DEFINE_MALI_SLOT_EVENT(name) \
+DEFINE_EVENT(mali_slot_template, mali_##name, \
+       TP_PROTO(int jobslot, unsigned int info_val), \
+       TP_ARGS(jobslot, info_val))
+DEFINE_MALI_SLOT_EVENT(JM_SUBMIT);
+DEFINE_MALI_SLOT_EVENT(JM_JOB_DONE);
+DEFINE_MALI_SLOT_EVENT(JM_UPDATE_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_CHECK_HEAD);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_SOFTSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_0);
+DEFINE_MALI_SLOT_EVENT(JM_HARDSTOP_1);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_SOFT_OR_HARD_STOP);
+DEFINE_MALI_SLOT_EVENT(JM_SLOT_EVICT);
+DEFINE_MALI_SLOT_EVENT(JM_BEGIN_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JM_END_RESET_WORKER);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_SUBMIT_TO_BLOCKED);
+DEFINE_MALI_SLOT_EVENT(JS_AFFINITY_CURRENT);
+DEFINE_MALI_SLOT_EVENT(JD_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_CORES_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REGISTER_INUSE_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_REQUEST_ON_RECHECK_FAILED);
+DEFINE_MALI_SLOT_EVENT(JS_CORE_REF_AFFINITY_WOULD_VIOLATE);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_TRY_RUN_NEXT_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_JOB_DONE_RETRY_NEEDED);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB);
+DEFINE_MALI_SLOT_EVENT(JS_POLICY_DEQUEUE_JOB_IRQ);
+#undef DEFINE_MALI_SLOT_EVENT
+
+DECLARE_EVENT_CLASS(mali_refcount_template,
+       TP_PROTO(int refcount, unsigned int info_val),
+       TP_ARGS(refcount, info_val),
+       TP_STRUCT__entry(
+               __field(unsigned int, refcount)
+               __field(unsigned int, info_val)
+       ),
+       TP_fast_assign(
+               __entry->refcount = refcount;
+               __entry->info_val = info_val;
+       ),
+       TP_printk("refcount=%u info=%u", __entry->refcount, __entry->info_val)
+);
+
+#define DEFINE_MALI_REFCOUNT_EVENT(name) \
+DEFINE_EVENT(mali_refcount_template, mali_##name, \
+       TP_PROTO(int refcount, unsigned int info_val), \
+       TP_ARGS(refcount, info_val))
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX_NOLOCK);
+DEFINE_MALI_REFCOUNT_EVENT(JS_ADD_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_REMOVE_JOB);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RETAIN_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_RELEASE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_TRY_SCHEDULE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_INIT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TERM_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_ENQUEUE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_DEQUEUE_HEAD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_TRY_EVICT_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_ADD_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_RUNPOOL_REMOVE_CTX);
+DEFINE_MALI_REFCOUNT_EVENT(JS_POLICY_FOREACH_CTX_JOBS);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_ACTIVE);
+DEFINE_MALI_REFCOUNT_EVENT(PM_CONTEXT_IDLE);
+#undef DEFINE_MALI_REFCOUNT_EVENT
+
+DECLARE_EVENT_CLASS(mali_add_template,
+       TP_PROTO(int gpu_addr, unsigned int info_val),
+       TP_ARGS(gpu_addr, info_val),
+       TP_STRUCT__entry(
+               __field(unsigned int, gpu_addr)
+               __field(unsigned int, info_val)
+       ),
+       TP_fast_assign(
+               __entry->gpu_addr = gpu_addr;
+               __entry->info_val = info_val;
+       ),
+       TP_printk("gpu_addr=%u info=%u", __entry->gpu_addr, __entry->info_val)
+);
+
+#define DEFINE_MALI_ADD_EVENT(name) \
+DEFINE_EVENT(mali_add_template, mali_##name, \
+       TP_PROTO(int gpu_addr, unsigned int info_val), \
+       TP_ARGS(gpu_addr, info_val))
+DEFINE_MALI_ADD_EVENT(CORE_CTX_DESTROY);
+DEFINE_MALI_ADD_EVENT(CORE_CTX_HWINSTR_TERM);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_IRQ_DONE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_SOFT_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_HARD_RESET);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_SAMPLE);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_PRFCNT_CLEAR);
+DEFINE_MALI_ADD_EVENT(CORE_GPU_CLEAN_INV_CACHES);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE_WORKER_END);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL_WORKER);
+DEFINE_MALI_ADD_EVENT(JD_DONE);
+DEFINE_MALI_ADD_EVENT(JD_CANCEL);
+DEFINE_MALI_ADD_EVENT(JD_ZAP_CONTEXT);
+DEFINE_MALI_ADD_EVENT(JM_IRQ);
+DEFINE_MALI_ADD_EVENT(JM_IRQ_END);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS);
+DEFINE_MALI_ADD_EVENT(JM_FLUSH_WORKQS_DONE);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_NON_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_SCHEDULED);
+DEFINE_MALI_ADD_EVENT(JM_ZAP_DONE);
+DEFINE_MALI_ADD_EVENT(JM_SUBMIT_AFTER_RESET);
+DEFINE_MALI_ADD_EVENT(JM_JOB_COMPLETE);
+DEFINE_MALI_ADD_EVENT(JS_FAST_START_EVICTS_CTX);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_RUNPOOL);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_ON_CTX);
+DEFINE_MALI_ADD_EVENT(JS_CTX_ATTR_NOW_OFF_CTX);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_END);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_TIMER_START);
+DEFINE_MALI_ADD_EVENT(JS_POLICY_ENQUEUE_JOB);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_DESIRED);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERING_UP);
+DEFINE_MALI_ADD_EVENT(PM_JOB_SUBMIT_AFTER_POWERED_UP);
+DEFINE_MALI_ADD_EVENT(PM_PWRON);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWRON_L2);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_TILER);
+DEFINE_MALI_ADD_EVENT(PM_PWROFF_L2);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_L2);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED);
+DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED_TILER);
+DEFINE_MALI_ADD_EVENT(PM_UNREQUEST_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_INUSE);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_INUSE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE);
+DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE_TILER);
+DEFINE_MALI_ADD_EVENT(PM_GPU_ON);
+DEFINE_MALI_ADD_EVENT(PM_GPU_OFF);
+DEFINE_MALI_ADD_EVENT(PM_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_INIT);
+DEFINE_MALI_ADD_EVENT(PM_CURRENT_POLICY_TERM);
+DEFINE_MALI_ADD_EVENT(PM_CA_SET_POLICY);
+DEFINE_MALI_ADD_EVENT(PM_WAKE_WAITERS);
+#undef DEFINE_MALI_ADD_EVENT
+
+#endif /* _TRACE_MALI_KBASE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mali_linux_kbase_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
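This new header follows the stock Linux tracepoint pattern: each DECLARE_EVENT_CLASS fixes the record layout and TP_printk format once, and every DEFINE_EVENT stamps out a trace_mali_<NAME>() emitter under the "mali" TRACE_SYSTEM. A minimal sketch of emitting one of these events directly (illustrative only; how the driver wires its internal trace macros to these tracepoints is outside this hunk):

/* Exactly one .c file must define CREATE_TRACE_POINTS before including the
 * header so the tracepoint bodies get instantiated. */
#define CREATE_TRACE_POINTS
#include "mali_linux_kbase_trace.h"

static void example_report_submit(int js, u32 jc_head_lo)
{
	/* Generated by DEFINE_MALI_SLOT_EVENT(JM_SUBMIT) above; appears in
	 * ftrace as "mali_JM_SUBMIT: jobslot=... info=..." */
	trace_mali_JM_SUBMIT(js, jc_head_lo);
}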
index d3848ad9c5f6042759e5f9c8f09637c73299ee2b..8f9a788c6e30bcfe481bc4408a3abdbb2526f208 100755 (executable)
 #define L3_PWRACTIVE_LO         0x270  /* (RO) Level 3 cache active bitmap, low word */
 #define L3_PWRACTIVE_HI         0x274  /* (RO) Level 3 cache active bitmap, high word */
 
-#define SHADER_CONFIG           0xF04  /* (RW) Shader core configuration settings (Mali-T60x additional register) */
-#define L2_MMU_CONFIG           0xF0C  /* (RW) Configuration of the L2 cache and MMU (Mali-T60x additional register) */
+#define SHADER_CONFIG           0xF04  /* (RW) Shader core configuration settings (Implementation specific register) */
+#define TILER_CONFIG            0xF08   /* (RW) Tiler core configuration settings (Implementation specific register) */
+#define L2_MMU_CONFIG           0xF0C  /* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */
 
 #define JOB_CONTROL_BASE        0x1000
 
 
 #define JOB_SLOT_REG(n, r)      (JOB_CONTROL_REG(JOB_SLOT0 + ((n) << 7)) + (r))
 
-#define JSn_HEAD_LO             0x00   /* (RO) Job queue head pointer for job slot n, low word */
-#define JSn_HEAD_HI             0x04   /* (RO) Job queue head pointer for job slot n, high word */
-#define JSn_TAIL_LO             0x08   /* (RO) Job queue tail pointer for job slot n, low word */
-#define JSn_TAIL_HI             0x0C   /* (RO) Job queue tail pointer for job slot n, high word */
-#define JSn_AFFINITY_LO         0x10   /* (RO) Core affinity mask for job slot n, low word */
-#define JSn_AFFINITY_HI         0x14   /* (RO) Core affinity mask for job slot n, high word */
-#define JSn_CONFIG              0x18   /* (RO) Configuration settings for job slot n */
+#define JS_HEAD_LO             0x00    /* (RO) Job queue head pointer for job slot n, low word */
+#define JS_HEAD_HI             0x04    /* (RO) Job queue head pointer for job slot n, high word */
+#define JS_TAIL_LO             0x08    /* (RO) Job queue tail pointer for job slot n, low word */
+#define JS_TAIL_HI             0x0C    /* (RO) Job queue tail pointer for job slot n, high word */
+#define JS_AFFINITY_LO         0x10    /* (RO) Core affinity mask for job slot n, low word */
+#define JS_AFFINITY_HI         0x14    /* (RO) Core affinity mask for job slot n, high word */
+#define JS_CONFIG              0x18    /* (RO) Configuration settings for job slot n */
 
-#define JSn_COMMAND             0x20   /* (WO) Command register for job slot n */
-#define JSn_STATUS              0x24   /* (RO) Status register for job slot n */
+#define JS_COMMAND             0x20    /* (WO) Command register for job slot n */
+#define JS_STATUS              0x24    /* (RO) Status register for job slot n */
 
-#define JSn_HEAD_NEXT_LO        0x40   /* (RW) Next job queue head pointer for job slot n, low word */
-#define JSn_HEAD_NEXT_HI        0x44   /* (RW) Next job queue head pointer for job slot n, high word */
+#define JS_HEAD_NEXT_LO        0x40    /* (RW) Next job queue head pointer for job slot n, low word */
+#define JS_HEAD_NEXT_HI        0x44    /* (RW) Next job queue head pointer for job slot n, high word */
 
-#define JSn_AFFINITY_NEXT_LO    0x50   /* (RW) Next core affinity mask for job slot n, low word */
-#define JSn_AFFINITY_NEXT_HI    0x54   /* (RW) Next core affinity mask for job slot n, high word */
-#define JSn_CONFIG_NEXT         0x58   /* (RW) Next configuration settings for job slot n */
+#define JS_AFFINITY_NEXT_LO    0x50    /* (RW) Next core affinity mask for job slot n, low word */
+#define JS_AFFINITY_NEXT_HI    0x54    /* (RW) Next core affinity mask for job slot n, high word */
+#define JS_CONFIG_NEXT         0x58    /* (RW) Next configuration settings for job slot n */
 
-#define JSn_COMMAND_NEXT        0x60   /* (RW) Next command register for job slot n */
+#define JS_COMMAND_NEXT        0x60    /* (RW) Next command register for job slot n */
 
 #define MEMORY_MANAGEMENT_BASE  0x2000
 #define MMU_REG(r)              (MEMORY_MANAGEMENT_BASE + (r))
 
 #define MMU_AS_REG(n, r)        (MMU_REG(MMU_AS0 + ((n) << 6)) + (r))
 
-#define ASn_TRANSTAB_LO         0x00   /* (RW) Translation Table Base Address for address space n, low word */
-#define ASn_TRANSTAB_HI         0x04   /* (RW) Translation Table Base Address for address space n, high word */
-#define ASn_MEMATTR_LO          0x08   /* (RW) Memory attributes for address space n, low word. */
-#define ASn_MEMATTR_HI          0x0C   /* (RW) Memory attributes for address space n, high word. */
-#define ASn_LOCKADDR_LO         0x10   /* (RW) Lock region address for address space n, low word */
-#define ASn_LOCKADDR_HI         0x14   /* (RW) Lock region address for address space n, high word */
-#define ASn_COMMAND             0x18   /* (WO) MMU command register for address space n */
-#define ASn_FAULTSTATUS         0x1C   /* (RO) MMU fault status register for address space n */
-#define ASn_FAULTADDRESS_LO     0x20   /* (RO) Fault Address for address space n, low word */
-#define ASn_FAULTADDRESS_HI     0x24   /* (RO) Fault Address for address space n, high word */
-#define ASn_STATUS              0x28   /* (RO) Status flags for address space n */
+#define AS_TRANSTAB_LO         0x00    /* (RW) Translation Table Base Address for address space n, low word */
+#define AS_TRANSTAB_HI         0x04    /* (RW) Translation Table Base Address for address space n, high word */
+#define AS_MEMATTR_LO          0x08    /* (RW) Memory attributes for address space n, low word. */
+#define AS_MEMATTR_HI          0x0C    /* (RW) Memory attributes for address space n, high word. */
+#define AS_LOCKADDR_LO         0x10    /* (RW) Lock region address for address space n, low word */
+#define AS_LOCKADDR_HI         0x14    /* (RW) Lock region address for address space n, high word */
+#define AS_COMMAND             0x18    /* (WO) MMU command register for address space n */
+#define AS_FAULTSTATUS         0x1C    /* (RO) MMU fault status register for address space n */
+#define AS_FAULTADDRESS_LO     0x20    /* (RO) Fault Address for address space n, low word */
+#define AS_FAULTADDRESS_HI     0x24    /* (RO) Fault Address for address space n, high word */
+#define AS_STATUS              0x28    /* (RO) Status flags for address space n */
 
 /* End Register Offsets */
 
 /*
  * Begin MMU TRANSTAB register values
  */
-#define ASn_TRANSTAB_ADDR_SPACE_MASK   0xfffff000
-#define ASn_TRANSTAB_ADRMODE_UNMAPPED  (0u << 0)
-#define ASn_TRANSTAB_ADRMODE_IDENTITY  (1u << 1)
-#define ASn_TRANSTAB_ADRMODE_TABLE     (3u << 0)
-#define ASn_TRANSTAB_READ_INNER        (1u << 2)
-#define ASn_TRANSTAB_SHARE_OUTER       (1u << 4)
+#define AS_TRANSTAB_ADDR_SPACE_MASK   0xfffff000
+#define AS_TRANSTAB_ADRMODE_UNMAPPED  (0u << 0)
+#define AS_TRANSTAB_ADRMODE_IDENTITY  (1u << 1)
+#define AS_TRANSTAB_ADRMODE_TABLE     (3u << 0)
+#define AS_TRANSTAB_READ_INNER        (1u << 2)
+#define AS_TRANSTAB_SHARE_OUTER       (1u << 4)
 
 #define MMU_TRANSTAB_ADRMODE_MASK      0x00000003
 
 /*
  * Begin MMU STATUS register values
  */
-#define ASn_STATUS_FLUSH_ACTIVE 0x01
+#define AS_STATUS_AS_ACTIVE 0x01
 
-#define ASn_FAULTSTATUS_ACCESS_TYPE_MASK    (0x3<<8)
-#define ASn_FAULTSTATUS_ACCESS_TYPE_EX      (0x1<<8)
-#define ASn_FAULTSTATUS_ACCESS_TYPE_READ    (0x2<<8)
-#define ASn_FAULTSTATUS_ACCESS_TYPE_WRITE   (0x3<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK    (0x3<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX      (0x1<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ    (0x2<<8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE   (0x3<<8)
 
 /*
  * Begin Command Values
  */
 
-/* JSn_COMMAND register commands */
-#define JSn_COMMAND_NOP         0x00   /* NOP Operation. Writing this value is ignored */
-#define JSn_COMMAND_START       0x01   /* Start processing a job chain. Writing this value is ignored */
-#define JSn_COMMAND_SOFT_STOP   0x02   /* Gently stop processing a job chain */
-#define JSn_COMMAND_HARD_STOP   0x03   /* Rudely stop processing a job chain */
-#define JSn_COMMAND_SOFT_STOP_0 0x04   /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
-#define JSn_COMMAND_HARD_STOP_0 0x05   /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
-#define JSn_COMMAND_SOFT_STOP_1 0x06   /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
-#define JSn_COMMAND_HARD_STOP_1 0x07   /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
-
-/* ASn_COMMAND register commands */
-#define ASn_COMMAND_NOP         0x00   /* NOP Operation */
-#define ASn_COMMAND_UPDATE      0x01   /* Broadcasts the values in ASn_TRANSTAB and ASn_MEMATTR to all MMUs */
-#define ASn_COMMAND_LOCK        0x02   /* Issue a lock region command to all MMUs */
-#define ASn_COMMAND_UNLOCK      0x03   /* Issue a flush region command to all MMUs */
-#define ASn_COMMAND_FLUSH       0x04   /* Flush all L2 caches then issue a flush region command to all MMUs
+/* JS_COMMAND register commands */
+#define JS_COMMAND_NOP         0x00    /* NOP Operation. Writing this value is ignored */
+#define JS_COMMAND_START       0x01    /* Start processing a job chain. Writing this value is ignored */
+#define JS_COMMAND_SOFT_STOP   0x02    /* Gently stop processing a job chain */
+#define JS_COMMAND_HARD_STOP   0x03    /* Rudely stop processing a job chain */
+#define JS_COMMAND_SOFT_STOP_0 0x04    /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_HARD_STOP_0 0x05    /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_SOFT_STOP_1 0x06    /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
+#define JS_COMMAND_HARD_STOP_1 0x07    /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
+
+#define JS_COMMAND_MASK        0x07    /* Mask of bits currently in use by the HW */
+
+/* AS_COMMAND register commands */
+#define AS_COMMAND_NOP         0x00    /* NOP Operation */
+#define AS_COMMAND_UPDATE      0x01    /* Broadcasts the values in AS_TRANSTAB and AS_MEMATTR to all MMUs */
+#define AS_COMMAND_LOCK        0x02    /* Issue a lock region command to all MMUs */
+#define AS_COMMAND_UNLOCK      0x03    /* Issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH       0x04    /* Flush all L2 caches then issue a flush region command to all MMUs
                                           (deprecated - only for use with T60x) */
-#define ASn_COMMAND_FLUSH_PT    0x04   /* Flush all L2 caches then issue a flush region command to all MMUs */
-#define ASn_COMMAND_FLUSH_MEM   0x05   /* Wait for memory accesses to complete, flush all the L1s cache then
+#define AS_COMMAND_FLUSH_PT    0x04    /* Flush all L2 caches then issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH_MEM   0x05    /* Wait for memory accesses to complete, flush all the L1s cache then
                                           flush all L2 caches then issue a flush region command to all MMUs */
 
-/* Possible values of JSn_CONFIG and JSn_CONFIG_NEXT registers */
-#define JSn_CONFIG_START_FLUSH_NO_ACTION        (0u << 0)
-#define JSn_CONFIG_START_FLUSH_CLEAN            (1u << 8)
-#define JSn_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
-#define JSn_CONFIG_START_MMU                    (1u << 10)
-#define JSn_CONFIG_JOB_CHAIN_FLAG               (1u << 11)
-#define JSn_CONFIG_END_FLUSH_NO_ACTION          JSn_CONFIG_START_FLUSH_NO_ACTION
-#define JSn_CONFIG_END_FLUSH_CLEAN              (1u << 12)
-#define JSn_CONFIG_END_FLUSH_CLEAN_INVALIDATE   (3u << 12)
-#define JSn_CONFIG_THREAD_PRI(n)                ((n) << 16)
+/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
+#define JS_CONFIG_START_FLUSH_NO_ACTION        (0u << 0)
+#define JS_CONFIG_START_FLUSH_CLEAN            (1u << 8)
+#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
+#define JS_CONFIG_START_MMU                    (1u << 10)
+#define JS_CONFIG_JOB_CHAIN_FLAG               (1u << 11)
+#define JS_CONFIG_END_FLUSH_NO_ACTION          JS_CONFIG_START_FLUSH_NO_ACTION
+#define JS_CONFIG_END_FLUSH_CLEAN              (1u << 12)
+#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE   (3u << 12)
+#define JS_CONFIG_THREAD_PRI(n)                ((n) << 16)
 
-/* JSn_STATUS register values */
+/* JS_STATUS register values */
 
 /* NOTE: Please keep these values in sync with enum base_jd_event_code in mali_base_kernel.h.
  * The values are separated to avoid dependency of userspace and kernel code.
  */
 
 /* Group of values representing the job status instead of a particular fault */
-#define JSn_STATUS_NO_EXCEPTION_BASE   0x00
-#define JSn_STATUS_INTERRUPTED         (JSn_STATUS_NO_EXCEPTION_BASE + 0x02)   /* 0x02 means INTERRUPTED */
-#define JSn_STATUS_STOPPED             (JSn_STATUS_NO_EXCEPTION_BASE + 0x03)   /* 0x03 means STOPPED */
-#define JSn_STATUS_TERMINATED          (JSn_STATUS_NO_EXCEPTION_BASE + 0x04)   /* 0x04 means TERMINATED */
+#define JS_STATUS_NO_EXCEPTION_BASE   0x00
+#define JS_STATUS_INTERRUPTED         (JS_STATUS_NO_EXCEPTION_BASE + 0x02)     /* 0x02 means INTERRUPTED */
+#define JS_STATUS_STOPPED             (JS_STATUS_NO_EXCEPTION_BASE + 0x03)     /* 0x03 means STOPPED */
+#define JS_STATUS_TERMINATED          (JS_STATUS_NO_EXCEPTION_BASE + 0x04)     /* 0x04 means TERMINATED */
 
 /* General fault values */
-#define JSn_STATUS_FAULT_BASE          0x40
-#define JSn_STATUS_CONFIG_FAULT        (JSn_STATUS_FAULT_BASE) /* 0x40 means CONFIG FAULT */
-#define JSn_STATUS_POWER_FAULT         (JSn_STATUS_FAULT_BASE + 0x01)  /* 0x41 means POWER FAULT */
-#define JSn_STATUS_READ_FAULT          (JSn_STATUS_FAULT_BASE + 0x02)  /* 0x42 means READ FAULT */
-#define JSn_STATUS_WRITE_FAULT         (JSn_STATUS_FAULT_BASE + 0x03)  /* 0x43 means WRITE FAULT */
-#define JSn_STATUS_AFFINITY_FAULT      (JSn_STATUS_FAULT_BASE + 0x04)  /* 0x44 means AFFINITY FAULT */
-#define JSn_STATUS_BUS_FAULT           (JSn_STATUS_FAULT_BASE + 0x08)  /* 0x48 means BUS FAULT */
+#define JS_STATUS_FAULT_BASE          0x40
+#define JS_STATUS_CONFIG_FAULT        (JS_STATUS_FAULT_BASE)   /* 0x40 means CONFIG FAULT */
+#define JS_STATUS_POWER_FAULT         (JS_STATUS_FAULT_BASE + 0x01)    /* 0x41 means POWER FAULT */
+#define JS_STATUS_READ_FAULT          (JS_STATUS_FAULT_BASE + 0x02)    /* 0x42 means READ FAULT */
+#define JS_STATUS_WRITE_FAULT         (JS_STATUS_FAULT_BASE + 0x03)    /* 0x43 means WRITE FAULT */
+#define JS_STATUS_AFFINITY_FAULT      (JS_STATUS_FAULT_BASE + 0x04)    /* 0x44 means AFFINITY FAULT */
+#define JS_STATUS_BUS_FAULT           (JS_STATUS_FAULT_BASE + 0x08)    /* 0x48 means BUS FAULT */
 
 /* Instruction or data faults */
-#define JSn_STATUS_INSTRUCTION_FAULT_BASE  0x50
-#define JSn_STATUS_INSTR_INVALID_PC        (JSn_STATUS_INSTRUCTION_FAULT_BASE) /* 0x50 means INSTR INVALID PC */
-#define JSn_STATUS_INSTR_INVALID_ENC       (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x01)  /* 0x51 means INSTR INVALID ENC */
-#define JSn_STATUS_INSTR_TYPE_MISMATCH     (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x02)  /* 0x52 means INSTR TYPE MISMATCH */
-#define JSn_STATUS_INSTR_OPERAND_FAULT     (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x03)  /* 0x53 means INSTR OPERAND FAULT */
-#define JSn_STATUS_INSTR_TLS_FAULT         (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x04)  /* 0x54 means INSTR TLS FAULT */
-#define JSn_STATUS_INSTR_BARRIER_FAULT     (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x05)  /* 0x55 means INSTR BARRIER FAULT */
-#define JSn_STATUS_INSTR_ALIGN_FAULT       (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x06)  /* 0x56 means INSTR ALIGN FAULT */
+#define JS_STATUS_INSTRUCTION_FAULT_BASE  0x50
+#define JS_STATUS_INSTR_INVALID_PC        (JS_STATUS_INSTRUCTION_FAULT_BASE)   /* 0x50 means INSTR INVALID PC */
+#define JS_STATUS_INSTR_INVALID_ENC       (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x01)    /* 0x51 means INSTR INVALID ENC */
+#define JS_STATUS_INSTR_TYPE_MISMATCH     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x02)    /* 0x52 means INSTR TYPE MISMATCH */
+#define JS_STATUS_INSTR_OPERAND_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x03)    /* 0x53 means INSTR OPERAND FAULT */
+#define JS_STATUS_INSTR_TLS_FAULT         (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x04)    /* 0x54 means INSTR TLS FAULT */
+#define JS_STATUS_INSTR_BARRIER_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x05)    /* 0x55 means INSTR BARRIER FAULT */
+#define JS_STATUS_INSTR_ALIGN_FAULT       (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x06)    /* 0x56 means INSTR ALIGN FAULT */
 /* NOTE: No fault with 0x57 code defined in spec. */
-#define JSn_STATUS_DATA_INVALID_FAULT      (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x08)  /* 0x58 means DATA INVALID FAULT */
-#define JSn_STATUS_TILE_RANGE_FAULT        (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x09)  /* 0x59 means TILE RANGE FAULT */
-#define JSn_STATUS_ADDRESS_RANGE_FAULT     (JSn_STATUS_INSTRUCTION_FAULT_BASE + 0x0A)  /* 0x5A means ADDRESS RANGE FAULT */
+#define JS_STATUS_DATA_INVALID_FAULT      (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x08)    /* 0x58 means DATA INVALID FAULT */
+#define JS_STATUS_TILE_RANGE_FAULT        (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x09)    /* 0x59 means TILE RANGE FAULT */
+#define JS_STATUS_ADDRESS_RANGE_FAULT     (JS_STATUS_INSTRUCTION_FAULT_BASE + 0x0A)    /* 0x5A means ADDRESS RANGE FAULT */
 
 /* Other faults */
-#define JSn_STATUS_MEMORY_FAULT_BASE   0x60
-#define JSn_STATUS_OUT_OF_MEMORY       (JSn_STATUS_MEMORY_FAULT_BASE)  /* 0x60 means OUT OF MEMORY */
-#define JSn_STATUS_UNKNOWN             0x7F    /* 0x7F means UNKNOWN */
+#define JS_STATUS_MEMORY_FAULT_BASE   0x60
+#define JS_STATUS_OUT_OF_MEMORY       (JS_STATUS_MEMORY_FAULT_BASE)    /* 0x60 means OUT OF MEMORY */
+#define JS_STATUS_UNKNOWN             0x7F     /* 0x7F means UNKNOWN */
 
 /* GPU_COMMAND values */
 #define GPU_COMMAND_NOP                0x00    /* No operation, nothing happens */
 
 /* AS<n>_MEMATTR values: */
 /* Use GPU implementation-defined  caching policy. */
-#define ASn_MEMATTR_IMPL_DEF_CACHE_POLICY 0x48
+#define AS_MEMATTR_IMPL_DEF_CACHE_POLICY 0x48
 /* The attribute set to force all resources to be cached. */
-#define ASn_MEMATTR_FORCE_TO_CACHE_ALL    0x4F
+#define AS_MEMATTR_FORCE_TO_CACHE_ALL    0x4F
 /* Inner write-alloc cache setup, no outer caching */
-#define ASn_MEMATTR_WRITE_ALLOC           0x4D
+#define AS_MEMATTR_WRITE_ALLOC           0x4D
 /* symbol for default MEMATTR to use */
-#define ASn_MEMATTR_INDEX_DEFAULT               0
+#define AS_MEMATTR_INDEX_DEFAULT               0
 /* HW implementation defined caching */
-#define ASn_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
+#define AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
 /* Force cache on */
-#define ASn_MEMATTR_INDEX_FORCE_TO_CACHE_ALL    1
+#define AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL    1
 /* Write-alloc inner */
-#define ASn_MEMATTR_INDEX_WRITE_ALLOC           2
+#define AS_MEMATTR_INDEX_WRITE_ALLOC           2
 
 /* GPU_ID register */
 #define GPU_ID_VERSION_STATUS_SHIFT       0
 #define GPU_ID_PI_T62X                    0x0620
 #define GPU_ID_PI_T76X                    0x0750
 #define GPU_ID_PI_T72X                    0x0720
+#ifdef MALI_INCLUDE_TFRX
+#define GPU_ID_PI_TFRX                    0x0880
+#endif /* MALI_INCLUDE_TFRX */
+#ifdef MALI_INCLUDE_TF2X
+#define GPU_ID_PI_TF2X                    0x0860
+#endif /* MALI_INCLUDE_TF2X */
 
 /* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */
 #define GPU_ID_S_15DEV0                   0x1
 
 /* JS<n>_FEATURES register */
 
-#define JSn_FEATURE_NULL_JOB              (1u << 1)
-#define JSn_FEATURE_SET_VALUE_JOB         (1u << 2)
-#define JSn_FEATURE_CACHE_FLUSH_JOB       (1u << 3)
-#define JSn_FEATURE_COMPUTE_JOB           (1u << 4)
-#define JSn_FEATURE_VERTEX_JOB            (1u << 5)
-#define JSn_FEATURE_GEOMETRY_JOB          (1u << 6)
-#define JSn_FEATURE_TILER_JOB             (1u << 7)
-#define JSn_FEATURE_FUSED_JOB             (1u << 8)
-#define JSn_FEATURE_FRAGMENT_JOB          (1u << 9)
+#define JS_FEATURE_NULL_JOB              (1u << 1)
+#define JS_FEATURE_SET_VALUE_JOB         (1u << 2)
+#define JS_FEATURE_CACHE_FLUSH_JOB       (1u << 3)
+#define JS_FEATURE_COMPUTE_JOB           (1u << 4)
+#define JS_FEATURE_VERTEX_JOB            (1u << 5)
+#define JS_FEATURE_GEOMETRY_JOB          (1u << 6)
+#define JS_FEATURE_TILER_JOB             (1u << 7)
+#define JS_FEATURE_FUSED_JOB             (1u << 8)
+#define JS_FEATURE_FRAGMENT_JOB          (1u << 9)
 
 /* End JS<n>_FEATURES register */
 
 #define SC_ENABLE_TEXGRD_FLAGS      (1ul << 25)
 /* End SHADER_CONFIG register */
 
+/* TILER_CONFIG register */
+
+#define TC_CLOCK_GATE_OVERRIDE      (1ul << 0)
+
+/* End TILER_CONFIG register */
+
+
 #endif                         /* _MIDGARD_REGMAP_H_ */
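The renamed JS_* offsets are relative to each job slot's register block, so they are always combined with JOB_SLOT_REG(n, r); likewise the AS_* offsets go through MMU_AS_REG(n, r). A minimal sketch of soft-stopping job slot 1, assuming the three-argument kbase_os_reg_write() accessor that appears later in this patch:

/* JOB_SLOT_REG(1, JS_COMMAND) expands to
 * JOB_CONTROL_REG(JOB_SLOT0 + (1 << 7)) + JS_COMMAND, i.e. the command
 * register inside slot 1's 0x80-byte register window. */
kbase_os_reg_write(kbdev, JOB_SLOT_REG(1, JS_COMMAND), JS_COMMAND_SOFT_STOP);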
index f8b8710fcf9ef6dc9f4e22e8159367c30c9d7d2d..83a29cd304fb96667ebb86b0651fe64477f31ae6 100755 (executable)
@@ -358,6 +358,33 @@ TRACE_EVENT(mali_timeline_pm_checktrans,
 
 );
 
+TRACE_EVENT(mali_timeline_context_active,
+
+       TP_PROTO(u64 ts_sec,
+               u32 ts_nsec,
+               int count),
+
+       TP_ARGS(ts_sec,
+               ts_nsec,
+               count),
+
+       TP_STRUCT__entry(
+                       __field(u64, ts_sec)
+                       __field(u32, ts_nsec)
+                       __field(int, count)
+       ),
+
+       TP_fast_assign(
+               __entry->ts_sec = ts_sec;
+               __entry->ts_nsec = ts_nsec;
+               __entry->count = count;
+       ),
+
+       TP_printk("%i,%i.%.9i,0,%i", SW_SET_CONTEXT_ACTIVE,
+                               (int)__entry->ts_sec,
+                               (int)__entry->ts_nsec,
+                               __entry->count)
+);
 
 #endif                         /* _MALI_TIMELINE_H */
 
index c577e83220c3c77be60c58618bba4838146a85bc..5d9e0dd218ecb8966b4dfba94a098bba07993def 100755 (executable)
@@ -61,31 +61,31 @@ extern "C" {
  * provide a mapping of the identifier to the OS specific device name.
  *
  */
-       typedef enum uk_client_id {
+enum uk_client_id {
        /**
         * Value used to identify the Base driver UK client.
         */
-               UK_CLIENT_MALI_T600_BASE,
+       UK_CLIENT_MALI_T600_BASE,
 
        /** The number of uk clients supported. This must be the last member of the enum */
-               UK_CLIENT_COUNT
-       } uk_client_id;
+       UK_CLIENT_COUNT
+};
 
 /**
  * Each function callable through the UK interface has a unique number.
  * Functions provided by UK clients start from number UK_FUNC_ID.
  * Numbers below UK_FUNC_ID are used for internal UK functions.
  */
-       typedef enum uk_func {
-               UKP_FUNC_ID_CHECK_VERSION,   /**< UKK Core internal function */
+enum uk_func {
+       UKP_FUNC_ID_CHECK_VERSION,   /**< UKK Core internal function */
        /**
         * Each UK client numbers the functions they provide starting from
         * number UK_FUNC_ID. This number is then eventually assigned to the
-        * id field of the uk_header structure when preparing to make a
+        * id field of the union uk_header structure when preparing to make a
         * UK call. See your UK client for a list of their function numbers.
         */
-               UK_FUNC_ID = 512
-       } uk_func;
+       UK_FUNC_ID = 512
+};
 
 /**
  * Arguments for a UK call are stored in a structure. This structure consists
@@ -99,43 +99,43 @@ extern "C" {
  * of a 32 or 64-bit kernel. The uk_kernel_size_type type should be defined
  * accordingly in the OS specific mali_uk_os.h header file.
  */
-       typedef union uk_header {
-               /**
-                * 32-bit number identifying the UK function to be called.
-                * Also see uk_func.
-                */
-               u32 id;
-               /**
-                * The mali_error return code returned by the called UK function.
-                * See the specification of the particular UK function you are
-                * calling for the meaning of the error codes returned. All
-                * UK functions return MALI_ERROR_NONE on success.
-                */
-               u32 ret;
-               /*
-                * Used to ensure 64-bit alignment of this union. Do not remove.
-                * This field is used for padding and does not need to be initialized.
-                */
-               u64 sizer;
-       } uk_header;
+union uk_header {
+       /**
+        * 32-bit number identifying the UK function to be called.
+        * Also see uk_func.
+        */
+       u32 id;
+       /**
+        * The mali_error return code returned by the called UK function.
+        * See the specification of the particular UK function you are
+        * calling for the meaning of the error codes returned. All
+        * UK functions return MALI_ERROR_NONE on success.
+        */
+       u32 ret;
+       /*
+        * Used to ensure 64-bit alignment of this union. Do not remove.
+        * This field is used for padding and does not need to be initialized.
+        */
+       u64 sizer;
+};
 
 /**
  * This structure carries a 16-bit major and minor number and is sent along with an internal UK call
  * used during uku_open to identify the versions of the UK module in use by the user-side and kernel-side.
  */
-       typedef struct uku_version_check_args {
-               uk_header header;
-                         /**< UK call header */
-               u16 major;
-                  /**< This field carries the user-side major version on input and the kernel-side major version on output */
-               u16 minor;
-                  /**< This field carries the user-side minor version on input and the kernel-side minor version on output. */
-               u8 padding[4];
-       } uku_version_check_args;
+struct uku_version_check_args {
+       union uk_header header;
+                 /**< UK call header */
+       u16 major;
+          /**< This field carries the user-side major version on input and the kernel-side major version on output */
+       u16 minor;
+          /**< This field carries the user-side minor version on input and the kernel-side minor version on output. */
+       u8 padding[4];
+};
 
 /** @} end group uk_api */
 
-       /** @} *//* end group base_api */
+/** @} *//* end group base_api */
 
 #ifdef __cplusplus
 }
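The uk_header union carries the function number on the way in (id) and the mali_error result on the way out (ret), with the unused sizer member forcing 8-byte size and alignment so the layout matches between 32-bit and 64-bit sides. A user-side sketch of the version handshake defined above; the transport call is hypothetical and not part of this patch:

struct uku_version_check_args args;

memset(&args, 0, sizeof(args));
args.header.id = UKP_FUNC_ID_CHECK_VERSION;	/* internal UK function, below UK_FUNC_ID */
args.major = 10;	/* user-side major version (illustrative value) */
args.minor = 0;		/* user-side minor version (illustrative value) */
/* ...dispatch &args through the UK transport (e.g. an ioctl on the kbase
 * device node)... On return, args.header.ret holds the mali_error code and
 * major/minor have been rewritten with the kernel-side versions. */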
index ed1d07e93827e261c6bcd69855cd9b163900c8cc..15faf15bf13afb74ad3c99a6b962ecbca4da5c12 100755 (executable)
 #define MALI_IMPORT CSTD_LINK_IMPORT
 #define MALI_EXPORT CSTD_LINK_EXPORT
 #define MALI_IMPL   CSTD_LINK_IMPL
+#if defined(CONFIG_MALI_DEBUG) || !MALI_CUSTOMER_RELEASE
+#define MALI_LOCAL  CSTD_LINK_EXPORT
+#else
 #define MALI_LOCAL  CSTD_LINK_LOCAL
+#endif
 
 /** @brief Decorate exported function prototypes.
  *
index e0afe4b97fe117ae45e931bdebe80eec2689e99e..5dabe26ec01c29f82c57fbe2c19472853e0a22c4 100755 (executable)
@@ -86,19 +86,54 @@ typedef u8         mali_bool8;
  * Maximum and minimum values for integer types
  * @{
  */
-#define U64_MAX         UINT64_MAX
-#define U32_MAX         UINT32_MAX
-#define U16_MAX         UINT16_MAX
-#define U8_MAX  UINT8_MAX
+#ifndef U64_MAX
+#define U64_MAX  UINT64_MAX
+#endif
+
+#ifndef U32_MAX
+#define U32_MAX  UINT32_MAX
+#endif
+
+#ifndef U16_MAX
+#define U16_MAX  UINT16_MAX
+#endif
+
+#ifndef U8_MAX
+#define U8_MAX  UINT8_MAX
+#endif
 
+#ifndef S64_MAX
 #define S64_MAX  INT64_MAX
+#endif
+
+#ifndef S64_MIN
 #define S64_MIN  INT64_MIN
+#endif
+
+#ifndef S32_MAX
 #define S32_MAX  INT32_MAX
+#endif
+
+#ifndef S32_MIN
 #define S32_MIN  INT32_MIN
+#endif
+
+#ifndef S16_MAX
 #define S16_MAX  INT16_MAX
+#endif
+
+#ifndef S16_MIN
 #define S16_MIN  INT16_MIN
+#endif
+
+#ifndef S8_MAX
 #define S8_MAX   INT8_MAX
+#endif
+
+#ifndef S8_MIN
 #define S8_MIN   INT8_MIN
+#endif
+
 /* @} */
 
 /**
index 32e5fc64620490ca9096d08b7a1b4953560524d7..33dac1e2cd3ef14b933aa912bb2dfec5d82b41f2 100755 (executable)
@@ -147,7 +147,7 @@ static int mali_pm_notifier(struct notifier_block *nb,unsigned long event,void*
 /*
   rk3288 hardware specific initialization
  */
-mali_bool kbase_platform_rk_init(kbase_device *kbdev)
+mali_bool kbase_platform_rk_init(struct kbase_device *kbdev)
 {
        if(MALI_ERROR_NONE == kbase_platform_init(kbdev))
        {
@@ -164,7 +164,7 @@ mali_bool kbase_platform_rk_init(kbase_device *kbdev)
 /*
  rk3288  hardware specific termination
 */
-void kbase_platform_rk_term(kbase_device *kbdev)
+void kbase_platform_rk_term(struct kbase_device *kbdev)
 {
        unregister_pm_notifier(&mali_pm_nb);
 #ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
@@ -179,7 +179,7 @@ kbase_platform_funcs_conf platform_funcs = {
 };
 
 #ifdef CONFIG_MALI_MIDGARD_RT_PM
-static int pm_callback_power_on(kbase_device *kbdev)
+static int pm_callback_power_on(struct kbase_device *kbdev)
 {
        int result;
        int ret_val;
@@ -207,7 +207,7 @@ static int pm_callback_power_on(kbase_device *kbdev)
        return ret_val;
 }
 
-static void pm_callback_power_off(kbase_device *kbdev)
+static void pm_callback_power_off(struct kbase_device *kbdev)
 {
        struct device *dev = kbdev->dev;
        pm_schedule_suspend(dev, RUNTIME_PM_DELAY_TIME);
@@ -229,7 +229,7 @@ void kbase_device_runtime_disable(struct kbase_device *kbdev)
        pm_runtime_disable(kbdev->dev);
 }
 
-static int pm_callback_runtime_on(kbase_device *kbdev)
+static int pm_callback_runtime_on(struct kbase_device *kbdev)
 {
 #ifdef CONFIG_MALI_MIDGARD_DVFS        
        struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
@@ -264,7 +264,7 @@ static int pm_callback_runtime_on(kbase_device *kbdev)
        return 0;
 }
 
-static void pm_callback_runtime_off(kbase_device *kbdev)
+static void pm_callback_runtime_off(struct kbase_device *kbdev)
 {
 #ifdef CONFIG_MALI_MIDGARD_DVFS        
        struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
@@ -308,11 +308,6 @@ static kbase_pm_callback_conf pm_callbacks = {
 
 /* Please keep table config_attributes in sync with config_attributes_hw_issue_8408 */
 static kbase_attribute config_attributes[] = {
-#if 0  
-       {
-        KBASE_CONFIG_ATTR_MEMORY_PER_PROCESS_LIMIT,
-        KBASE_VE_MEMORY_PER_PROCESS_LIMIT},
-#endif
 #ifdef CONFIG_UMP
        {
         KBASE_CONFIG_ATTR_UMP_DEVICE,
@@ -323,113 +318,13 @@ static kbase_attribute config_attributes[] = {
         KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS,
         (uintptr_t)&pm_callbacks},
 #endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_MAX,
-        KBASE_VE_MEMORY_OS_SHARED_MAX},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_MEMORY_OS_SHARED_PERF_GPU,
-        KBASE_VE_MEMORY_OS_SHARED_PERF_GPU},
-#endif 
        {
         KBASE_CONFIG_ATTR_PLATFORM_FUNCS,
         (uintptr_t) &platform_funcs},
        
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX,
-        KBASE_VE_GPU_FREQ_KHZ_MAX},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN,
-        KBASE_VE_GPU_FREQ_KHZ_MIN},
-
-#ifdef CONFIG_MALI_DEBUG
-/* Use more aggressive scheduling timeouts in debug builds for testing purposes */
-#if 0
-       {
-        KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
-        KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG},
-
-       {
-        KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
-        KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG},
-
-       {
-        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
-        KBASE_VE_JS_HARD_STOP_TICKS_SS_DEBUG},
-
-       {
-        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
-        KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG},
-
-       {
-        KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
-        KBASE_VE_JS_RESET_TICKS_SS_DEBUG},
-
-       {
-        KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
-        KBASE_VE_JS_RESET_TICKS_NSS_DEBUG},
-#endif
-#else                          /* CONFIG_MALI_DEBUG */
-/* In release builds same as the defaults but scaled for 5MHz FPGA */
-#if 0
-       {
-        KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
-        KBASE_VE_JS_SCHEDULING_TICK_NS},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
-        KBASE_VE_JS_SOFT_STOP_TICKS},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
-        KBASE_VE_JS_HARD_STOP_TICKS_SS},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
-        KBASE_VE_JS_HARD_STOP_TICKS_NSS},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
-        KBASE_VE_JS_RESET_TICKS_SS},
-#endif
-#if 0 
-       {
-        KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
-        KBASE_VE_JS_RESET_TICKS_NSS},
-#endif
-#endif                         /* CONFIG_MALI_DEBUG */
-#if 1
        {
         KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS,
         KBASE_VE_JS_RESET_TIMEOUT_MS},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS,
-        KBASE_VE_JS_CTX_TIMESLICE_NS},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
-        KBASE_VE_CPU_SPEED_FUNC},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE,
-        KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE},
-#endif
-#if 0
-       {
-        KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US,
-        20},
-#endif
        {
         KBASE_CONFIG_ATTR_END,
         0}
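After this trim the Rockchip attribute table only keeps the entries the platform actually overrides (the UMP device, the power-management callbacks, the platform functions and the JS reset timeout); everything deleted here falls back to the driver's built-in defaults. The table is walked until the KBASE_CONFIG_ATTR_END sentinel, roughly as in the sketch below (hypothetical helper; the id/data member names of struct kbase_attribute are assumed):

static uintptr_t example_lookup_attribute(const struct kbase_attribute *attrs,
					  int id, uintptr_t fallback)
{
	for (; attrs->id != KBASE_CONFIG_ATTR_END; ++attrs)
		if (attrs->id == id)
			return attrs->data;
	return fallback;	/* not overridden by the platform table */
}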
index 5aa15c00322fcd12bd77d203534b6d5047526f36..1caa707378caa8f8574b701abd12a59d95c153aa 100755 (executable)
@@ -73,7 +73,7 @@ unsigned int MALI_DVFS_STEP = ARRAY_SIZE(mali_dvfs_infotbl);
 static struct cpufreq_frequency_table *mali_freq_table = NULL;
 #ifdef CONFIG_MALI_MIDGARD_DVFS
 typedef struct _mali_dvfs_status_type {
-       kbase_device *kbdev;
+       struct kbase_device *kbdev;
        int step;
        int utilisation;
        u32 temperature;
@@ -244,7 +244,7 @@ static void mali_dvfs_event_proc(struct work_struct *w)
                } else if ((dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) &&
                                   (dvfs_status->step < MALI_DVFS_STEP-1) && fps < fps_limit) {
                        level_up_time++;
-                       if (level_up_time == MALI_DVFS_TIME_INTERVAL) {
+                       if (level_up_time == MALI_DVFS_UP_TIME_INTERVAL) {
                                /*
                                printk("up,utilisation=%d,current clock=%d,fps = %d,temperature = %d",
                                                dvfs_status->utilisation, mali_dvfs_infotbl[dvfs_status->step].clock,
@@ -261,7 +261,7 @@ static void mali_dvfs_event_proc(struct work_struct *w)
                } else if ((dvfs_status->step > 0) &&
                                        (dvfs_status->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) {
                        level_down_time++;
-                       if (level_down_time==MALI_DVFS_TIME_INTERVAL) {
+                       if (level_down_time==MALI_DVFS_DOWN_TIME_INTERVAL) {
                                /*
                                printk("down,utilisation=%d,current clock=%d,fps = %d,temperature = %d",
                                                dvfs_status->utilisation,
@@ -312,7 +312,7 @@ int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
        platform = (struct rk_context *)kbdev->platform_context;
 
        spin_lock_irqsave(&mali_dvfs_spinlock, flags);
-       if (platform->time_tick < MALI_DVFS_TIME_INTERVAL) {
+       if (platform->time_tick < MALI_DVFS_UP_TIME_INTERVAL) {
                platform->time_tick++;
                platform->time_busy += kbdev->pm.metrics.time_busy;
                platform->time_idle += kbdev->pm.metrics.time_idle;
@@ -322,7 +322,7 @@ int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
                platform->time_tick = 0;
        }
 
-       if ((platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
+       if ((platform->time_tick == MALI_DVFS_UP_TIME_INTERVAL) &&
                (platform->time_idle + platform->time_busy > 0))
                platform->utilisation = (100 * platform->time_busy) /
                                                                (platform->time_idle + platform->time_busy);
@@ -615,7 +615,7 @@ void mali_dvfs_freq_under_unlock(void)
        printk(KERN_DEBUG " mali clock Under Lock Unset\n");
 }
 
-void kbase_platform_dvfs_set_clock(kbase_device *kbdev, int freq)
+void kbase_platform_dvfs_set_clock(struct kbase_device *kbdev, int freq)
 {
        struct rk_context *platform;
 
@@ -645,7 +645,7 @@ int kbase_platform_dvfs_get_level(int freq)
        }
        return -1;
 }
-void kbase_platform_dvfs_set_level(kbase_device *kbdev, int level)
+void kbase_platform_dvfs_set_level(struct kbase_device *kbdev, int level)
 {
        static int prev_level = -1;
 
index c1cb689554bdd100faa32bfe6f85782a7dac6554..52167d362cc59d91ded5a990057a5ea5c911792d 100755 (executable)
@@ -19,8 +19,8 @@
 #define KBASE_PM_DVFS_FREQUENCY                 100
 
 #define MALI_DVFS_KEEP_STAY_CNT 10
-#define MALI_DVFS_TIME_INTERVAL 2
-
+#define MALI_DVFS_UP_TIME_INTERVAL 1
+#define MALI_DVFS_DOWN_TIME_INTERVAL 2
 #define MALI_DVFS_CURRENT_FREQ 0
 #if 0
 #define MALI_DVFS_BL_CONFIG_FREQ 500
@@ -41,7 +41,7 @@ extern unsigned int MALI_DVFS_STEP;
 #define CONFIG_MALI_MIDGARD_FREQ_LOCK
 #endif
 
-void kbase_platform_dvfs_set_clock(kbase_device *kbdev, int freq);
+void kbase_platform_dvfs_set_clock(struct kbase_device *kbdev, int freq);
 void kbase_platform_dvfs_set_level(struct kbase_device *kbdev, int level);
 int kbase_platform_dvfs_get_level(int freq);
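Splitting MALI_DVFS_TIME_INTERVAL into separate up and down constants gives the Rockchip DVFS governor asymmetric hysteresis: a single over-threshold sample is now enough to raise the clock, while two consecutive under-threshold samples are required before it drops a level. A condensed, illustrative sketch of the decision made in mali_dvfs_event_proc() earlier in this patch (not a verbatim copy):

if (utilisation > mali_dvfs_infotbl[step].max_threshold) {
	if (++level_up_time == MALI_DVFS_UP_TIME_INTERVAL) {	/* == 1 sample */
		step++;				/* ramp up quickly */
		level_up_time = 0;
	}
	level_down_time = 0;
} else if (step > 0 && utilisation < mali_dvfs_infotbl[step].min_threshold) {
	if (++level_down_time == MALI_DVFS_DOWN_TIME_INTERVAL) {	/* == 2 samples */
		step--;				/* ramp down more conservatively */
		level_down_time = 0;
	}
	level_up_time = 0;
}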
 
index 7ea3f2832da48e96a304fa96eaff231db0619cf7..7b8d83c1475a7b8e372ab092a2494ec58d672666 100755 (executable)
@@ -67,7 +67,7 @@ int mali_dvfs_clk_set(struct dvfs_node *node,unsigned long rate)
        }
        return ret;
 }
-static int kbase_platform_power_clock_init(kbase_device *kbdev)
+static int kbase_platform_power_clock_init(struct kbase_device *kbdev)
 {
        /*struct device *dev = kbdev->dev;*/
        struct rk_context *platform;
@@ -926,7 +926,7 @@ mali_error kbase_platform_init(struct kbase_device *kbdev)
        return MALI_ERROR_FUNCTION_FAILED;
 }
 
-void kbase_platform_term(kbase_device *kbdev)
+void kbase_platform_term(struct kbase_device *kbdev)
 {
        struct rk_context *platform;
 
index 49ef7b59a46a2085283fb838206029da43263fd7..2e031ccd3994e3f542c07835af24ced45a8968f1 100755 (executable)
@@ -40,7 +40,7 @@ int kbase_platform_create_sysfs_file(struct device *dev);
 void kbase_platform_remove_sysfs_file(struct device *dev);
 int kbase_platform_is_power_on(void);
 mali_error kbase_platform_init(struct kbase_device *kbdev);
-void kbase_platform_term(kbase_device *kbdev);
+void kbase_platform_term(struct kbase_device *kbdev);
 
 int kbase_platform_clock_on(struct kbase_device *kbdev);
 int kbase_platform_clock_off(struct kbase_device *kbdev);
diff --git a/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress/mali_kbase_config_platform.h
new file mode 100755 (executable)
index 0000000..8125948
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 5000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 5000
index 01d67fdc68bbb7415ec9b0e78582dd6361c501cb..90561fc5aa772b342c3add5b00825f6e2f85102c 100755 (executable)
@@ -27,9 +27,6 @@
  * and config_attributes_hw_issue_8408[]. Settings are not shared for
  * JS_HARD_STOP_TICKS_SS and JS_RESET_TICKS_SS.
  */
-#define KBASE_VE_GPU_FREQ_KHZ_MAX               5000
-#define KBASE_VE_GPU_FREQ_KHZ_MIN               5000
-
 #define KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG    15000000u      /* 15ms, an aggressive tick for testing purposes. This will reduce performance significantly */
 #define KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG       1      /* between 15ms and 30ms before soft-stop a job */
 #define KBASE_VE_JS_SOFT_STOP_TICKS_CL_DEBUG    1      /* between 15ms and 30ms before soft-stop a CL job */
@@ -56,7 +53,6 @@
 
 #define KBASE_VE_JS_RESET_TIMEOUT_MS            3000   /* 3s before cancelling stuck jobs */
 #define KBASE_VE_JS_CTX_TIMESLICE_NS            1000000        /* 1ms - an aggressive timeslice for testing purposes (causes lots of scheduling out for >4 ctxs) */
-#define KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE        ((uintptr_t)MALI_FALSE) /* By default we prefer performance over security on r0p0-15dev0 and KBASE_CONFIG_ATTR_ earlier */
 #define KBASE_VE_POWER_MANAGEMENT_CALLBACKS     ((uintptr_t)&pm_callbacks)
 #define KBASE_VE_CPU_SPEED_FUNC                 ((uintptr_t)&kbase_get_vexpress_cpu_clock_speed)
 
@@ -68,18 +64,19 @@ static kbase_io_resources io_resources = {
        .mmu_irq_number = 69,
        .gpu_irq_number = 70,
        .io_memory_region = {
-                            .start = 0xFC010000,
-                            .end = 0xFC010000 + (4096 * 4) - 1}
+       .start = 0xFC010000,
+       .end = 0xFC010000 + (4096 * 4) - 1
+       }
 };
-#endif
+#endif /* CONFIG_OF */
 
-static int pm_callback_power_on(kbase_device *kbdev)
+static int pm_callback_power_on(struct kbase_device *kbdev)
 {
        /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
        return 1;
 }
 
-static void pm_callback_power_off(kbase_device *kbdev)
+static void pm_callback_power_off(struct kbase_device *kbdev)
 {
 #if HARD_RESET_AT_POWER_OFF
        /* Cause a GPU hard reset to test whether we have actually idled the GPU
@@ -94,7 +91,7 @@ static void pm_callback_power_off(kbase_device *kbdev)
 #endif
 }
 
-static kbase_pm_callback_conf pm_callbacks = {
+static struct kbase_pm_callback_conf pm_callbacks = {
        .power_on_callback = pm_callback_power_on,
        .power_off_callback = pm_callback_power_off,
        .power_suspend_callback  = NULL,
@@ -102,15 +99,7 @@ static kbase_pm_callback_conf pm_callbacks = {
 };
 
 /* Please keep table config_attributes in sync with config_attributes_hw_issue_8408 */
-static kbase_attribute config_attributes[] = {
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX,
-        KBASE_VE_GPU_FREQ_KHZ_MAX},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN,
-        KBASE_VE_GPU_FREQ_KHZ_MIN},
-
+static struct kbase_attribute config_attributes[] = {
 #ifdef CONFIG_MALI_DEBUG
 /* Use more aggressive scheduling timeouts in debug builds for testing purposes */
        {
@@ -202,14 +191,6 @@ static kbase_attribute config_attributes[] = {
         KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
         KBASE_VE_CPU_SPEED_FUNC},
 
-       {
-        KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE,
-        KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US,
-        20},
-
        {
         KBASE_CONFIG_ATTR_END,
         0}
@@ -219,15 +200,7 @@ static kbase_attribute config_attributes[] = {
  * JS_HARD_STOP_TICKS_SS, JS_RESET_TICKS_SS that
  * are needed for BASE_HW_ISSUE_8408.
  */
-kbase_attribute config_attributes_hw_issue_8408[] = {
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX,
-        KBASE_VE_GPU_FREQ_KHZ_MAX},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN,
-        KBASE_VE_GPU_FREQ_KHZ_MIN},
-
+struct kbase_attribute config_attributes_hw_issue_8408[] = {
 #ifdef CONFIG_MALI_DEBUG
 /* Use more aggressive scheduling timeouts in debug builds for testing purposes */
        {
@@ -295,23 +268,19 @@ kbase_attribute config_attributes_hw_issue_8408[] = {
         KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
         KBASE_VE_CPU_SPEED_FUNC},
 
-       {
-        KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE,
-        KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE},
-
        {
         KBASE_CONFIG_ATTR_END,
         0}
 };
 
-static kbase_platform_config versatile_platform_config = {
+static struct kbase_platform_config versatile_platform_config = {
        .attributes = config_attributes,
 #ifndef CONFIG_OF
        .io_resources = &io_resources
 #endif
 };
 
-kbase_platform_config *kbase_get_platform_config(void)
+struct kbase_platform_config *kbase_get_platform_config(void)
 {
        return &versatile_platform_config;
 }
index 1b45d3cb0e35fb781d008e4097b29fbfc0631fc7..3dd6c0f5504604ee3b184d8dfa45bb4f007cdde8 100755 (executable)
@@ -77,8 +77,8 @@ int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock)
                u32 pa_divide = 0;
                u32 pb_divide = 0;
                u32 pc_divide = 0;
-               void *volatile pSysCfgReg = 0;
-               void *volatile pSCCReg = 0;
+               void __iomem *pSysCfgReg = NULL;
+               void __iomem *pSCCReg = NULL;
 
                /* Init the value in case something goes wrong */
                *cpu_clock = 0;
@@ -162,7 +162,7 @@ int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock)
                raw_spin_unlock(&syscfg_lock);
                /* Convert result expressed in Hz to Mhz units. */
                *cpu_clock /= HZ_IN_MHZ;
-               if(!result)
+               if (!result)
                {
                        cpu_clock_speed = *cpu_clock;
                }
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/Kbuild
new file mode 100755 (executable)
index 0000000..d9bfabc
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# (C) COPYRIGHT 2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA  02110-1301, USA.
+#
+#
+
+
+obj-y += mali_kbase_config_vexpress.o
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_platform.h
new file mode 100755 (executable)
index 0000000..8125948
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 5000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 5000
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c b/drivers/gpu/arm/midgard/platform/vexpress_1xv7_a57/mali_kbase_config_vexpress.c
new file mode 100755 (executable)
index 0000000..0ec2821
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+/* Versatile Express (VE) configuration defaults shared between config_attributes[]
+ * and config_attributes_hw_issue_8408[]. Settings are not shared for
+ * JS_HARD_STOP_TICKS_SS and JS_RESET_TICKS_SS.
+ */
+#define KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG    3000000000u    /* 3s */
+#define KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG       10     /* 30s */
+#define KBASE_VE_JS_SOFT_STOP_TICKS_CL_DEBUG     5     /* 15s */
+#define KBASE_VE_JS_HARD_STOP_TICKS_SS_DEBUG    20     /* 60s */
+#define KBASE_VE_JS_HARD_STOP_TICKS_SS_8401_DEBUG 120  /* 360s */
+#define KBASE_VE_JS_HARD_STOP_TICKS_CL_DEBUG    10     /* 30s */
+#define KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG   6000   /* 18000s */
+#define KBASE_VE_JS_RESET_TICKS_SS_DEBUG        30     /* 180s */
+#define KBASE_VE_JS_RESET_TICKS_SS_8401_DEBUG   180    /* 540s */
+#define KBASE_VE_JS_RESET_TICKS_CL_DEBUG        30     /* 180s */
+#define KBASE_VE_JS_RESET_TICKS_NSS_DEBUG       6010   /* 18030s*/
+
+#define KBASE_VE_JS_SCHEDULING_TICK_NS          3000000000u    /* 3s */
+#define KBASE_VE_JS_SOFT_STOP_TICKS             10     /* 30s */
+#define KBASE_VE_JS_SOFT_STOP_TICKS_CL           5     /* 15s */
+#define KBASE_VE_JS_HARD_STOP_TICKS_SS          20     /* 60s */
+#define KBASE_VE_JS_HARD_STOP_TICKS_SS_8401     120    /* 360s */
+#define KBASE_VE_JS_HARD_STOP_TICKS_CL          10     /* 30s */
+#define KBASE_VE_JS_HARD_STOP_TICKS_NSS         6000   /* 18000s */
+#define KBASE_VE_JS_RESET_TICKS_SS              30     /* 180s */
+#define KBASE_VE_JS_RESET_TICKS_SS_8401         180    /* 540s */
+#define KBASE_VE_JS_RESET_TICKS_CL              30     /* 180s */
+#define KBASE_VE_JS_RESET_TICKS_NSS             6010   /* 18030s*/
+
+#define KBASE_VE_JS_RESET_TIMEOUT_MS            3000   /* 3s before cancelling stuck jobs */
+#define KBASE_VE_JS_CTX_TIMESLICE_NS            1000000        /* 1ms - an aggressive timeslice for testing purposes (causes lots of scheduling out for >4 ctxs) */
+#define KBASE_VE_POWER_MANAGEMENT_CALLBACKS     ((uintptr_t)&pm_callbacks)
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+       .job_irq_number = 68,
+       .mmu_irq_number = 69,
+       .gpu_irq_number = 70,
+       .io_memory_region = {
+                            .start = 0x2f010000,
+                            .end = 0x2f010000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+       /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
+       return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+       /* Cause a GPU hard reset to test whether we have actually idled the GPU
+        * and that we properly reconfigure the GPU on power up.
+        * Usually this would be dangerous, but if the GPU is working correctly it should
+        * be completely safe as the GPU should not be active at this point.
+        * However this is disabled normally because it will most likely interfere with
+        * bus logging etc.
+        */
+       KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+       kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+static struct kbase_pm_callback_conf pm_callbacks = {
+       .power_on_callback = pm_callback_power_on,
+       .power_off_callback = pm_callback_power_off,
+       .power_suspend_callback  = NULL,
+       .power_resume_callback = NULL
+};
+
+/* Please keep table config_attributes in sync with config_attributes_hw_issue_8408 */
+static struct kbase_attribute config_attributes[] = {
+#ifdef CONFIG_MALI_DEBUG
+/* Use more aggressive scheduling timeouts in debug builds for testing purposes */
+       {
+        KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
+        KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
+        KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL,
+        KBASE_VE_JS_SOFT_STOP_TICKS_CL_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
+        KBASE_VE_JS_HARD_STOP_TICKS_SS_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL,
+        KBASE_VE_JS_HARD_STOP_TICKS_CL_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
+        KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
+        KBASE_VE_JS_RESET_TICKS_SS_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL,
+        KBASE_VE_JS_RESET_TICKS_CL_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
+        KBASE_VE_JS_RESET_TICKS_NSS_DEBUG},
+#else                          /* CONFIG_MALI_DEBUG */
+/* In release builds these match the defaults, scaled for the 5MHz FPGA */
+       {
+        KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
+        KBASE_VE_JS_SCHEDULING_TICK_NS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
+        KBASE_VE_JS_SOFT_STOP_TICKS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL,
+        KBASE_VE_JS_SOFT_STOP_TICKS_CL},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
+        KBASE_VE_JS_HARD_STOP_TICKS_SS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL,
+        KBASE_VE_JS_HARD_STOP_TICKS_CL},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
+        KBASE_VE_JS_HARD_STOP_TICKS_NSS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
+        KBASE_VE_JS_RESET_TICKS_SS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL,
+        KBASE_VE_JS_RESET_TICKS_CL},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
+        KBASE_VE_JS_RESET_TICKS_NSS},
+#endif                         /* CONFIG_MALI_DEBUG */
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS,
+        KBASE_VE_JS_RESET_TIMEOUT_MS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS,
+        KBASE_VE_JS_CTX_TIMESLICE_NS},
+
+       {
+        KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS,
+        KBASE_VE_POWER_MANAGEMENT_CALLBACKS},
+
+       {
+        KBASE_CONFIG_ATTR_END,
+        0}
+};
+
+/* As the config_attributes array above, except with the different
+ * JS_HARD_STOP_TICKS_SS and JS_RESET_TICKS_SS settings needed for
+ * BASE_HW_ISSUE_8408.
+ */
+struct kbase_attribute config_attributes_hw_issue_8408[] = {
+#ifdef CONFIG_MALI_DEBUG
+/* Use more aggressive scheduling timeouts in debug builds for testing purposes */
+       {
+        KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
+        KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
+        KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
+        KBASE_VE_JS_HARD_STOP_TICKS_SS_8401_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
+        KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
+        KBASE_VE_JS_RESET_TICKS_SS_8401_DEBUG},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
+        KBASE_VE_JS_RESET_TICKS_NSS_DEBUG},
+#else                          /* CONFIG_MALI_DEBUG */
+/* In release builds these match the defaults, scaled for the 5MHz FPGA */
+       {
+        KBASE_CONFIG_ATTR_JS_SCHEDULING_TICK_NS,
+        KBASE_VE_JS_SCHEDULING_TICK_NS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS,
+        KBASE_VE_JS_SOFT_STOP_TICKS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS,
+        KBASE_VE_JS_HARD_STOP_TICKS_SS_8401},
+
+       {
+        KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS,
+        KBASE_VE_JS_HARD_STOP_TICKS_NSS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS,
+        KBASE_VE_JS_RESET_TICKS_SS_8401},
+
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS,
+        KBASE_VE_JS_RESET_TICKS_NSS},
+#endif                         /* CONFIG_MALI_DEBUG */
+       {
+        KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS,
+        KBASE_VE_JS_RESET_TIMEOUT_MS},
+
+       {
+        KBASE_CONFIG_ATTR_JS_CTX_TIMESLICE_NS,
+        KBASE_VE_JS_CTX_TIMESLICE_NS},
+
+       {
+        KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS,
+        KBASE_VE_POWER_MANAGEMENT_CALLBACKS},
+
+       {
+        KBASE_CONFIG_ATTR_END,
+        0}
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+       .attributes = config_attributes,
+#ifndef CONFIG_OF
+       .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+       return &versatile_platform_config;
+}
+
+int kbase_platform_early_init(void)
+{
+       /* Nothing needed at this stage */
+       return 0;
+}
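
The tick-count macros in the file above are annotated with wall-clock equivalents derived from KBASE_VE_JS_SCHEDULING_TICK_NS (3000000000 ns, i.e. 3 s per tick in this file). A minimal sketch of that arithmetic, for illustration only; the helper below is hypothetical and not a kbase API:

#include <stdio.h>

/* Convert a job-scheduler tick count into seconds, given the tick
 * period in nanoseconds.  Purely illustrative. */
static unsigned long long ticks_to_seconds(unsigned long long ticks,
					   unsigned long long tick_ns)
{
	return ticks * tick_ns / 1000000000ULL;
}

int main(void)
{
	/* 10 soft-stop ticks at 3000000000 ns per tick -> 30 seconds,
	 * matching the comment on KBASE_VE_JS_SOFT_STOP_TICKS. */
	printf("%llu\n", ticks_to_seconds(10, 3000000000ULL));
	return 0;
}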
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_6xvirtex7_10mhz/mali_kbase_config_platform.h
new file mode 100755 (executable)
index 0000000..ad6bfb2
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 10000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 10000
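
GPU_FREQ_KHZ_MAX/MIN are expressed in kHz, so any consumer that talks to the common clock framework has to scale them by 1000. A hedged sketch of that conversion follows; whether this platform routes the value through clk_set_rate() is an assumption, not something this patch does:

#include <linux/clk.h>

/* Illustrative only: program a clk handle with the platform's fixed
 * GPU frequency, converting the kHz macro to Hz for the clock API.
 * Assumes the platform header defining GPU_FREQ_KHZ_MAX is included. */
static int example_apply_gpu_freq(struct clk *gpu_clk)
{
	return clk_set_rate(gpu_clk, (unsigned long)GPU_FREQ_KHZ_MAX * 1000UL);
}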
index 57e86d5c8a61e6f341ed0560d582a43ee7e2b942..f1921c02bb8c583f7a94d5443308d1b94f0c2519 100755 (executable)
@@ -27,9 +27,6 @@
  * and config_attributes_hw_issue_8408[]. Settings are not shared for
  * JS_HARD_STOP_TICKS_SS and JS_RESET_TICKS_SS.
  */
-#define KBASE_VE_GPU_FREQ_KHZ_MAX               10000
-#define KBASE_VE_GPU_FREQ_KHZ_MIN               10000
-
 #define KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG    15000000u      /* 15ms, an aggressive tick for testing purposes. This will reduce performance significantly */
 #define KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG       1      /* between 15ms and 30ms before soft-stop a job */
 #define KBASE_VE_JS_SOFT_STOP_TICKS_CL_DEBUG    1      /* between 15ms and 30ms before soft-stop a CL job */
 
 #define KBASE_VE_JS_RESET_TIMEOUT_MS            3000   /* 3s before cancelling stuck jobs */
 #define KBASE_VE_JS_CTX_TIMESLICE_NS            1000000        /* 1ms - an aggressive timeslice for testing purposes (causes lots of scheduling out for >4 ctxs) */
-#define KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE        ((uintptr_t)MALI_FALSE) /* By default we prefer performance over security on r0p0-15dev0 and KBASE_CONFIG_ATTR_ earlier */
 #define KBASE_VE_POWER_MANAGEMENT_CALLBACKS     ((uintptr_t)&pm_callbacks)
 #define KBASE_VE_CPU_SPEED_FUNC                 ((uintptr_t)&kbase_get_vexpress_cpu_clock_speed)
 
 #define HARD_RESET_AT_POWER_OFF 0
 
 #ifndef CONFIG_OF
-static kbase_io_resources io_resources = {
+static struct kbase_io_resources io_resources = {
        .job_irq_number = 75,
        .mmu_irq_number = 76,
        .gpu_irq_number = 77,
@@ -73,13 +69,13 @@ static kbase_io_resources io_resources = {
 };
 #endif
 
-static int pm_callback_power_on(kbase_device *kbdev)
+static int pm_callback_power_on(struct kbase_device *kbdev)
 {
        /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
        return 1;
 }
 
-static void pm_callback_power_off(kbase_device *kbdev)
+static void pm_callback_power_off(struct kbase_device *kbdev)
 {
 #if HARD_RESET_AT_POWER_OFF
        /* Cause a GPU hard reset to test whether we have actually idled the GPU
@@ -94,7 +90,7 @@ static void pm_callback_power_off(kbase_device *kbdev)
 #endif
 }
 
-static kbase_pm_callback_conf pm_callbacks = {
+static struct kbase_pm_callback_conf pm_callbacks = {
        .power_on_callback = pm_callback_power_on,
        .power_off_callback = pm_callback_power_off,
        .power_suspend_callback  = NULL,
@@ -102,15 +98,7 @@ static kbase_pm_callback_conf pm_callbacks = {
 };
 
 /* Please keep table config_attributes in sync with config_attributes_hw_issue_8408 */
-static kbase_attribute config_attributes[] = {
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX,
-        KBASE_VE_GPU_FREQ_KHZ_MAX},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN,
-        KBASE_VE_GPU_FREQ_KHZ_MIN},
-
+static struct kbase_attribute config_attributes[] = {
 #ifdef CONFIG_MALI_DEBUG
 /* Use more aggressive scheduling timeouts in debug builds for testing purposes */
        {
@@ -202,14 +190,6 @@ static kbase_attribute config_attributes[] = {
         KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
         KBASE_VE_CPU_SPEED_FUNC},
 
-       {
-        KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE,
-        KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US,
-        20},
-
        {
         KBASE_CONFIG_ATTR_END,
         0}
@@ -219,15 +199,7 @@ static kbase_attribute config_attributes[] = {
  * JS_HARD_STOP_TICKS_SS, JS_RESET_TICKS_SS that
  * are needed for BASE_HW_ISSUE_8408.
  */
-kbase_attribute config_attributes_hw_issue_8408[] = {
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX,
-        KBASE_VE_GPU_FREQ_KHZ_MAX},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN,
-        KBASE_VE_GPU_FREQ_KHZ_MIN},
-
+struct kbase_attribute config_attributes_hw_issue_8408[] = {
 #ifdef CONFIG_MALI_DEBUG
 /* Use more aggressive scheduling timeouts in debug builds for testing purposes */
        {
@@ -295,23 +267,19 @@ kbase_attribute config_attributes_hw_issue_8408[] = {
         KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
         KBASE_VE_CPU_SPEED_FUNC},
 
-       {
-        KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE,
-        KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE},
-
        {
         KBASE_CONFIG_ATTR_END,
         0}
 };
 
-static kbase_platform_config versatile_platform_config = {
+static struct kbase_platform_config versatile_platform_config = {
        .attributes = config_attributes,
 #ifndef CONFIG_OF
        .io_resources = &io_resources
 #endif
 };
 
-kbase_platform_config *kbase_get_platform_config(void)
+struct kbase_platform_config *kbase_get_platform_config(void)
 {
        return &versatile_platform_config;
 }
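
Both attribute tables touched above end with a KBASE_CONFIG_ATTR_END sentinel, which is what lets the driver walk them without a separate length field. Below is a minimal sketch of such a walk; the member names id and data on struct kbase_attribute are an assumption here, and real code should use the driver's own lookup helper rather than this hypothetical one:

#include <mali_kbase_config.h>	/* struct kbase_attribute, KBASE_CONFIG_ATTR_END */

/* Hypothetical lookup over a KBASE_CONFIG_ATTR_END-terminated table;
 * returns 'fallback' when the requested id is absent. */
static uintptr_t example_find_attribute(const struct kbase_attribute *attrs,
					int id, uintptr_t fallback)
{
	for (; attrs->id != KBASE_CONFIG_ATTR_END; ++attrs)
		if (attrs->id == id)
			return attrs->data;
	return fallback;
}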
diff --git a/drivers/gpu/arm/midgard/platform/vexpress_virtex7_40mhz/mali_kbase_config_platform.h b/drivers/gpu/arm/midgard/platform/vexpress_virtex7_40mhz/mali_kbase_config_platform.h
new file mode 100755 (executable)
index 0000000..ad576a5
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 40000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 40000
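
Each of the three new platform headers repeats the same MIN == MAX pair with a different literal. A hypothetical variant, illustration only and not part of the patch (the EXAMPLE_ name is invented), derives both from one value so they cannot drift apart:

/* Hypothetical alternative layout for a fixed-clock platform. */
#define EXAMPLE_GPU_FIXED_FREQ_KHZ 40000

#define GPU_FREQ_KHZ_MAX EXAMPLE_GPU_FIXED_FREQ_KHZ
#define GPU_FREQ_KHZ_MIN EXAMPLE_GPU_FIXED_FREQ_KHZ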
index c403fde188bdc2d4dea5c8528e50e9cb4a67c2bd..c0ec1dd57f04be568c5ab60d856741d56ffbb9ae 100755 (executable)
@@ -26,9 +26,6 @@
  * and config_attributes_hw_issue_8408[]. Settings are not shared for
  * JS_HARD_STOP_TICKS_SS and JS_RESET_TICKS_SS.
  */
-#define KBASE_VE_GPU_FREQ_KHZ_MAX               40000
-#define KBASE_VE_GPU_FREQ_KHZ_MIN               40000
-
 #define KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG    15000000u      /* 15ms, an aggressive tick for testing purposes. This will reduce performance significantly */
 #define KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG       1      /* between 15ms and 30ms before soft-stop a job */
 #define KBASE_VE_JS_SOFT_STOP_TICKS_CL_DEBUG    1      /* between 15ms and 30ms before soft-stop a CL job */
@@ -55,7 +52,6 @@
 
 #define KBASE_VE_JS_RESET_TIMEOUT_MS            3000   /* 3s before cancelling stuck jobs */
 #define KBASE_VE_JS_CTX_TIMESLICE_NS            1000000        /* 1ms - an aggressive timeslice for testing purposes (causes lots of scheduling out for >4 ctxs) */
-#define KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE        ((uintptr_t)MALI_FALSE) /* By default we prefer performance over security on r0p0-15dev0 and KBASE_CONFIG_ATTR_ earlier */
 #define KBASE_VE_POWER_MANAGEMENT_CALLBACKS     ((uintptr_t)&pm_callbacks)
 #define KBASE_VE_CPU_SPEED_FUNC                 ((uintptr_t)&kbase_get_vexpress_cpu_clock_speed)
 
@@ -67,18 +63,20 @@ static kbase_io_resources io_resources = {
        .mmu_irq_number = 69,
        .gpu_irq_number = 70,
        .io_memory_region = {
-                            .start = 0xFC010000,
-                            .end = 0xFC010000 + (4096 * 4) - 1}
+       .start = 0xFC010000,
+       .end = 0xFC010000 + (4096 * 4) - 1
+       }
+
 };
-#endif
+#endif /* CONFIG_OF */
 
-static int pm_callback_power_on(kbase_device *kbdev)
+static int pm_callback_power_on(struct kbase_device *kbdev)
 {
        /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
        return 1;
 }
 
-static void pm_callback_power_off(kbase_device *kbdev)
+static void pm_callback_power_off(struct kbase_device *kbdev)
 {
 #if HARD_RESET_AT_POWER_OFF
        /* Cause a GPU hard reset to test whether we have actually idled the GPU
@@ -93,7 +91,7 @@ static void pm_callback_power_off(kbase_device *kbdev)
 #endif
 }
 
-static kbase_pm_callback_conf pm_callbacks = {
+static struct kbase_pm_callback_conf pm_callbacks = {
        .power_on_callback = pm_callback_power_on,
        .power_off_callback = pm_callback_power_off,
        .power_suspend_callback  = NULL,
@@ -101,15 +99,7 @@ static kbase_pm_callback_conf pm_callbacks = {
 };
 
 /* Please keep table config_attributes in sync with config_attributes_hw_issue_8408 */
-static kbase_attribute config_attributes[] = {
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX,
-        KBASE_VE_GPU_FREQ_KHZ_MAX},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN,
-        KBASE_VE_GPU_FREQ_KHZ_MIN},
-
+static struct kbase_attribute config_attributes[] = {
 #ifdef CONFIG_MALI_DEBUG
 /* Use more aggressive scheduling timeouts in debug builds for testing purposes */
        {
@@ -201,14 +191,6 @@ static kbase_attribute config_attributes[] = {
         KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
         KBASE_VE_CPU_SPEED_FUNC},
 
-       {
-        KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE,
-        KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_IRQ_THROTTLE_TIME_US,
-        20},
-
        {
         KBASE_CONFIG_ATTR_END,
         0}
@@ -218,15 +200,7 @@ static kbase_attribute config_attributes[] = {
  * JS_HARD_STOP_TICKS_SS, JS_RESET_TICKS_SS that
  * are needed for BASE_HW_ISSUE_8408.
  */
-kbase_attribute config_attributes_hw_issue_8408[] = {
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MAX,
-        KBASE_VE_GPU_FREQ_KHZ_MAX},
-
-       {
-        KBASE_CONFIG_ATTR_GPU_FREQ_KHZ_MIN,
-        KBASE_VE_GPU_FREQ_KHZ_MIN},
-
+struct kbase_attribute config_attributes_hw_issue_8408[] = {
 #ifdef CONFIG_MALI_DEBUG
 /* Use more aggressive scheduling timeouts in debug builds for testing purposes */
        {
@@ -294,23 +268,19 @@ kbase_attribute config_attributes_hw_issue_8408[] = {
         KBASE_CONFIG_ATTR_CPU_SPEED_FUNC,
         KBASE_VE_CPU_SPEED_FUNC},
 
-       {
-        KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE,
-        KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE},
-
        {
         KBASE_CONFIG_ATTR_END,
         0}
 };
 
-static kbase_platform_config virtex7_platform_config = {
+static struct kbase_platform_config virtex7_platform_config = {
        .attributes = config_attributes,
 #ifndef CONFIG_OF
        .io_resources = &io_resources
 #endif
 };
 
-kbase_platform_config *kbase_get_platform_config(void)
+struct kbase_platform_config *kbase_get_platform_config(void)
 {
        return &virtex7_platform_config;
 }
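
The pm_callback_power_on() implementations in these hunks return 1 to tell the core that GPU state may have been lost while powered off (the optional HARD_RESET_AT_POWER_OFF path can wipe it). As a hedged sketch, a platform that genuinely retains GPU power and state across "power off" could return 0 instead; the function below is hypothetical and not part of this patch:

#include <mali_kbase.h>
#include <mali_kbase_defs.h>

/* Hypothetical callback for a platform that keeps the GPU powered and
 * its state intact: returning 0 signals that no GPU state was lost and
 * no re-initialisation is required. */
static int example_power_on_state_retained(struct kbase_device *kbdev)
{
	return 0;
}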
index 47d45e2daf6988eb5905f9684e2bc4c0667ef6ba..f0036a34545374744b2afd8e5b376a6adc91fe42 100755 (executable)
@@ -160,7 +160,7 @@ int kbase_get_vexpress_cpu_clock_speed(u32 *cpu_clock)
                raw_spin_unlock(&syscfg_lock);
                /* Convert result expressed in Hz to Mhz units. */
                *cpu_clock /= HZ_IN_MHZ;
-               if(!result)
+               if (!result)
                {
                        cpu_clock_speed = *cpu_clock;
                }
index 1923c891b99c80430a9f1de795d46fd9ec18724f..230ce4b7a23403ed13f7764d0de3b2e90a0d2c62 100755 (executable)
@@ -39,6 +39,8 @@ kbase_src = [Glob('#kernel/drivers/gpu/arm/midgard/*.c'),
              Glob('#kernel/drivers/gpu/arm/midgard/*.h'),
              ]
 
+kbase_src += [Glob('#kernel/drivers/gpu/arm/midgard/internal/*/*.c')]
+
 if Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock') and env['unit'] == '1':
        kbase_src += [Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock/*.c')]
        mock_test = 1
@@ -102,13 +104,11 @@ if int(env['ump']) == 1:
 patterns = ['*.mod.c', '*.o', '*.ko', '*.a', '.*.cmd', 'modules.order', '.tmp_versions', 'Module.symvers']
 
 for p in patterns:
-       Clean(cmd, Glob('#kernel/drivers/gpu/arm/midgard/%s' % p))
        Clean(cmd, Glob('#kernel/drivers/gpu/arm/midgard/%s' % p))
        Clean(cmd, Glob('#kernel/drivers/gpu/arm/midgard/config/%s' % p))
-       Clean(cmd, Glob('#kernel/drivers/gpu/arm/midgard/%s' % p))
        Clean(cmd, Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock/%s' % p))
        Clean(cmd, Glob('#kernel/drivers/gpu/arm/midgard/platform/%s/%s' % ((env['platform_config']), p) ))
-
+       Clean(cmd, Glob('#kernel/drivers/gpu/arm/midgard/internal/*/%s' % p))
 env.ProgTarget('kbase', cmd)
 
 env.AppendUnique(BASE=['cutils_list'])