# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r5p0-02dev0"
+MALI_RELEASE_NAME ?= "r5p0-06rel0"
# Paths required for build
KBASE_PATH = $(src)
mali_kbase_utility.c \
mali_kbase_debug.c \
mali_kbase_trace_timeline.c \
+ mali_kbase_gpu_memory_debugfs.c \
mali_kbase_mem_linux.c \
mali_kbase_core_linux.c \
mali_kbase_sync.c \
mali_kbase_disjoint_events.c \
mali_kbase_gator_api.c
-ifeq ($(CONFIG_DEBUG_FS),y)
- SRC += mali_kbase_gpu_memory_debugfs.c
-endif
-
ifeq ($(MALI_CUSTOMER_RELEASE),0)
SRC += \
	mali_kbase_pm_ca_random.c
endif
ifeq ($(CONFIG_MALI_PLATFORM_JUNO_SOC),y)
- SRC += platform/juno_soc/mali_kbase_config_vexpress.c
+ SRC += platform/juno_soc/mali_kbase_config_juno_soc.c
ccflags-y += -I$(src)/platform/juno_soc
endif
config MALI_DEVFREQ
bool "devfreq support for Mali"
- depends on PM_DEVFREQ
+ depends on MALI_MIDGARD && PM_DEVFREQ
help
Support devfreq for Mali.
config MALI_POWER_ACTOR
bool "Thermal API support for Mali"
- depends on DEVFREQ_THERMAL && THERMAL_POWER_ACTOR
+ depends on MALI_MIDGARD && DEVFREQ_THERMAL && THERMAL_POWER_ACTOR
help
Support the thermal API for Mali.
-
-
/**
 * @file
 * Software workarounds configuration for Hardware issues.
 */
#include <malisw/mali_malisw.h>
-/**
- * List of all hw features.
- *
- */
-enum base_hw_feature {
- /* Allow soft/hard stopping of job depending on job chain flag */
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
-
- /* Allow writes to SHADER_PWRON and TILER_PWRON registers while these cores are currently transitioning to OFF power state */
- BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
-
- /* 33-bit VA space of the GPU (but still 40 bit PA) */
- BASE_HW_FEATURE_33BIT_VA,
-
- /* The BASE_HW_FEATURE_END value must be the last feature listed in this enumeration
- * and must be the last value in each array that contains the list of features
- * for a particular HW version.
- */
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_generic[] = {
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_t72x[] = {
- BASE_HW_FEATURE_33BIT_VA,
- BASE_HW_FEATURE_END
-};
-
-static const enum base_hw_feature base_hw_features_t76x[] = {
- BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
- BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
- BASE_HW_FEATURE_END
-};
-
-
-/**
- * List of all workarounds.
- *
- */
-
-enum base_hw_issue {
-
- /* The current version of the model doesn't support Soft-Stop */
- BASE_HW_ISSUE_5736,
-
- /* Need way to guarantee that all previously-translated memory accesses are commited */
- BASE_HW_ISSUE_6367,
-
- /* Result swizzling doesn't work for GRDESC/GRDESC_DER */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_6398,
-
- /* Unaligned load stores crossing 128 bit boundaries will fail */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_6402,
-
- /* On job complete with non-done the cache is not flushed */
- BASE_HW_ISSUE_6787,
-
- /* WLS allocation does not respect the Instances field in the Thread Storage Descriptor */
- BASE_HW_ISSUE_7027,
-
- /* The clamp integer coordinate flag bit of the sampler descriptor is reserved */
- BASE_HW_ISSUE_7144,
-
- /* TEX_INDEX LOD is always use converted */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_8073,
-
- /* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes a instrumentation dump if
- PRFCNT_TILER_EN is enabled */
- BASE_HW_ISSUE_8186,
-
- /* Do not set .skip flag on the GRDESC, GRDESC_DER, DELTA, MOV, and NOP texturing instructions */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_8215,
-
- /* TIB: Reports faults from a vtile which has not yet been allocated */
- BASE_HW_ISSUE_8245,
-
- /* WLMA memory goes wrong when run on shader cores other than core 0. */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_8250,
-
- /* Hierz doesn't work when stenciling is enabled */
- BASE_HW_ISSUE_8260,
-
- /* Livelock in L0 icache */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_8280,
-
- /* uTLB deadlock could occur when writing to an invalid page at the same time as
- * access to a valid page in the same uTLB cache line ( == 4 PTEs == 16K block of mapping) */
- BASE_HW_ISSUE_8316,
-
- /* TLS base address mismatch, must stay below 1MB TLS */
- BASE_HW_ISSUE_8381,
-
- /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is still executing */
- BASE_HW_ISSUE_8394,
-
- /* CSE : Sends a TERMINATED response for a task that should not be terminated */
- /* (Note that PRLAM-8379 also uses this workaround) */
- BASE_HW_ISSUE_8401,
-
- /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader, Cache Flush, Tiler)
- * jobs causes 0x58 error on tiler job. */
- BASE_HW_ISSUE_8408,
-
- /* Disable the Pause Buffer in the LS pipe. */
- BASE_HW_ISSUE_8443,
-
- /* Stencil test enable 1->0 sticks */
- BASE_HW_ISSUE_8456,
-
- /* Tiler heap issue using FBOs or multiple processes using the tiler simultaneously */
- /* (Note that PRLAM-9049 also uses this work-around) */
- BASE_HW_ISSUE_8564,
-
- /* Fragments are clamped instead of discarded when fragment depth bound op is discard and depth datum source is shader. */
- BASE_HW_ISSUE_8634,
-
- /* Arithmetic pipe mode which uses additional hardware to
- * suppress the generation of Inf (Inf => MAX_FLOAT) and NaN (NaN = 0.0) not supported. */
- BASE_HW_ISSUE_8778,
-
- /* Livelock issue using atomic instructions (particularly when using atomic_cmpxchg as a spinlock) */
- BASE_HW_ISSUE_8791,
-
- /* Fused jobs are not supported (for various reasons) */
- /* Jobs with relaxed dependencies do not support soft-stop */
- /* (Note that PRLAM-8803, PRLAM-8393, PRLAM-8559, PRLAM-8601 & PRLAM-8607 all use this work-around) */
- BASE_HW_ISSUE_8803,
-
- /* Blend shader output is wrong for certain formats */
- BASE_HW_ISSUE_8833,
-
- /* Occlusion queries can create false 0 result in boolean and counter modes */
- BASE_HW_ISSUE_8879,
-
- /* Output has half intensity with blend shaders enabled on 8xMSAA. */
- BASE_HW_ISSUE_8896,
-
- /* 8xMSAA does not work with CRC */
- BASE_HW_ISSUE_8975,
-
- /* Boolean occlusion queries don't work properly due to sdc issue. */
- BASE_HW_ISSUE_8986,
-
- /* Change in RMUs in use causes problems related with the core's SDC */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_8987,
-
- /* Occlusion query result is not updated if color writes are disabled. */
- BASE_HW_ISSUE_9010,
-
- /* Problem with number of work registers in the RSD if set to 0 */
- BASE_HW_ISSUE_9275,
-
- /* Translate load/store moves into decode instruction */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_9418,
-
- /* Incorrect coverage mask for 8xMSAA */
- BASE_HW_ISSUE_9423,
-
- /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop won't complete until all 4 tasks have completed */
- BASE_HW_ISSUE_9435,
-
- /* HT: Tiler returns TERMINATED for command that hasn't been terminated */
- BASE_HW_ISSUE_9510,
-
- /* Livelock issue using atomic_cmpxchg */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_9566,
-
- /* Occasionally the GPU will issue multiple page faults for the same address before the MMU page table has been read by the GPU */
- BASE_HW_ISSUE_9630,
-
- /* Must clear the 64 byte private state of the tiler information */
- BASE_HW_ISSUE_10127,
-
- /* RA DCD load request to SDC returns invalid load ignore causing colour buffer mismatch */
- BASE_HW_ISSUE_10327,
-
- /* Occlusion query result may be updated prematurely when fragment shader alters coverage */
- BASE_HW_ISSUE_10410,
-
- /* TEXGRD doesn't honor Sampler Descriptor LOD clamps nor bias */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_10471,
-
- /* MAG / MIN filter selection happens after image descriptor clamps were applied */
- BASE_HW_ISSUE_10472,
-
- /* GPU interprets sampler and image descriptor pointer array sizes as one bigger than they are defined in midg structures */
- BASE_HW_ISSUE_10487,
-
- /* ld_special 0x1n applies SRGB conversion */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_10607,
-
- /* LD_SPECIAL instruction reads incorrect RAW tile buffer value when internal tib format is R10G10B10A2 */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_10632,
-
- /* MMU TLB invalidation hazards */
- BASE_HW_ISSUE_10649,
-
- /* Missing cache flush in multi core-group configuration */
- BASE_HW_ISSUE_10676,
-
- /* Indexed format 95 cannot be used with a component swizzle of "set to 1" when sampled as integer texture */
- BASE_HW_ISSUE_10682,
-
- /* sometimes HW doesn't invalidate cached VPDs when it has to */
- BASE_HW_ISSUE_10684,
-
- /* Chicken bit on t72x to work for a HW workaround in compiler */
- BASE_HW_ISSUE_10797,
-
- /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */
- BASE_HW_ISSUE_10817,
-
- /* Fragment frontend heuristic bias to force early-z required */
- BASE_HW_ISSUE_10821,
-
- /* Intermittent missing interrupt on job completion */
- BASE_HW_ISSUE_10883,
-
- /* Depth bounds incorrectly normalized in hierz depth bounds test */
- BASE_HW_ISSUE_10931,
-
- /* Incorrect cubemap sampling */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_10946,
-
- /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR (similar to issue 10817) and can use BASE_HW_ISSUE_10817 workaround */
- BASE_HW_ISSUE_10959,
-
- /* Soft-stopped fragment shader job can restart with out-of-bound restart index */
- BASE_HW_ISSUE_10969,
-
- /* Instanced arrays conformance fail, workaround by unrolling */
- BASE_HW_ISSUE_10984,
-
- /* TEX_INDEX lod selection (immediate , register) not working with 8.8 encoding for levels > 1 */
- /* NOTE: compiler workaround: keep in sync with _essl_hwrev_needs_workaround() */
- BASE_HW_ISSUE_10995,
-
- /* LD_SPECIAL instruction reads incorrect RAW tile buffer value when internal tib format is RGB565 or RGBA5551 */
- BASE_HW_ISSUE_11012,
-
- /* Race condition can cause tile list corruption */
- BASE_HW_ISSUE_11020,
-
- /* Write buffer can cause tile list corruption */
- BASE_HW_ISSUE_11024,
-
- /* Pause buffer can cause a fragment job hang */
- BASE_HW_ISSUE_11035,
-
- /* T76X hw issues */
-
- /* Partial 16xMSAA support */
- BASE_HW_ISSUE_T76X_26,
-
- /* Forward pixel kill doesn't work with MRT */
- BASE_HW_ISSUE_T76X_2121,
-
- /* CRC not working with MFBD and more than one render target */
- BASE_HW_ISSUE_T76X_2315,
-
- /* Some indexed formats not supported for MFBD preload. */
- BASE_HW_ISSUE_T76X_2686,
-
- /* Must disable CRC if the tile output size is 8 bytes or less. */
- BASE_HW_ISSUE_T76X_2712,
-
- /* DBD clean pixel enable bit is reserved */
- BASE_HW_ISSUE_T76X_2772,
-
- /* AFBC is not supported for T76X beta. */
- BASE_HW_ISSUE_T76X_2906,
-
- /* RTD doesn't specify the row stride for AFBC surfaces. */
- BASE_HW_ISSUE_T76X_3086,
-
- /* Prevent MMU deadlock for T76X beta. */
- BASE_HW_ISSUE_T76X_3285,
-
- /* Clear encoder state for a hard stopped fragment job which is AFBC
- * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and r0p1_50rel0
- */
- BASE_HW_ISSUE_T76X_3542,
-
- /* Do not use 8xMSAA with 16x8 pixel tile size or 16xMSAA with 8x8 pixel
- * tile size.
- */
- BASE_HW_ISSUE_T76X_3556,
-
- /* T76X cannot disable uses_discard even if depth and stencil are read-only. */
- BASE_HW_ISSUE_T76X_3700,
-
- /* ST_TILEBUFFER is not supported on T76X-r0p0-beta */
- BASE_HW_ISSUE_T76X_3759,
-
- /* Preload ignores any size or bounding box restrictions of the output image. */
- BASE_HW_ISSUE_T76X_3793,
-
- /* Keep tiler module clock on to prevent GPU stall */
- BASE_HW_ISSUE_T76X_3953,
-
- /* The BASE_HW_ISSUE_END value must be the last issue listed in this enumeration
- * and must be the last value in each array that contains the list of workarounds
- * for a particular HW version.
- */
- BASE_HW_ISSUE_END
-};
-
-/**
- * Workarounds configuration for each HW revision
- */
-/* Mali T60x r0p0-15dev0 - 2011-W39-stable-9 */
-static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
- BASE_HW_ISSUE_6367,
- BASE_HW_ISSUE_6398,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_6787,
- BASE_HW_ISSUE_7027,
- BASE_HW_ISSUE_7144,
- BASE_HW_ISSUE_8073,
- BASE_HW_ISSUE_8186,
- BASE_HW_ISSUE_8215,
- BASE_HW_ISSUE_8245,
- BASE_HW_ISSUE_8250,
- BASE_HW_ISSUE_8260,
- BASE_HW_ISSUE_8280,
- BASE_HW_ISSUE_8316,
- BASE_HW_ISSUE_8381,
- BASE_HW_ISSUE_8394,
- BASE_HW_ISSUE_8401,
- BASE_HW_ISSUE_8408,
- BASE_HW_ISSUE_8443,
- BASE_HW_ISSUE_8456,
- BASE_HW_ISSUE_8564,
- BASE_HW_ISSUE_8634,
- BASE_HW_ISSUE_8778,
- BASE_HW_ISSUE_8791,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_8833,
- BASE_HW_ISSUE_8896,
- BASE_HW_ISSUE_8975,
- BASE_HW_ISSUE_8986,
- BASE_HW_ISSUE_8987,
- BASE_HW_ISSUE_9010,
- BASE_HW_ISSUE_9275,
- BASE_HW_ISSUE_9418,
- BASE_HW_ISSUE_9423,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_9510,
- BASE_HW_ISSUE_9566,
- BASE_HW_ISSUE_9630,
- BASE_HW_ISSUE_10410,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10487,
- BASE_HW_ISSUE_10607,
- BASE_HW_ISSUE_10632,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10676,
- BASE_HW_ISSUE_10682,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10969,
- BASE_HW_ISSUE_10984,
- BASE_HW_ISSUE_10995,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11035,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T60x r0p0-00rel0 - 2011-W46-stable-13c */
-static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
- BASE_HW_ISSUE_6367,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_6787,
- BASE_HW_ISSUE_7027,
- BASE_HW_ISSUE_8408,
- BASE_HW_ISSUE_8564,
- BASE_HW_ISSUE_8778,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_8975,
- BASE_HW_ISSUE_9010,
- BASE_HW_ISSUE_9275,
- BASE_HW_ISSUE_9418,
- BASE_HW_ISSUE_9423,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_9510,
- BASE_HW_ISSUE_10410,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10487,
- BASE_HW_ISSUE_10607,
- BASE_HW_ISSUE_10632,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10676,
- BASE_HW_ISSUE_10682,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10969,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11035,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T60x r0p1 */
-static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = {
- BASE_HW_ISSUE_6367,
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_6787,
- BASE_HW_ISSUE_7027,
- BASE_HW_ISSUE_8408,
- BASE_HW_ISSUE_8564,
- BASE_HW_ISSUE_8778,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_8975,
- BASE_HW_ISSUE_9010,
- BASE_HW_ISSUE_9275,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_9510,
- BASE_HW_ISSUE_10410,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10487,
- BASE_HW_ISSUE_10607,
- BASE_HW_ISSUE_10632,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10676,
- BASE_HW_ISSUE_10682,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11035,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T62x r0p1 */
-static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10127,
- BASE_HW_ISSUE_10327,
- BASE_HW_ISSUE_10410,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10487,
- BASE_HW_ISSUE_10607,
- BASE_HW_ISSUE_10632,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10676,
- BASE_HW_ISSUE_10682,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10817,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10959,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11024,
- BASE_HW_ISSUE_11035,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T62x r1p0 */
-static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10959,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11024,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T62x r1p1 */
-static const enum base_hw_issue base_hw_issues_t62x_r1p1[] =
-{
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10959,
- BASE_HW_ISSUE_11012,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T76x r0p0 beta */
-static const enum base_hw_issue base_hw_issues_t76x_r0p0_beta[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_10959,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11024,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_2121,
- BASE_HW_ISSUE_T76X_2315,
- BASE_HW_ISSUE_T76X_2686,
- BASE_HW_ISSUE_T76X_2712,
- BASE_HW_ISSUE_T76X_2772,
- BASE_HW_ISSUE_T76X_2906,
- BASE_HW_ISSUE_T76X_3285,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3759,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T76x r0p0 */
-static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11024,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T76x r0p1 */
-static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11024,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T76x r0p1_50rel0 */
-static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T76x r0p2 */
-static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11024,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T76x r0p3 */
-static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_T76X_26,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3542,
- BASE_HW_ISSUE_T76X_3556,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T76x r1p0 */
-static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-
-/* Mali T72x r0p0 */
-static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10797,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T72x r1p0 */
-static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10797,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-/* Mali T72x r1p1 */
-static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = {
- BASE_HW_ISSUE_6402,
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10684,
- BASE_HW_ISSUE_10797,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_10946,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-#ifdef MALI_INCLUDE_TFRX
-/* Mali TFRx r0p0 */
-static const enum base_hw_issue base_hw_issues_tFRx_r0p0[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-#endif /* MALI_INCLUDE_TFRX */
-
-#ifdef MALI_INCLUDE_TF2X
-/* Mali TF2x r0p0 */
-static const enum base_hw_issue base_hw_issues_tF2x_r0p0[] = {
- BASE_HW_ISSUE_8803,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10649,
- BASE_HW_ISSUE_10821,
- BASE_HW_ISSUE_10883,
- BASE_HW_ISSUE_10946,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- BASE_HW_ISSUE_T76X_3953,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-#endif /* MALI_INCLUDE_TF2X */
-
-/* Model configuration
- */
-static const enum base_hw_issue base_hw_issues_model_t72x[] =
-{
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_6402, /* NOTE: Fix is present in model r125162 but is not enabled until RTL is fixed */
- BASE_HW_ISSUE_9275,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10471,
- BASE_HW_ISSUE_10797,
- BASE_HW_ISSUE_10931,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t7xx[] =
-{
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9275,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11024,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-static const enum base_hw_issue base_hw_issues_model_t6xx[] =
-{
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_6402, /* NOTE: Fix is present in model r125162 but is not enabled until RTL is fixed */
- BASE_HW_ISSUE_8778,
- BASE_HW_ISSUE_9275,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10472,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_11012,
- BASE_HW_ISSUE_11020,
- BASE_HW_ISSUE_11024,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-
-#ifdef MALI_INCLUDE_TFRX
-static const enum base_hw_issue base_hw_issues_model_tFRx[] =
-{
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9275,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-#endif /* MALI_INCLUDE_TFRX */
-
-#ifdef MALI_INCLUDE_TF2X
-static const enum base_hw_issue base_hw_issues_model_tF2x[] =
-{
- BASE_HW_ISSUE_5736,
- BASE_HW_ISSUE_9275,
- BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10931,
- BASE_HW_ISSUE_T76X_3086,
- BASE_HW_ISSUE_T76X_3700,
- BASE_HW_ISSUE_T76X_3793,
- /* List of hardware issues must end with BASE_HW_ISSUE_END */
- BASE_HW_ISSUE_END
-};
-#endif /* MALI_INCLUDE_TF2X */
+#include "mali_base_hwconfig_issues.h"
+#include "mali_base_hwconfig_features.h"
-#endif /* _BASE_HWCONFIG_H_ */
+#endif /* _BASE_HWCONFIG_H_ */
--- /dev/null
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update user/midgard/mali_base_hwconfig_{issues,features}.h and
+ * re-run hwconfig_header_generator instead. This tool is available in the
+ * progs_install directory for host builds. More information is available in
+ * base/tools/hwconfig_header_generator/README */
+
+#ifndef _BASE_HWCONFIG_FEATURES_H_
+#define _BASE_HWCONFIG_FEATURES_H_
+
+enum base_hw_feature {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ BASE_HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_generic[] = {
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t60x[] = {
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t62x[] = {
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t72x[] = {
+ BASE_HW_FEATURE_33BIT_VA,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
+ BASE_HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ BASE_HW_FEATURE_WARPING,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_t76x[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+static const enum base_hw_feature base_hw_features_tFxx[] = {
+ BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ BASE_HW_FEATURE_BRNDOUT_CC,
+ BASE_HW_FEATURE_BRNDOUT_KILL,
+ BASE_HW_FEATURE_LD_ST_LEA_TEX,
+ BASE_HW_FEATURE_LD_ST_TILEBUFFER,
+ BASE_HW_FEATURE_LINEAR_FILTER_FLOAT,
+ BASE_HW_FEATURE_MRT,
+ BASE_HW_FEATURE_MSAA_16X,
+ BASE_HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
+ BASE_HW_FEATURE_T7XX_PAIRING_RULES,
+ BASE_HW_FEATURE_TEST4_DATUM_MODE,
+ BASE_HW_FEATURE_END
+};
+
+#endif /* _BASE_HWCONFIG_FEATURES_H_ */
--- /dev/null
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/* AUTOMATICALLY GENERATED FILE. If you want to amend the issues/features,
+ * please update user/midgard/mali_base_hwconfig_{issues,features}.h and
+ * re-run hwconfig_header_generator instead. This tool is available in the
+ * progs_install directory for host builds. More information is available in
+ * base/tools/hwconfig_header_generator/README */
+
+#ifndef _BASE_HWCONFIG_ISSUES_H_
+#define _BASE_HWCONFIG_ISSUES_H_
+
+enum base_hw_issue {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6398,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7144,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8073,
+ BASE_HW_ISSUE_8186,
+ BASE_HW_ISSUE_8215,
+ BASE_HW_ISSUE_8245,
+ BASE_HW_ISSUE_8250,
+ BASE_HW_ISSUE_8260,
+ BASE_HW_ISSUE_8280,
+ BASE_HW_ISSUE_8316,
+ BASE_HW_ISSUE_8381,
+ BASE_HW_ISSUE_8394,
+ BASE_HW_ISSUE_8401,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8443,
+ BASE_HW_ISSUE_8456,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8634,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8791,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_8833,
+ BASE_HW_ISSUE_8879,
+ BASE_HW_ISSUE_8896,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_8986,
+ BASE_HW_ISSUE_8987,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_9566,
+ BASE_HW_ISSUE_9630,
+ BASE_HW_ISSUE_10127,
+ BASE_HW_ISSUE_10327,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10817,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_10984,
+ BASE_HW_ISSUE_10995,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_2121,
+ BASE_HW_ISSUE_T76X_2315,
+ BASE_HW_ISSUE_T76X_2686,
+ BASE_HW_ISSUE_T76X_2712,
+ BASE_HW_ISSUE_T76X_2772,
+ BASE_HW_ISSUE_T76X_2906,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3285,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3759,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_generic[] = {
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6398,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7144,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8073,
+ BASE_HW_ISSUE_8186,
+ BASE_HW_ISSUE_8215,
+ BASE_HW_ISSUE_8245,
+ BASE_HW_ISSUE_8250,
+ BASE_HW_ISSUE_8260,
+ BASE_HW_ISSUE_8280,
+ BASE_HW_ISSUE_8316,
+ BASE_HW_ISSUE_8381,
+ BASE_HW_ISSUE_8394,
+ BASE_HW_ISSUE_8401,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8443,
+ BASE_HW_ISSUE_8456,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8634,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8791,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_8833,
+ BASE_HW_ISSUE_8896,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_8986,
+ BASE_HW_ISSUE_8987,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_9566,
+ BASE_HW_ISSUE_9630,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_10984,
+ BASE_HW_ISSUE_10995,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9418,
+ BASE_HW_ISSUE_9423,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10969,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = {
+ BASE_HW_ISSUE_6367,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_6787,
+ BASE_HW_ISSUE_7027,
+ BASE_HW_ISSUE_7304,
+ BASE_HW_ISSUE_8408,
+ BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_8975,
+ BASE_HW_ISSUE_9010,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_9510,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10127,
+ BASE_HW_ISSUE_10327,
+ BASE_HW_ISSUE_10410,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10487,
+ BASE_HW_ISSUE_10607,
+ BASE_HW_ISSUE_10632,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10676,
+ BASE_HW_ISSUE_10682,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10817,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11035,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t62x_r1p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p0_beta[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_10959,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_2121,
+ BASE_HW_ISSUE_T76X_2315,
+ BASE_HW_ISSUE_T76X_2686,
+ BASE_HW_ISSUE_T76X_2712,
+ BASE_HW_ISSUE_T76X_2772,
+ BASE_HW_ISSUE_T76X_2906,
+ BASE_HW_ISSUE_T76X_3285,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3759,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p0[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p1[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p2[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r0p3[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t76x_r1p0[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = {
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10684,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t72x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10471,
+ BASE_HW_ISSUE_10797,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t76x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t60x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_8778,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t62x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_6402,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10472,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_11012,
+ BASE_HW_ISSUE_11020,
+ BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_11042,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_END
+};
+
+#if defined(MALI_INCLUDE_TFRX)
+static const enum base_hw_issue base_hw_issues_tFRx_r0p0[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* defined(MALI_INCLUDE_TFRX) */
+
+#if defined(MALI_INCLUDE_TFRX)
+static const enum base_hw_issue base_hw_issues_tFRx_r0p1[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* defined(MALI_INCLUDE_TFRX) */
+
+#if defined(MALI_INCLUDE_TFRX)
+static const enum base_hw_issue base_hw_issues_tFRx_r0p2[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* defined(MALI_INCLUDE_TFRX) */
+
+#if defined(MALI_INCLUDE_TFRX)
+static const enum base_hw_issue base_hw_issues_model_tFRx[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* defined(MALI_INCLUDE_TFRX) */
+
+static const enum base_hw_issue base_hw_issues_t86x_r0p0[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_T76X_3960,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_t86x_r0p2[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_T76X_3953,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_model_t86x[] = {
+ BASE_HW_ISSUE_5736,
+ BASE_HW_ISSUE_9275,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10931,
+ BASE_HW_ISSUE_T76X_1963,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ BASE_HW_ISSUE_END
+};
+
+#endif /* _BASE_HWCONFIG_ISSUES_H_ */
/* Support UK6 IOCTLS */
#define BASE_LEGACY_UK6_SUPPORT 1
+/* Support UK7 IOCTLS */
+/* NB: To support UK6 we also need to support UK7 */
+#define BASE_LEGACY_UK7_SUPPORT 1
+
typedef mali_addr64 base_mem_handle;
#include "mali_base_mem_priv.h"
BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
/* SW defined exceptions */
- BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
- BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
- BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
- BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
- BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+ BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+ BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+ BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+ BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
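+	/* Presumably raised by the replay soft-job mechanism to request that a
+	 * job chain be run again (see the replay job payload). */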
+ BASE_JD_EVENT_FORCE_REPLAY = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005,
- BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+ BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
/** End of HW fault and SW Error status codes */
BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
* @addtogroup base_api Base APIs
* @{
*/
-/**
- * @addtogroup basecpuprops Base CPU Properties
- * @{
- */
-
-/**
- * @brief CPU Property Flag for base_cpu_props::cpu_flags, indicating a
- * Little Endian System. If not set in base_cpu_props::cpu_flags, then the
- * system is Big Endian.
- *
- * The compile-time equivalent is @ref OSU_CONFIG_CPU_LITTLE_ENDIAN.
- */
-#define BASE_CPU_PROPERTY_FLAG_LITTLE_ENDIAN F_BIT_0
-
-
-/**
- * @brief Platform dynamic CPU ID properties structure
- */
-typedef struct base_cpu_id_props
-{
- /**
- * CPU ID
- */
- u32 id;
-
- /**
- * CPU Part number
- */
- u16 part;
-
- /**
- * ASCII code of implementer trademark
- */
- u8 implementer;
-
- /**
- * CPU Variant
- */
- u8 variant;
-
- /**
- * CPU Architecture
- */
- u8 arch;
-
- /**
- * CPU revision
- */
- u8 rev;
-
- /**
- Validity of CPU id where 0-invalid and
- 1-valid only if ALL the cpu_id props are valid
- */
- u8 valid;
-
- u8 padding[1];
-} base_cpu_id_props;
-
-
-/** @brief Platform Dynamic CPU properties structure */
-typedef struct base_cpu_props {
- u32 nr_cores; /**< Number of CPU cores */
-
- /**
- * CPU page size as a Logarithm to Base 2. The compile-time
- * equivalent is @ref OSU_CONFIG_CPU_PAGE_SIZE_LOG2
- */
- u32 cpu_page_size_log2;
-
- /**
- * CPU L1 Data cache line size as a Logarithm to Base 2. The compile-time
- * equivalent is @ref OSU_CONFIG_CPU_L1_DCACHE_LINE_SIZE_LOG2.
- */
- u32 cpu_l1_dcache_line_size_log2;
-
- /**
- * CPU L1 Data cache size, in bytes. The compile-time equivalient is
- * @ref OSU_CONFIG_CPU_L1_DCACHE_SIZE.
- *
- * This CPU Property is mainly provided to implement OpenCL's
- * clGetDeviceInfo(), which allows the CL_DEVICE_GLOBAL_MEM_CACHE_SIZE
- * hint to be queried.
- */
- u32 cpu_l1_dcache_size;
-
- /**
- * CPU Property Flags bitpattern.
- *
- * This is a combination of bits as specified by the macros prefixed with
- * 'BASE_CPU_PROPERTY_FLAG_'.
- */
- u32 cpu_flags;
-
- /**
- * Maximum clock speed in MHz.
- * @usecase 'Maximum' CPU Clock Speed information is required by OpenCL's
- * clGetDeviceInfo() function for the CL_DEVICE_MAX_CLOCK_FREQUENCY hint.
- */
- u32 max_cpu_clock_speed_mhz;
-
- /**
- * @brief Total memory, in bytes.
- *
- * This is the theoretical maximum memory available to the CPU. It is
- * unlikely that a client will be able to allocate all of this memory for
- * their own purposes, but this at least provides an upper bound on the
- * memory available to the CPU.
- *
- * This is required for OpenCL's clGetDeviceInfo() call when
- * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL CPU devices.
- */
- u64 available_memory_size;
-
- /**
- * CPU ID detailed info
- */
- struct base_cpu_id_props cpu_id;
-
- u32 padding;
-} base_cpu_props;
-/** @} end group basecpuprops */
/**
* @brief The payload for a replay job. This must be in GPU memory.
void kbase_synchronize_irqs(struct kbase_device *kbdev);
void kbase_synchronize_irqs(struct kbase_device *kbdev);
-struct kbase_context *kbase_create_context(struct kbase_device *kbdev);
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat);
void kbase_destroy_context(struct kbase_context *kctx);
mali_error kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);
/* page_1 is a u32 pointer, offset is expressed in bytes */
page_1 += offset>>2;
dma_sync_single_for_cpu(katom->kctx->kbdev->dev,
- page_private(p) + offset,
+ kbase_dma_addr(p) + offset,
copy_size, DMA_BIDIRECTIONAL);
memcpy(dst, page_1, copy_size);
page_2 = kmap_atomic(p);
dma_sync_single_for_cpu(katom->kctx->kbdev->dev,
- page_private(p),
+ kbase_dma_addr(p),
JOB_HEADER_SIZE - copy_size, DMA_BIDIRECTIONAL);
memcpy(dst + copy_size, page_2, JOB_HEADER_SIZE - copy_size);
}
memcpy(page_1, dst, copy_size);
p = pfn_to_page(PFN_DOWN(page_array[page_index]));
dma_sync_single_for_device(katom->kctx->kbdev->dev,
- page_private(p) + offset,
+ kbase_dma_addr(p) + offset,
copy_size, DMA_TO_DEVICE);
if (copy_size < JOB_HEADER_SIZE) {
JOB_HEADER_SIZE - copy_size);
p = pfn_to_page(PFN_DOWN(page_array[page_index + 1]));
dma_sync_single_for_device(katom->kctx->kbdev->dev,
- page_private(p),
+ kbase_dma_addr(p),
JOB_HEADER_SIZE - copy_size,
DMA_TO_DEVICE);
}
#include <mali_kbase.h>
#include <mali_kbase_defs.h>
-#include <mali_kbase_cpuprops.h>
#include <mali_kbase_config_defaults.h>
/* Specifies how many attributes are permitted in the config (excluding terminating attribute).
KBASE_EXPORT_TEST_API(kbasep_get_next_attribute)
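+/* Default implementation of KBASE_CONFIG_ATTR_CPU_SPEED_FUNC: reports a
+ * conservative 100 MHz, so real platforms are expected to override it. */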
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed)
+{
+ KBASE_DEBUG_ASSERT(NULL != clock_speed);
+
+ *clock_speed = 100;
+ return 0;
+}
+
uintptr_t kbasep_get_config_value(struct kbase_device *kbdev, const struct kbase_attribute *attributes, int attribute_id)
{
const struct kbase_attribute *attr;
} kbase_pm_callback_conf;
+/**
+ * @brief Default implementation of @ref KBASE_CONFIG_ATTR_CPU_SPEED_FUNC.
+ *
+ * This function sets clock_speed to 100, so will be an underestimate for
+ * any real system.
+ *
+ * See @ref kbase_cpuprops_clock_speed_function for details on the parameters
+ * and return value.
+ */
+int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed);
+
/**
* Type of the function pointer for KBASE_CONFIG_ATTR_CPU_SPEED_FUNC.
*
#define _KBASE_CONFIG_DEFAULTS_H_
/* Include mandatory definitions per platform */
+#include <mali_kbase_config_platform.h>
/**
* Irq throttle. It is the minimum desired time in between two
* Attached value: number in micro seconds
*/
#define DEFAULT_IRQ_THROTTLE_TIME_US 20
-#define GPU_FREQ_KHZ_MAX 500000
-#define GPU_FREQ_KHZ_MIN 100000
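+/* GPU_FREQ_KHZ_MAX/MIN are assumed to be supplied by the mandatory
+ * per-platform header included above rather than hard-coded here. */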
+
/*** Begin Scheduling defaults ***/
/**
*
* Allocate and init a kernel base context.
*/
-struct kbase_context *kbase_create_context(struct kbase_device *kbdev)
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat)
{
struct kbase_context *kctx;
mali_error mali_err;
kctx->kbdev = kbdev;
kctx->as_nr = KBASEP_AS_NR_INVALID;
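+	/* Cache the caller's 32-bit (compat) status so that ioctl and pointer
+	 * handling paths no longer need to call is_compat_task(). */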
+ kctx->is_compat = is_compat;
#ifdef CONFIG_MALI_TRACE_TIMELINE
kctx->timeline.owner_tgid = task_tgid_nr(current);
#endif
#ifdef CONFIG_MALI_DEVFREQ
#include "mali_kbase_devfreq.h"
#endif /* CONFIG_MALI_DEVFREQ */
+#include <mali_kbase_cpuprops.h>
#ifdef CONFIG_MALI_NO_MALI
#include "mali_kbase_model_linux.h"
#endif /* CONFIG_MALI_NO_MALI */
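+/* KBASE_MEM_PROFILE_MAX_BUF_SIZE is assumed to have moved into this
+ * dedicated header, replacing the local definition removed below. */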
+#include "mali_kbase_mem_profile_debugfs_buf_size.h"
#ifdef CONFIG_KDS
#include <linux/kds.h>
#endif /* MALI_UNIT_TEST */
#define KBASE_DRV_NAME "mali"
-#define ROCKCHIP_VERSION 0x0b
-
-/** process name + ( statistics in a single bin * number of bins + histogram header ) * number of histograms + total size
- * @note Must be kept in sync with CCTX
- */
-#define KBASE_MEM_PROFILE_MAX_BUF_SIZE (64 + (24 * 32 + 64) * 16 + 40)
static const char kbase_drv_name[] = KBASE_DRV_NAME;
return MALI_ERROR_FUNCTION_FAILED;
}
-static mali_bool kbasep_validate_kbase_pointer(union kbase_pointer *p)
+static mali_bool kbasep_validate_kbase_pointer(
+ struct kbase_context *kctx, union kbase_pointer *p)
{
-#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
+ if (kctx->is_compat) {
if (p->compat_value == 0)
return MALI_FALSE;
} else {
-#endif /* CONFIG_COMPAT */
if (NULL == p->value)
return MALI_FALSE;
-#ifdef CONFIG_COMPAT
}
-#endif /* CONFIG_COMPAT */
return MALI_TRUE;
}
return MALI_ERROR_FUNCTION_FAILED;
/* Check user space has provided valid data */
- if (!kbasep_validate_kbase_pointer(&args->external_resource) || !kbasep_validate_kbase_pointer(&args->file_descriptor) || (0 == args->num_res) || (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
+ if (!kbasep_validate_kbase_pointer(kctx, &args->external_resource) ||
+ !kbasep_validate_kbase_pointer(kctx, &args->file_descriptor) ||
+ (0 == args->num_res) ||
+ (args->num_res > KBASE_MAXIMUM_EXT_RESOURCES))
return MALI_ERROR_FUNCTION_FAILED;
ext_resource_size = sizeof(struct base_external_resource) * args->num_res;
struct base_external_resource __user *ext_res_user;
int __user *file_descriptor_user;
#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
+ if (kctx->is_compat) {
ext_res_user = compat_ptr(args->external_resource.compat_value);
file_descriptor_user = compat_ptr(args->file_descriptor.compat_value);
} else {
if (args_size == sizeof(struct uku_version_check_args)) {
struct uku_version_check_args *version_check = (struct uku_version_check_args *)args;
+ switch (version_check->major) {
#ifdef BASE_LEGACY_UK6_SUPPORT
- if (version_check->major == 6) {
+ case 6:
/* We are backwards compatible with version 6,
* so pretend to be the old version */
version_check->major = 6;
version_check->minor = 1;
- } else {
+ break;
+#endif /* BASE_LEGACY_UK6_SUPPORT */
+#ifdef BASE_LEGACY_UK7_SUPPORT
+ case 7:
+ /* We are backwards compatible with version 7,
+ * so pretend to be the old version */
+ version_check->major = 7;
+ version_check->minor = 1;
+ break;
+#endif /* BASE_LEGACY_UK7_SUPPORT */
+ default:
/* We return our actual version regardless if it
* matches the version returned by userspace -
* userspace can bail if it can't handle this
version_check->major = BASE_UK_VERSION_MAJOR;
version_check->minor = BASE_UK_VERSION_MINOR;
}
-#else
- version_check->major = BASE_UK_VERSION_MAJOR;
- version_check->minor = BASE_UK_VERSION_MINOR;
-#endif /* BASE_LEGACY_UK6_SUPPORT */
ukh->ret = MALI_ERROR_NONE;
} else {
if (sizeof(*mem_import) != args_size)
goto bad_size;
-#ifdef CONFIG_64BIT
- if (is_compat_task())
+#ifdef CONFIG_COMPAT
+ if (kctx->is_compat)
phandle = compat_ptr(mem_import->phandle.compat_value);
else
#endif
break;
}
-#ifdef CONFIG_64BIT
- if (is_compat_task())
+#ifdef CONFIG_COMPAT
+ if (kctx->is_compat)
user_ai = compat_ptr(alias->ai.compat_value);
else
#endif
break;
}
- case KBASE_FUNC_CPU_PROPS_REG_DUMP:
+#ifdef BASE_LEGACY_UK7_SUPPORT
+ case KBASE_FUNC_CPU_PROPS_REG_DUMP_OBSOLETE:
{
struct kbase_uk_cpuprops *setup = args;
ukh->ret = MALI_ERROR_FUNCTION_FAILED;
break;
}
+#endif /* BASE_LEGACY_UK7_SUPPORT */
case KBASE_FUNC_GPU_PROPS_REG_DUMP:
{
/* version buffer size check is made in compile time assert */
memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
- get_version->rk_version = ROCKCHIP_VERSION;
break;
}
goto out_bad;
}
-#ifdef CONFIG_64BIT
- if (is_compat_task())
+#ifdef CONFIG_COMPAT
+ if (kctx->is_compat)
user_buf = compat_ptr(add_data->buf.compat_value);
else
#endif
if (!kbdev)
return -ENODEV;
- kctx = kbase_create_context(kbdev);
+ kctx = kbase_create_context(kbdev, is_compat_task());
if (!kctx) {
ret = -ENOMEM;
goto out;
* values are known in advance */
struct kbase_context *kctx = filp->private_data;
- if (!is_compat_task() && !addr &&
+ if (!kctx->is_compat && !addr &&
kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA)) {
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
mali_error mali_err;
#endif /* CONFIG_MALI_NO_MALI */
#ifdef CONFIG_OF
+#ifdef CONFIG_MALI_PLATFORM_FAKE
struct kbase_platform_config *config;
int attribute_count;
attribute_count * sizeof(config->attributes[0]));
if (err)
return err;
+#endif /* CONFIG_MALI_PLATFORM_FAKE */
#endif /* CONFIG_OF */
kbdev = kbase_device_alloc();
#ifdef CONFIG_OF
static const struct of_device_id kbase_dt_ids[] = {
- { .compatible = "arm,malit7xx" },
+ { .compatible = "arm,malit6xx" },
{ .compatible = "arm,mali-midgard" },
{ /* sentinel */ }
};
* anymore when using Device Tree.
*/
#ifdef CONFIG_OF
-#if 0
module_platform_driver(kbase_platform_driver);
-#else
-static int __init rockchip_gpu_init_driver(void)
-{
- return platform_driver_register(&kbase_platform_driver);
-}
+#else /* CONFIG_MALI_PLATFORM_FAKE */
+
+extern int kbase_platform_early_init(void);
-late_initcall(rockchip_gpu_init_driver);
-#endif
-#else
#ifdef CONFIG_MALI_PLATFORM_FAKE
extern int kbase_platform_fake_register(void);
extern void kbase_platform_fake_unregister(void);
*/
#include "mali_kbase.h"
+#ifdef BASE_LEGACY_UK7_SUPPORT
+
#include "mali_kbase_cpuprops.h"
#include "mali_kbase_uku.h"
#include <mali_kbase_config.h>
/* Below value sourced from OSK */
#define L1_DCACHE_SIZE ((u32)0x00008000)
-
/**
* @brief Retrieves detailed CPU info from the given cpu_val (ID reg)
*
}
#endif
-int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed)
-{
- KBASE_DEBUG_ASSERT(NULL != clock_speed);
-
- *clock_speed = 100;
- return 0;
-}
+/**
+ * This function (and file!) is kept for backward compatibility reasons.
+ * It shall be removed as soon as the KBASE_FUNC_CPU_PROPS_REG_DUMP_OBSOLETE
+ * (previously KBASE_FUNC_CPU_PROPS_REG_DUMP) ioctl call
+ * is removed. Removing KBASE_FUNC_CPU_PROPS_REG_DUMP is part of moving
+ * the function for reading CPU properties from base to osu.
+ */
mali_error kbase_cpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_cpuprops * const kbase_props)
{
/* check if kernel supports dynamic frequency scaling */
max_cpu_freq = cpufreq_quick_get_max(KBASE_DEFAULT_CPU_NUM);
- if (max_cpu_freq != 0)
- {
+ if (max_cpu_freq != 0) {
/* convert from kHz to MHz */
kbase_props->props.max_cpu_clock_speed_mhz = max_cpu_freq / 1000;
- }
- else
- {
+ } else {
/* fallback if CONFIG_CPU_FREQ turned off */
int result;
kbase_cpuprops_clock_speed_function kbase_cpuprops_uk_get_clock_speed;
return MALI_ERROR_NONE;
}
+
+#endif /* BASE_LEGACY_UK7_SUPPORT */
-
+#include "mali_kbase.h"
+#ifdef BASE_LEGACY_UK7_SUPPORT
/**
* @file mali_kbase_cpuprops.h
struct kbase_uk_cpuprops;
/**
- * @brief Default implementation of @ref KBASE_CONFIG_ATTR_CPU_SPEED_FUNC.
- *
- * This function sets clock_speed to 100, so will be an underestimate for
- * any real system.
- *
- * See @ref kbase_cpuprops_clock_speed_function for details on the parameters
- * and return value.
+ * This file is kept for backward compatibility reasons.
+ * It shall be removed as soon as the KBASE_FUNC_CPU_PROPS_REG_DUMP_OBSOLETE
+ * (previously KBASE_FUNC_CPU_PROPS_REG_DUMP) ioctl call
+ * is removed. Removing KBASE_FUNC_CPU_PROPS_REG_DUMP is part of moving
+ * the function for reading CPU properties from base to osu.
*/
-int kbase_cpuprops_get_default_clock_speed(u32 * const clock_speed);
/**
* @brief Provides CPU properties data.
mali_error kbase_cpuprops_uk_get_props(struct kbase_context *kctx, struct kbase_uk_cpuprops * const kbase_props);
#endif /*_KBASE_CPUPROPS_H_*/
+#endif /* BASE_LEGACY_UK7_SUPPORT */
#endif
#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW)
-#define GROWABLE_FLAGS_MASK (GROWABLE_FLAGS_REQUIRED | KBASE_REG_FREE)
/** setting in kbase_context::as_nr that indicates it's invalid */
#define KBASEP_AS_NR_INVALID (-1)
struct list_head entry;
struct device *dev;
- unsigned int kbase_group_error;
struct miscdevice mdev;
u64 reg_start;
size_t reg_size;
#ifdef CONFIG_PM_DEVFREQ
struct devfreq_dev_profile devfreq_profile;
struct devfreq *devfreq;
- bool reset_utilization;
+ unsigned long freq;
#ifdef CONFIG_DEVFREQ_THERMAL
struct devfreq_cooling_device *devfreq_cooling;
#ifdef CONFIG_MALI_POWER_ACTOR
struct workqueue_struct *event_workq;
u64 mem_attrs;
+ bool is_compat;
atomic_t setup_complete;
atomic_t setup_in_progress;
unsigned long freq = 0;
int err;
-
- kbdev->reset_utilization = true;
-
freq = *target_freq;
rcu_read_lock();
return PTR_ERR(opp);
}
+ /*
+ * Only update if there is a change of frequency
+ */
+ if (kbdev->freq == freq) {
+ *target_freq = freq;
+ return 0;
+ }
+
err = clk_set_rate(kbdev->clock, freq);
if (err) {
dev_err(dev, "Failed to set clock %lu (target %lu)\n",
return err;
}
+ kbdev->freq = freq;
*target_freq = freq;
+ kbase_pm_reset_dvfs_utilisation(kbdev);
+
return 0;
}
{
struct kbase_device *kbdev = dev_get_drvdata(dev);
- *freq = clk_get_rate(kbdev->clock);
+ *freq = kbdev->freq;
return 0;
}
kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
{
struct kbase_device *kbdev = dev_get_drvdata(dev);
- int err;
- err = kbase_devfreq_cur_freq(dev, &stat->current_frequency);
- if (err)
- return err;
+ stat->current_frequency = kbdev->freq;
kbase_pm_get_dvfs_utilisation(kbdev,
- &stat->total_time, &stat->busy_time,
- kbdev->reset_utilization);
+ &stat->total_time, &stat->busy_time);
- /* TODO vsync info for governor? */
stat->private_data = NULL;
return 0;
if (!kbdev->clock)
return -ENODEV;
+ kbdev->freq = clk_get_rate(kbdev->clock);
+
dp = &kbdev->devfreq_profile;
- dp->initial_freq = clk_get_rate(kbdev->clock);
- dp->polling_ms = 1000;
+ dp->initial_freq = kbdev->freq;
+ dp->polling_ms = 100;
dp->target = kbase_devfreq_target;
dp->get_dev_status = kbase_devfreq_status;
dp->get_cur_freq = kbase_devfreq_cur_freq;
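For context, a minimal sketch of how the profile filled in above would then be handed to the devfreq core; the governor name is an assumption for the sketch, not taken from this patch:

/* Illustrative only: register the devfreq profile set up above. */
kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
		"simple_ondemand", NULL);
if (IS_ERR(kbdev->devfreq)) {
	dev_err(kbdev->dev, "Failed to add devfreq device\n");
	return PTR_ERR(kbdev->devfreq);
}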
address |= kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);
dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx", status & 0xFF, kbase_exception_name(status), address);
- kbdev->kbase_group_error++;
if (multiple)
dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
}
struct kbase_gator_hwcnt_handles {
struct kbase_device *kbdev;
struct kbase_context *kctx;
- struct kbase_hwc_dma_mapping kernel_dump_buffer_handle;
+ mali_addr64 hwcnt_gpu_va;
+ void *hwcnt_cpu_va;
+ struct kbase_vmap_struct hwcnt_map;
};
const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_number_of_counters)
*total_number_of_counters = ARRAY_SIZE(hardware_counter_names_mali_t76x);
break;
#endif /* MALI_INCLUDE_TRFX */
-#ifdef MALI_INCLUDE_TF2X
- /* If we are using a Mali-TF2X device - for now just mimic the T760 counters */
- case GPU_ID_PI_TF2X:
+ /* If we are using a Mali-T86X device - for now just mimic the T760 counters */
+ case GPU_ID_PI_T86X:
hardware_counter_names = hardware_counter_names_mali_t76x;
*total_number_of_counters = ARRAY_SIZE(hardware_counter_names_mali_t76x);
break;
-#endif /* MALI_INCLUDE_TF2X */
default:
hardware_counter_names = NULL;
*total_number_of_counters = 0;
struct kbase_uk_hwcnt_setup setup;
mali_error err;
uint32_t dump_size = 0, i = 0;
+ struct kbase_va_region *reg;
+ u64 flags;
+ u64 nr_pages;
+ u16 va_alignment = 0;
if (!in_out_info)
return NULL;
goto free_hand;
/* Create a kbase_context */
- hand->kctx = kbase_create_context(hand->kbdev);
+ hand->kctx = kbase_create_context(hand->kbdev, true);
if (!hand->kctx)
goto release_device;
#ifdef MALI_INCLUDE_TFRX
|| (in_out_info->gpu_id == GPU_ID_PI_TFRX)
#endif /* MALI_INCLUDE_TFRX */
-#ifdef MALI_INCLUDE_TF2X
- || (in_out_info->gpu_id == GPU_ID_PI_TF2X)
-#endif /* MALI_INCLUDE_TF2X */
+ || (in_out_info->gpu_id == GPU_ID_PI_T86X)
+#ifdef MALI_INCLUDE_TGAL
+ || (in_out_info->gpu_id == GPU_ID_PI_TGAL)
+#endif
) {
uint32_t nr_l2, nr_sc, j;
uint64_t core_mask;
in_out_info->size = dump_size;
- in_out_info->kernel_dump_buffer = kbase_va_alloc(hand->kctx, dump_size, &hand->kernel_dump_buffer_handle);
- if (!in_out_info->kernel_dump_buffer)
+ flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR;
+ nr_pages = PFN_UP(dump_size);
+ reg = kbase_mem_alloc(hand->kctx, nr_pages, nr_pages, 0,
+ &flags, &hand->hwcnt_gpu_va, &va_alignment);
+ if (!reg)
goto free_layout;
- setup.dump_buffer = (uintptr_t)in_out_info->kernel_dump_buffer;
+ hand->hwcnt_cpu_va = kbase_vmap(hand->kctx, hand->hwcnt_gpu_va,
+ dump_size, &hand->hwcnt_map);
+
+ if (!hand->hwcnt_cpu_va)
+ goto free_buffer;
+
+ in_out_info->kernel_dump_buffer = hand->hwcnt_cpu_va;
+
+ /*setup.dump_buffer = (uintptr_t)in_out_info->kernel_dump_buffer;*/
+ setup.dump_buffer = hand->hwcnt_gpu_va;
setup.jm_bm = in_out_info->bitmask[0];
setup.tiler_bm = in_out_info->bitmask[1];
setup.shader_bm = in_out_info->bitmask[2];
err = kbase_instr_hwcnt_enable(hand->kctx, &setup);
if (err != MALI_ERROR_NONE)
- goto free_buffer;
+ goto free_unmap;
kbase_instr_hwcnt_clear(hand->kctx);
return hand;
+free_unmap:
+ kbase_vunmap(hand->kctx, &hand->hwcnt_map);
+
free_buffer:
- kbase_va_free(hand->kctx, &hand->kernel_dump_buffer_handle);
+ kbase_mem_free(hand->kctx, hand->hwcnt_gpu_va);
free_layout:
kfree(in_out_info->hwc_layout);
if (opaque_handles) {
kbase_instr_hwcnt_disable(opaque_handles->kctx);
- kbase_va_free(opaque_handles->kctx, &opaque_handles->kernel_dump_buffer_handle);
+ kbase_vunmap(opaque_handles->kctx, &opaque_handles->hwcnt_map);
+ kbase_mem_free(opaque_handles->kctx, opaque_handles->hwcnt_gpu_va);
kbase_destroy_context(opaque_handles->kctx);
kbase_release_device(opaque_handles->kbdev);
kfree(opaque_handles);
list_for_each_entry(element, &kbdev->kctx_list, link) {
- /* output the memory usage and cap for each kctx
+ /* output the memory usage for each kctx
* opened on this device */
- ret = seq_printf(sfile, " %s-0x%p %10u %10u %10u %10u\n", \
- "kctx", \
+ ret = seq_printf(sfile, " %s-0x%p %10u\n", \
+ "kctx",
element->kctx, \
- element->kctx->pid, \
- atomic_read(&(element->kctx->osalloc.free_list_size)), \
- atomic_read(&(element->kctx->used_pages)), \
- atomic_read(&(element->kctx->nonmapped_pages)));
+ atomic_read(&(element->kctx->used_pages)));
}
mutex_unlock(&kbdev->kctx_list_lock);
}
u32 gpu_id;
gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
+ gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
+ gpu_id = gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT;
switch (gpu_id) {
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 0, 1):
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 1):
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9):
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1):
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1):
- case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0):
+ case GPU_ID_PI_T76X:
+ features = base_hw_features_t76x;
+ break;
#ifdef MALI_INCLUDE_TFRX
- case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 0, 1):
+ case GPU_ID_PI_TFRX:
+ /* Fall through */
#endif /* MALI_INCLUDE_TFRX */
-#ifdef MALI_INCLUDE_TF2X
- case GPU_ID_MAKE(GPU_ID_PI_TF2X, 0, 0, 1):
-#endif /* MALI_INCLUDE_TF2X */
- features = base_hw_features_t76x;
+ case GPU_ID_PI_T86X:
+ features = base_hw_features_tFxx;
break;
- case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 0):
- case GPU_ID_MAKE(GPU_ID_PI_T72X, 0, 0, 1):
- case GPU_ID_MAKE(GPU_ID_PI_T72X, 1, 0, 0):
+ case GPU_ID_PI_T72X:
features = base_hw_features_t72x;
break;
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 0, 1, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 0, 0):
+ case GPU_ID_MAKE(GPU_ID_PI_T62X, 1, 1, 0):
+ features = base_hw_features_t62x;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_15DEV0):
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 0, GPU_ID_S_EAC):
+ case GPU_ID_MAKE(GPU_ID_PI_T60X, 0, 1, 0):
+ features = base_hw_features_t60x;
+ break;
default:
features = base_hw_features_generic;
break;
case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 0, 1):
issues = base_hw_issues_tFRx_r0p0;
break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 1, 2):
+ issues = base_hw_issues_tFRx_r0p1;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_TFRX, 0, 2, 0):
+ issues = base_hw_issues_tFRx_r0p2;
+ break;
#endif /* MALI_INCLUDE_TFRX */
-#ifdef MALI_INCLUDE_TF2X
- case GPU_ID_MAKE(GPU_ID_PI_TF2X, 0, 0, 1):
- issues = base_hw_issues_tF2x_r0p0;
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 0, 0, 1):
+ issues = base_hw_issues_t86x_r0p0;
+ break;
+ case GPU_ID_MAKE(GPU_ID_PI_T86X, 0, 2, 0):
+ issues = base_hw_issues_t86x_r0p2;
break;
-#endif /* MALI_INCLUDE_TF2X */
default:
dev_err(kbdev->dev, "Unknown GPU ID %x", gpu_id);
return MALI_ERROR_FUNCTION_FAILED;
/* Software model */
switch (gpu_id >> GPU_ID_VERSION_PRODUCT_ID_SHIFT) {
case GPU_ID_PI_T60X:
+ issues = base_hw_issues_model_t60x;
+ break;
case GPU_ID_PI_T62X:
- issues = base_hw_issues_model_t6xx;
+ issues = base_hw_issues_model_t62x;
break;
case GPU_ID_PI_T72X:
issues = base_hw_issues_model_t72x;
break;
case GPU_ID_PI_T76X:
- issues = base_hw_issues_model_t7xx;
+ issues = base_hw_issues_model_t76x;
break;
#ifdef MALI_INCLUDE_TFRX
case GPU_ID_PI_TFRX:
issues = base_hw_issues_model_tFRx;
break;
#endif /* MALI_INCLUDE_TFRX */
-#ifdef MALI_INCLUDE_TF2X
- case GPU_ID_PI_TF2X:
- issues = base_hw_issues_model_tF2x;
+ case GPU_ID_PI_T86X:
+ issues = base_hw_issues_model_t86x;
break;
-#endif /* MALI_INCLUDE_TF2X */
default:
dev_err(kbdev->dev, "Unknown GPU ID %x", gpu_id);
return MALI_ERROR_FUNCTION_FAILED;
* - to the event subsystem (signals the completion/failure of bag/job-chains).
*/
-static void __user *get_compat_pointer(const union kbase_pointer *p)
+static void __user *
+get_compat_pointer(struct kbase_context *kctx, const union kbase_pointer *p)
{
#ifdef CONFIG_COMPAT
- if (is_compat_task())
+ if (kctx->is_compat)
return compat_ptr(p->compat_value);
else
#endif
katom->nr_extres);
if (copy_from_user(input_extres,
- get_compat_pointer(&user_atom->extres_list),
+ get_compat_pointer(katom->kctx, &user_atom->extres_list),
sizeof(*input_extres) * katom->nr_extres) != 0) {
err_ret_val = MALI_ERROR_FUNCTION_FAILED;
goto early_err_out;
return err_ret_val;
}
-STATIC INLINE void jd_resolve_dep(struct list_head *out_list, struct kbase_jd_atom *katom, u8 d)
+STATIC INLINE void jd_resolve_dep(struct list_head *out_list, struct kbase_jd_atom *katom, u8 d, bool ctx_is_dying)
{
u8 other_d = !d;
while (!list_empty(&katom->dep_head[d])) {
struct kbase_jd_atom *dep_atom;
+ u8 dep_type;
dep_atom = list_entry(katom->dep_head[d].next,
struct kbase_jd_atom, dep_item[d]);
+ dep_type = kbase_jd_katom_dep_type(&dep_atom->dep[d]);
list_del(katom->dep_head[d].next);
kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
- if (katom->event_code != BASE_JD_EVENT_DONE) {
+ if (katom->event_code != BASE_JD_EVENT_DONE &&
+ (dep_type != BASE_JD_DEP_TYPE_ORDER || ctx_is_dying)) {
/* Atom failed, so remove the other dependencies and immediately fail the atom */
if (kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
list_del(&dep_atom->dep_item[other_d]);
dep_atom->kds_dep_satisfied = MALI_TRUE;
}
#endif
-
/* at this point a dependency to the failed job is already removed */
- if (!(kbase_jd_katom_dep_type(&dep_atom->dep[d]) == BASE_JD_DEP_TYPE_ORDER &&
- katom->event_code > BASE_JD_EVENT_ACTIVE)) {
- dep_atom->event_code = katom->event_code;
- KBASE_DEBUG_ASSERT(dep_atom->status != KBASE_JD_ATOM_STATE_UNUSED);
- dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
- }
+ dep_atom->event_code = katom->event_code;
+ KBASE_DEBUG_ASSERT(dep_atom->status != KBASE_JD_ATOM_STATE_UNUSED);
+ dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
list_add_tail(&dep_atom->dep_item[0], out_list);
} else if (!kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
if (kbdev->force_replay_count >= kbdev->force_replay_limit) {
kbdev->force_replay_count = 0;
- katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT;
+ katom->event_code = BASE_JD_EVENT_FORCE_REPLAY;
if (kbdev->force_replay_random)
kbdev->force_replay_limit =
mali_bool jd_done_nolock(struct kbase_jd_atom *katom)
{
struct kbase_context *kctx = katom->kctx;
+ struct kbasep_js_kctx_info *js_kctx_info = &kctx->jctx.sched_info;
struct kbase_device *kbdev = kctx->kbdev;
struct list_head completed_jobs;
struct list_head runnable_jobs;
KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
for (i = 0; i < 2; i++)
- jd_resolve_dep(&runnable_jobs, katom, i);
+ jd_resolve_dep(&runnable_jobs, katom, i,
+ js_kctx_info->ctx.is_dying);
if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
kbase_jd_post_external_resources(katom);
KBASE_DEBUG_ASSERT(node->status != KBASE_JD_ATOM_STATE_UNUSED);
- if (katom->event_code == BASE_JD_EVENT_DONE) {
+ if (node->status != KBASE_JD_ATOM_STATE_COMPLETED) {
need_to_try_schedule_context |= jd_run_atom(node);
} else {
node->event_code = katom->event_code;
return MALI_ERROR_FUNCTION_FAILED;
}
- user_addr = get_compat_pointer(&submit_data->addr);
+ user_addr = get_compat_pointer(kctx, &submit_data->addr);
KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(submit_data->nr_atoms, &kctx->timeline.jd_atoms_in_flight));
js_policy = &kbdev->js_data.policy;
KBASE_TRACE_ADD(kbdev, JD_DONE_WORKER, kctx, katom, katom->jc, 0);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) && katom->event_code != BASE_JD_EVENT_DONE && !(katom->event_code & BASE_JD_SW_EVENT))
+ kbasep_jd_cacheclean(kbdev); /* cache flush when jobs complete with non-done codes */
+ else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
+ if (kbdev->gpu_props.num_core_groups > 1 &&
+ !(katom->affinity & kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
+ (katom->affinity & kbdev->gpu_props.props.coherency_info.group[1].core_mask)) {
+ dev_dbg(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
+ kbasep_jd_cacheclean(kbdev);
+ }
+ }
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) &&
+ (katom->core_req & BASE_JD_REQ_FS) &&
+ katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT &&
+ (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
+ !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) {
+ dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n");
+ if (kbasep_10969_workaround_clamp_coordinates(katom)) {
+ /* The job had a TILE_RANGE_FAULT after it was soft-stopped.
+ * Due to an HW issue we try to execute the job
+ * again.
+ */
+ dev_dbg(kbdev->dev, "Clamping has been executed, try to rerun the job\n");
+ katom->event_code = BASE_JD_EVENT_STOPPED;
+ katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
+
+ /* The atom will be requeued, but requeueing does not submit more
+ * jobs. If this was the last job, we must also ensure that more
+ * jobs will be run on slot 0 - this is a Fragment job. */
+ kbasep_js_set_job_retry_submit_slot(katom, 0);
+ }
+ }
+
/*
* Begin transaction on JD context and JS context
*/
*/
KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled != MALI_FALSE);
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6787) && katom->event_code != BASE_JD_EVENT_DONE && !(katom->event_code & BASE_JD_SW_EVENT))
- kbasep_jd_cacheclean(kbdev); /* cache flush when jobs complete with non-done codes */
- else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
- if (kbdev->gpu_props.num_core_groups > 1 &&
- !(katom->affinity & kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
- (katom->affinity & kbdev->gpu_props.props.coherency_info.group[1].core_mask)) {
- dev_dbg(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
- kbasep_jd_cacheclean(kbdev);
- }
- }
-
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10969) &&
- (katom->core_req & BASE_JD_REQ_FS) &&
- katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT &&
- (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
- !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)) {
- dev_dbg(kbdev->dev, "Soft-stopped fragment shader job got a TILE_RANGE_FAULT. Possible HW issue, trying SW workaround\n");
- if (kbasep_10969_workaround_clamp_coordinates(katom)) {
- /* The job had a TILE_RANGE_FAULT after was soft-stopped.
- * Due to an HW issue we try to execute the job
- * again.
- */
- dev_dbg(kbdev->dev, "Clamping has been executed, try to rerun the job\n");
- katom->event_code = BASE_JD_EVENT_STOPPED;
- katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
-
- /* The atom will be requeued, but requeing does not submit more
- * jobs. If this was the last job, we must also ensure that more
- * jobs will be run on slot 0 - this is a Fragment job. */
- kbasep_js_set_job_retry_submit_slot(katom, 0);
- }
- }
-
/* If job was rejected due to BASE_JD_EVENT_PM_EVENT but was not
* specifically targeting core group 1, then re-submit targeting core
* group 0 */
/* fall through */
default:
dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)", i, completion_code, kbase_exception_name(completion_code));
- kbdev->kbase_group_error++;
}
}
/* Disable IRQs to prevent IRQ handlers from kicking in after releasing the spinlock;
* this also clears any outstanding interrupts */
kbase_pm_disable_interrupts(kbdev);
- /* Ensure that any IRQ handlers have finished */
- kbase_synchronize_irqs(kbdev);
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ /* Ensure that any IRQ handlers have finished.
+ * Must be done without holding any locks the IRQ handlers may take */
+ kbase_synchronize_irqs(kbdev);
+
/* Reset the GPU */
kbase_pm_init_hw(kbdev, MALI_TRUE);
/* IRQs were re-enabled by kbase_pm_init_hw, and GPU is still powered */
#endif
#ifdef CONFIG_64BIT
- if (is_compat_task())
+ if (kctx->is_compat)
same_va_bits = 32;
else if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
same_va_bits = 33;
#ifdef CONFIG_64BIT
/* only 32-bit clients have the other two zones */
- if (is_compat_task()) {
+ if (kctx->is_compat) {
#endif
if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) {
kbase_free_alloced_region(same_va_reg);
BUG_ON(!p);
BUG_ON(offset + size > PAGE_SIZE);
- dma_addr = page_private(p) + offset;
+ dma_addr = kbase_dma_addr(p) + offset;
sync_fn(kctx->kbdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
}
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg) {
+ if (!reg || (reg->flags & KBASE_REG_FREE)) {
dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
gpu_addr);
err = MALI_ERROR_FUNCTION_FAILED;
kbase_atomic_add_pages(nr_pages_requested, &alloc->imported.kctx->used_pages);
kbase_atomic_add_pages(nr_pages_requested, &alloc->imported.kctx->kbdev->memdev.used_pages);
+ /* Increase mm counters before we allocate pages so that this
+ * allocation is visible to the OOM killer */
+ kbase_process_page_usage_inc(alloc->imported.kctx, nr_pages_requested);
+
if (MALI_ERROR_NONE != kbase_mem_allocator_alloc(&alloc->imported.kctx->osalloc, nr_pages_requested, alloc->pages + alloc->nents))
goto no_alloc;
alloc->nents += nr_pages_requested;
-
- kbase_process_page_usage_inc(alloc->imported.kctx, nr_pages_requested);
done:
return 0;
no_alloc:
+ kbase_process_page_usage_dec(alloc->imported.kctx, nr_pages_requested);
kbase_atomic_sub_pages(nr_pages_requested, &alloc->imported.kctx->used_pages);
kbase_atomic_sub_pages(nr_pages_requested, &alloc->imported.kctx->kbdev->memdev.used_pages);
void kbase_wait_write_flush(struct kbase_context *kctx);
#endif
+static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
+{
+ SetPagePrivate(p);
+ if (sizeof(dma_addr_t) > sizeof(p->private)) {
+ /* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
+ * private field stays the same. So we have to be clever and
+ * use the fact that we only store DMA addresses of whole pages,
+ * so the low bits should be zero */
+ KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
+ set_page_private(p, dma_addr >> PAGE_SHIFT);
+ } else {
+ set_page_private(p, dma_addr);
+ }
+}
+
+static inline dma_addr_t kbase_dma_addr(struct page *p)
+{
+ if (sizeof(dma_addr_t) > sizeof(p->private))
+ return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;
+
+ return (dma_addr_t)page_private(p);
+}
+
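A minimal sketch (not part of the patch) of the round-trip invariant the two helpers above maintain; the check function name is hypothetical:

/* Illustrative only: kbase_set_dma_addr()/kbase_dma_addr() must round-trip
 * page-aligned DMA addresses even when dma_addr_t is wider than the page's
 * private field (32-bit ARM with LPAE). */
static inline void kbase_dma_addr_roundtrip_check(struct page *p,
		dma_addr_t dma_addr)
{
	kbase_set_dma_addr(p, dma_addr);
	KBASE_DEBUG_ASSERT(kbase_dma_addr(p) == dma_addr);
}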
/**
* @brief Process a bus or page fault.
*
p = list_first_entry(&allocator->free_list_head, struct page,
lru);
list_del(&p->lru);
- dma_unmap_page(allocator->kbdev->dev, page_private(p),
+ dma_unmap_page(allocator->kbdev->dev, kbase_dma_addr(p),
PAGE_SIZE,
DMA_BIDIRECTIONAL);
ClearPagePrivate(p);
int i;
int num_from_free_list;
struct list_head from_free_list = LIST_HEAD_INIT(from_free_list);
+ gfp_t gfp;
might_sleep();
if (i == nr_pages)
return MALI_ERROR_NONE;
- /* If not all pages were sourced from the pool, request new ones. */
- for (; i < nr_pages; i++) {
- dma_addr_t dma_addr;
#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
- /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
- p = alloc_page(GFP_USER);
+ /* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
+ gfp = GFP_USER;
#else
- p = alloc_page(GFP_HIGHUSER);
+ gfp = GFP_HIGHUSER;
#endif
+
+ if (current->flags & PF_KTHREAD) {
+ /* Don't trigger OOM killer from kernel threads, e.g. when
+ * growing memory on GPU page fault */
+ gfp |= __GFP_NORETRY;
+ }
+
+ /* If not all pages were sourced from the pool, request new ones. */
+ for (; i < nr_pages; i++) {
+ dma_addr_t dma_addr;
+ p = alloc_page(gfp);
if (NULL == p)
goto err_out_roll_back;
mp = kmap(p);
}
SetPagePrivate(p);
- set_page_private(p, dma_addr);
+ kbase_set_dma_addr(p, dma_addr);
pages[i] = PFN_PHYS(page_to_pfn(p));
BUG_ON(dma_addr != pages[i]);
}
struct page *p;
p = pfn_to_page(PFN_DOWN(pages[i]));
pages[i] = (phys_addr_t)0;
- dma_unmap_page(allocator->kbdev->dev, page_private(p),
+ dma_unmap_page(allocator->kbdev->dev, kbase_dma_addr(p),
PAGE_SIZE,
DMA_BIDIRECTIONAL);
ClearPagePrivate(p);
struct page *p;
p = pfn_to_page(PFN_DOWN(pages[i]));
- dma_unmap_page(allocator->kbdev->dev, page_private(p),
+ dma_unmap_page(allocator->kbdev->dev, kbase_dma_addr(p),
PAGE_SIZE,
DMA_BIDIRECTIONAL);
ClearPagePrivate(p);
*/
if (sync_back)
dma_sync_single_for_cpu(allocator->kbdev->dev,
- page_private(p),
+ kbase_dma_addr(p),
PAGE_SIZE,
DMA_BIDIRECTIONAL);
static struct page *kbase_carveout_get_page(struct kbase_mem_allocator *allocator)
{
struct page *p = NULL;
+ gfp_t gfp;
mutex_lock(&kbase_carveout_free_list_lock);
if (!list_empty(&kbase_carveout_free_list)) {
dma_addr_t dma_addr;
#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
/* DMA cache sync fails for HIGHMEM before 3.5 on ARM */
- p = alloc_page(GFP_USER);
+ gfp = GFP_USER;
#else
- p = alloc_page(GFP_HIGHUSER);
+ gfp = GFP_HIGHUSER;
#endif
+
+ if (current->flags & PF_KTHREAD) {
+ /* Don't trigger OOM killer from kernel threads, e.g.
+ * when growing memory on GPU page fault */
+ gfp |= __GFP_NORETRY;
+ }
+
+ p = alloc_page(gfp);
if (!p)
goto out;
goto out;
}
- SetPagePrivate(p);
- set_page_private(p, dma_addr);
+ kbase_set_dma_addr(p, dma_addr);
BUG_ON(dma_addr != PFN_PHYS(page_to_pfn(p)));
atomic_inc(&kbase_carveout_system_pages);
}
atomic_dec(&kbase_carveout_used_pages);
mutex_unlock(&kbase_carveout_free_list_lock);
} else {
- dma_unmap_page(allocator->kbdev->dev, page_private(p),
+ dma_unmap_page(allocator->kbdev->dev, kbase_dma_addr(p),
PAGE_SIZE,
DMA_BIDIRECTIONAL);
ClearPagePrivate(p);
if (dma_mapping_error(dev, dma_addr))
goto out_rollback;
- SetPagePrivate(p);
- set_page_private(p, dma_addr);
+ kbase_set_dma_addr(p, dma_addr);
BUG_ON(dma_addr != PFN_PHYS(page_to_pfn(p)));
list_add_tail(&p->lru, &kbase_carveout_free_list);
struct page *p;
p = list_first_entry(&kbase_carveout_free_list, struct page, lru);
- dma_unmap_page(dev, page_private(p),
+ dma_unmap_page(dev, kbase_dma_addr(p),
PAGE_SIZE,
DMA_BIDIRECTIONAL);
ClearPagePrivate(p);
memset(mp, 0x00, PAGE_SIZE); /* instead of __GFP_ZERO, so we can
do cache maintenance */
dma_sync_single_for_device(allocator->kbdev->dev,
- page_private(p),
+ kbase_dma_addr(p),
PAGE_SIZE,
DMA_BIDIRECTIONAL);
kunmap(p);
*/
if (sync_back)
dma_sync_single_for_cpu(allocator->kbdev->dev,
- page_private(p),
+ kbase_dma_addr(p),
PAGE_SIZE,
DMA_BIDIRECTIONAL);
list_add(&p->lru, &new_free_list_items);
goto bad_size;
#if defined(CONFIG_64BIT)
- if (is_compat_task())
+ if (kctx->is_compat)
cpu_va_bits = 32;
else
/* force SAME_VA if a 64-bit client */
*flags &= ~BASE_MEM_SAME_VA;
#ifdef CONFIG_64BIT
- if (!is_compat_task()) {
+ if (!kctx->is_compat) {
/* 64-bit tasks must MMAP anyway, but not expose this address to clients */
*flags |= BASE_MEM_NEED_MMAP;
reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
*num_pages = nents * stride;
#ifdef CONFIG_64BIT
- if (!is_compat_task()) {
+ if (!kctx->is_compat) {
/* 64-bit tasks must MMAP anyway, but not expose this address to
* clients */
*flags |= BASE_MEM_NEED_MMAP;
}
#ifdef CONFIG_64BIT
- if (!is_compat_task()) {
+ if (!kctx->is_compat) {
/* Bind to a cookie */
if (!kctx->cookies) {
dev_err(kctx->kbdev->dev, "No cookies available for allocation!");
KBASE_DEBUG_ASSERT(flags);
#ifdef CONFIG_64BIT
- if (!is_compat_task())
+ if (!kctx->is_compat)
*flags |= BASE_MEM_SAME_VA;
#endif
KBASE_EXPORT_TEST_API(kbase_mmap)
+void *kbase_vmap(struct kbase_context *kctx, mali_addr64 gpu_addr, size_t size,
+ struct kbase_vmap_struct *map)
+{
+ struct kbase_va_region *reg;
+ unsigned long page_index;
+ unsigned int offset = gpu_addr & ~PAGE_MASK;
+ size_t page_count = PFN_UP(offset + size);
+ phys_addr_t *page_array;
+ struct page **pages;
+ void *cpu_addr = NULL;
+ pgprot_t prot;
+ size_t i;
+
+ if (!size || !map)
+ return NULL;
+
+ /* check if page_count calculation will wrap */
+ if (size > ((size_t)-1 / PAGE_SIZE))
+ return NULL;
+
+ kbase_gpu_vm_lock(kctx);
+
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+ if (!reg || (reg->flags & KBASE_REG_FREE))
+ goto out_unlock;
+
+ page_index = (gpu_addr >> PAGE_SHIFT) - reg->start_pfn;
+
+ /* check if page_index + page_count will wrap */
+ if (-1UL - page_count < page_index)
+ goto out_unlock;
+
+ if (page_index + page_count > kbase_reg_current_backed_size(reg))
+ goto out_unlock;
+
+ page_array = kbase_get_phy_pages(reg);
+ if (!page_array)
+ goto out_unlock;
+
+ pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto out_unlock;
+
+ for (i = 0; i < page_count; i++)
+ pages[i] = pfn_to_page(PFN_DOWN(page_array[page_index + i]));
+
+ prot = PAGE_KERNEL;
+ if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
+ /* Map uncached */
+ prot = pgprot_writecombine(prot);
+ }
+
+ cpu_addr = vmap(pages, page_count, VM_MAP, prot);
+
+ kfree(pages);
+
+ if (!cpu_addr)
+ goto out_unlock;
+
+ map->gpu_addr = gpu_addr;
+ map->alloc = kbase_mem_phy_alloc_get(reg->alloc);
+ map->pages = &kbase_get_phy_pages(reg)[page_index];
+ map->addr = (void *)((uintptr_t)cpu_addr + offset);
+ map->size = size;
+ map->is_cached = (reg->flags & KBASE_REG_CPU_CACHED) != 0;
+
+ if (map->is_cached) {
+ /* Sync first page */
+ size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+ phys_addr_t pa = map->pages[0] + offset;
+
+ kbase_sync_single(kctx, pa, sz, dma_sync_single_for_cpu);
+
+ /* Sync middle pages (if any) */
+ for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+ pa = map->pages[i];
+ kbase_sync_single(kctx, pa, PAGE_SIZE, dma_sync_single_for_cpu);
+ }
+
+ /* Sync last page (if any) */
+ if (page_count > 1) {
+ pa = map->pages[page_count - 1];
+ sz = ((offset + size - 1) & ~PAGE_MASK) + 1;
+ kbase_sync_single(kctx, pa, sz, dma_sync_single_for_cpu);
+ }
+ }
+
+ kbase_gpu_vm_unlock(kctx);
+
+ return map->addr;
+
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return NULL;
+}
+
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map)
+{
+ void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK);
+
+ vunmap(addr);
+
+ if (map->is_cached) {
+ off_t offset = (uintptr_t)map->addr & ~PAGE_MASK;
+ size_t size = map->size;
+ size_t page_count = PFN_UP(offset + size);
+ size_t i;
+
+ /* Sync first page */
+ size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+ phys_addr_t pa = map->pages[0] + offset;
+
+ kbase_sync_single(kctx, pa, sz, dma_sync_single_for_device);
+
+ /* Sync middle pages (if any) */
+ for (i = 1; page_count > 2 && i < page_count - 1; i++) {
+ pa = map->pages[i];
+ kbase_sync_single(kctx, pa, PAGE_SIZE, dma_sync_single_for_device);
+ }
+
+ /* Sync last page (if any) */
+ if (page_count > 1) {
+ pa = map->pages[page_count - 1];
+ sz = ((offset + size - 1) & ~PAGE_MASK) + 1;
+ kbase_sync_single(kctx, pa, sz, dma_sync_single_for_device);
+ }
+ }
+
+ map->gpu_addr = 0;
+ map->alloc = kbase_mem_phy_alloc_put(map->alloc);
+ map->pages = NULL;
+ map->addr = NULL;
+ map->size = 0;
+ map->is_cached = false;
+}
+
void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
{
struct mm_struct *mm;
int kbase_mem_commit(struct kbase_context * kctx, mali_addr64 gpu_addr, u64 new_pages, enum base_backing_threshold_status * failure_reason);
int kbase_mmap(struct file *file, struct vm_area_struct *vma);
+struct kbase_vmap_struct {
+ mali_addr64 gpu_addr;
+ struct kbase_mem_phy_alloc *alloc;
+ phys_addr_t *pages;
+ void *addr;
+ size_t size;
+ bool is_cached;
+};
+void *kbase_vmap(struct kbase_context *kctx, mali_addr64 gpu_addr, size_t size,
+ struct kbase_vmap_struct *map);
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map);
+
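To make the intended usage of the new pair concrete, a minimal sketch (the function name and error handling are illustrative, not part of the patch):

/* Illustrative only: map a GPU buffer into kernel VA, touch it, unmap. */
static int example_peek_gpu_buffer(struct kbase_context *kctx,
		mali_addr64 gpu_va, size_t size)
{
	struct kbase_vmap_struct map;
	u32 *cpu_addr;

	cpu_addr = kbase_vmap(kctx, gpu_va, size, &map);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... read or write the buffer through cpu_addr; for cached
	 * regions kbase_vmap()/kbase_vunmap() perform the CPU/device
	 * cache maintenance shown above ... */

	kbase_vunmap(kctx, &map);
	return 0;
}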
/** @brief Allocate memory from kernel space and map it onto the GPU
*
* @param kctx The context used for the allocation/mapping
+++ /dev/null
-/*
- *
- * (C) COPYRIGHT ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained
- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-
-
-
-
-#include <mali_kbase.h>
-
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <asm/cacheflush.h>
-
-void kbase_sync_to_memory(phys_addr_t paddr, void *vaddr, size_t sz)
-{
-#ifdef CONFIG_ARM
- __cpuc_flush_dcache_area(vaddr, sz);
- outer_flush_range(paddr, paddr + sz);
-#elif defined(CONFIG_ARM64)
- /* TODO (MID64-46): There's no other suitable cache flush function for ARM64 */
- flush_cache_all();
-#elif defined(CONFIG_X86)
- struct scatterlist scl = { 0, };
- sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
- dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_TO_DEVICE);
- mb(); /* for outer_sync (if needed) */
-#else
-#error Implement cache maintenance for your architecture here
-#endif
-}
-
-void kbase_sync_to_cpu(phys_addr_t paddr, void *vaddr, size_t sz)
-{
-#ifdef CONFIG_ARM
- __cpuc_flush_dcache_area(vaddr, sz);
- outer_flush_range(paddr, paddr + sz);
-#elif defined(CONFIG_ARM64)
- /* TODO (MID64-46): There's no other suitable cache flush function for ARM64 */
- flush_cache_all();
-#elif defined(CONFIG_X86)
- struct scatterlist scl = { 0, };
- sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
- dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE);
-#else
-#error Implement cache maintenance for your architecture here
-#endif
-}
--- /dev/null
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * @file mali_kbase_mem_profile_debugfs_buf_size.h
+ * Header file for the size of the buffer to accumulate the histogram report text in
+ */
+
+#ifndef _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+#define _KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_
+
+/**
+ * The size of the buffer to accumulate the histogram report text in
+ * @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT
+ */
+#define KBASE_MEM_PROFILE_MAX_BUF_SIZE ((size_t)(64 + ((80 + (56 * 64)) * 15) + 56))
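For reference, the new constant evaluates to 64 + (80 + 56 × 64) × 15 + 56 = 55080 bytes; read against the superseded in-line comment, this is presumably one 64-byte process-name line, fifteen histograms of an 80-byte header plus 64 bins of 56 bytes each, and a 56-byte total line (the breakdown is an inference; only the arithmetic is certain).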
+
+#endif /*_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/
+
* a 4kB physical page.
*/
-static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as);
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, const char *reason_str);
/* Helper Function to perform assignment of page table entries, to ensure the use of
static void page_fault_worker(struct work_struct *data)
{
u64 fault_pfn;
+ u32 fault_access;
size_t new_pages;
size_t fault_rel_pfn;
struct kbase_as *faulting_as;
/* AS transaction end */
kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
kbase_pm_context_idle(kbdev);
}
return;
kbase_gpu_vm_lock(kctx);
- /* find the region object for this VA */
region = kbase_region_tracker_find_region_enclosing_address(kctx, faulting_as->fault_addr);
- if (NULL == region || (GROWABLE_FLAGS_REQUIRED != (region->flags & GROWABLE_FLAGS_MASK))) {
+ if (NULL == region || region->flags & KBASE_REG_FREE) {
kbase_gpu_vm_unlock(kctx);
- /* failed to find the region or mismatch of the flags */
- kbase_mmu_report_fault_and_kill(kctx, faulting_as);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Memory is not mapped on the GPU");
goto fault_done;
}
- if ((((faulting_as->fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) == AS_FAULTSTATUS_ACCESS_TYPE_READ) && !(region->flags & KBASE_REG_GPU_RD)) || (((faulting_as->fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) == AS_FAULTSTATUS_ACCESS_TYPE_WRITE) && !(region->flags & KBASE_REG_GPU_WR)) || (((faulting_as->fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) == AS_FAULTSTATUS_ACCESS_TYPE_EX) && (region->flags & KBASE_REG_GPU_NX))) {
+ fault_access = faulting_as->fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK;
+ if (((fault_access == AS_FAULTSTATUS_ACCESS_TYPE_READ) &&
+ !(region->flags & KBASE_REG_GPU_RD)) ||
+ ((fault_access == AS_FAULTSTATUS_ACCESS_TYPE_WRITE) &&
+ !(region->flags & KBASE_REG_GPU_WR)) ||
+ ((fault_access == AS_FAULTSTATUS_ACCESS_TYPE_EX) &&
+ (region->flags & KBASE_REG_GPU_NX))) {
dev_warn(kbdev->dev, "Access permissions don't match: region->flags=0x%lx", region->flags);
kbase_gpu_vm_unlock(kctx);
- kbase_mmu_report_fault_and_kill(kctx, faulting_as);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Access permissions mismatch");
+ goto fault_done;
+ }
+
+ if (!(region->flags & GROWABLE_FLAGS_REQUIRED)) {
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Memory is not growable");
goto fault_done;
}
fault_rel_pfn = fault_pfn - region->start_pfn;
if (fault_rel_pfn < kbase_reg_current_backed_size(region)) {
- dev_warn(kbdev->dev, "Page fault in allocated region of growable TMEM: Ignoring");
+ dev_dbg(kbdev->dev, "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
+ faulting_as->fault_addr, region->start_pfn,
+ region->start_pfn +
+ kbase_reg_current_backed_size(region));
+
kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ /* [1] in case another page fault occurred while we were
+ * handling the (duplicate) page fault we need to ensure we
+ * don't lose the other page fault as a result of us clearing
+ * the MMU IRQ. Therefore, after we clear the MMU IRQ we send
+ * an UNLOCK command that will retry any stalled memory
+ * transaction (which should cause the other page fault to be
+ * raised again).
+ */
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, 0, 0, 0,
+ AS_COMMAND_UNLOCK, 1);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
+
goto fault_done;
}
- new_pages = make_multiple(fault_rel_pfn - kbase_reg_current_backed_size(region) + 1, region->extent);
- if (new_pages + kbase_reg_current_backed_size(region) > region->nr_pages) {
- /* cap to max vsize */
- new_pages = region->nr_pages - kbase_reg_current_backed_size(region);
- }
+ new_pages = make_multiple(fault_rel_pfn -
+ kbase_reg_current_backed_size(region) + 1,
+ region->extent);
+
+ /* cap to max vsize */
+ if (new_pages + kbase_reg_current_backed_size(region) >
+ region->nr_pages)
+ new_pages = region->nr_pages -
+ kbase_reg_current_backed_size(region);
if (0 == new_pages) {
/* Duplicate of a fault we've already handled, nothing to do */
kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ KBASE_MMU_FAULT_TYPE_PAGE);
+ /* See comment [1] about UNLOCK usage */
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, 0, 0, 0,
+ AS_COMMAND_UNLOCK, 1);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
goto fault_done;
}
kbase_free_phy_pages_helper(region->alloc, new_pages);
kbase_gpu_vm_unlock(kctx);
/* The locked VA region will be unlocked and the cache invalidated in here */
- kbase_mmu_report_fault_and_kill(kctx, faulting_as);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Page table update failure");
goto fault_done;
}
#ifdef CONFIG_MALI_GATOR_SUPPORT
else
op = AS_COMMAND_FLUSH_PT;
+ /* clear MMU interrupt - this needs to be done after updating
+ * the page tables but before issuing a FLUSH command. The
+ * FLUSH cmd has a side effect that it restarts stalled memory
+ * transactions in other address spaces which may cause
+ * another fault to occur. If we didn't clear the interrupt at
+ * this stage a new IRQ might not be raised when the GPU finds
+ * an MMU IRQ is already pending.
+ */
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_PAGE);
+
kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
faulting_as->fault_addr >> PAGE_SHIFT,
new_pages,
/* AS transaction end */
/* reenable this in the mask */
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
} else {
/* failed to extend, handle as a normal PF */
kbase_gpu_vm_unlock(kctx);
- kbase_mmu_report_fault_and_kill(kctx, faulting_as);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Page allocation failure");
}
fault_done:
/* Clean the full page */
dma_sync_single_for_device(kctx->kbdev->dev,
- page_private(p),
+ kbase_dma_addr(p),
PAGE_SIZE,
DMA_TO_DEVICE);
kunmap(pfn_to_page(PFN_DOWN(pgd)));
mmu_phyaddr_to_pte(target_pgd));
dma_sync_single_for_device(kctx->kbdev->dev,
- page_private(p),
+ kbase_dma_addr(p),
PAGE_SIZE,
DMA_TO_DEVICE);
/* Rely on the caller to update the address space flags. */
nr -= count;
dma_sync_single_for_device(kctx->kbdev->dev,
- page_private(p),
+ kbase_dma_addr(p),
PAGE_SIZE, DMA_TO_DEVICE);
kunmap_atomic(pgd_page);
}
nr -= count;
dma_sync_single_for_device(kctx->kbdev->dev,
- page_private(p) +
+ kbase_dma_addr(p) +
(index * sizeof(u64)),
count * sizeof(u64),
DMA_TO_DEVICE);
nr -= count;
dma_sync_single_for_device(kctx->kbdev->dev,
- page_private(p) +
+ kbase_dma_addr(p) +
(index * sizeof(u64)),
count * sizeof(u64),
DMA_TO_DEVICE);
nr -= count;
dma_sync_single_for_device(kctx->kbdev->dev,
- page_private(p) +
+ kbase_dma_addr(p) +
(index * sizeof(u64)),
count * sizeof(u64),
DMA_TO_DEVICE);
nr -= count;
dma_sync_single_for_device(kctx->kbdev->dev,
- page_private(p) +
+ kbase_dma_addr(p) +
(index * sizeof(u64)),
count * sizeof(u64),
DMA_TO_DEVICE);
kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
KBASE_MMU_FAULT_TYPE_BUS);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ KBASE_MMU_FAULT_TYPE_BUS);
+
kbase_pm_context_idle(kbdev);
}
#if KBASE_GPU_RESET_EN
/**
* The caller must ensure it has retained the ctx to prevent it from being scheduled out whilst it's being worked on.
*/
-static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as)
+static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
+ struct kbase_as *as, const char *reason_str)
{
unsigned long flags;
int exception_type;
#endif
static const char * const access_type_names[] = { "RESERVED", "EXECUTE", "READ", "WRITE" };
- KBASE_DEBUG_ASSERT(as);
- KBASE_DEBUG_ASSERT(kctx);
-
as_no = as->number;
kbdev = kctx->kbdev;
js_devdata = &kbdev->js_data;
source_id = (as->fault_status >> 16);
/* terminal fault, print info about the fault */
- dev_err(kbdev->dev, "Unhandled Page fault in AS%d at VA 0x%016llX\n"
- "raw fault status 0x%X\n"
- "decoded fault status: %s\n"
- "exception type 0x%X: %s\n"
- "access type 0x%X: %s\n"
- "source id 0x%X\n",
- as_no, as->fault_addr,
- as->fault_status,
- (as->fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
- exception_type, kbase_exception_name(exception_type),
- access_type, access_type_names[access_type],
- source_id);
+ dev_err(kbdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ as_no, as->fault_addr,
+ reason_str,
+ as->fault_status,
+ (as->fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, kbase_exception_name(exception_type),
+ access_type, access_type_names[access_type],
+ source_id);
/* hardware counters dump fault handling */
if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) && (kbdev->hwcnt.state == KBASE_INSTR_STATE_DUMPING)) {
/* Clear down the fault */
kbase_mmu_hw_clear_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_enable_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
#if KBASE_GPU_RESET_EN
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8245) && reset_status)
* We need to switch to UNMAPPED mode - but we do this in a
* worker so that we can sleep
*/
- kbdev->kbase_group_error++;
KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_busfault));
INIT_WORK(&as->work_busfault, bus_fault_worker);
queue_work(as->pf_wq, &as->work_busfault);
} else {
- kbdev->kbase_group_error++;
KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&as->work_pagefault));
INIT_WORK(&as->work_pagefault, page_fault_worker);
queue_work(as->pf_wq, &as->work_pagefault);
*
* @param[in] kbdev kbase device to clear the fault from.
* @param[in] as address space to clear the fault from.
- * @param[in] kctx kbase context to clear the fault from.
+ * @param[in] kctx kbase context to clear the fault from or NULL.
* @param[in] type The type of fault that needs to be cleared.
*/
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
struct kbase_context *kctx, enum kbase_mmu_fault_type type);
+/** @brief Enable fault that has been previously reported by the MMU.
+ *
+ * After a page fault or bus error has been reported by the MMU, the
+ * corresponding IRQ source is masked. Once the fault has been handled,
+ * this function must be called to re-enable the page fault or bus error
+ * IRQ.
+ *
+ * @param[in] kbdev kbase device on which to re-enable the fault.
+ * @param[in] as address space for which to re-enable the fault.
+ * @param[in] kctx kbase context for which to re-enable the fault.
+ * @param[in] type The type of fault that needs to be re-enabled.
+ */
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type);
+
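The resulting handling sequence, sketched below (illustrative only; handle_fault() is a placeholder for the real work, such as growing the faulting region):

/* Illustrative only: with the split API a fault is cleared once handled,
 * then its IRQ source is unmasked again. */
handle_fault(kbdev, as, kctx);	/* e.g. grow region, update page tables */
kbase_mmu_hw_clear_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);
kbase_mmu_hw_enable_fault(kbdev, as, kctx, KBASE_MMU_FAULT_TYPE_PAGE);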
/** @} *//* end group mali_kbase_mmu_hw */
/** @} *//* end group base_kbase_api */
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
{
const int num_as = 16;
- const int busfault_shift = MMU_REGS_PAGE_FAULT_FLAGS;
+ const int busfault_shift = MMU_PAGE_FAULT_FLAGS;
const int pf_shift = 0;
const unsigned long as_bit_mask = (1UL << num_as) - 1;
unsigned long flags;
/* find the fault type */
as->fault_type = (bf_bits & (1 << as_no)) ?
- KBASE_MMU_FAULT_TYPE_BUS : KBASE_MMU_FAULT_TYPE_PAGE;
-
+ KBASE_MMU_FAULT_TYPE_BUS :
+ KBASE_MMU_FAULT_TYPE_PAGE;
if (kbase_as_has_bus_fault(as)) {
- /*
- * Clear the internal JM mask first before clearing the
- * internal MMU mask
- *
- * Note:
- * Always clear the page fault just in case there was
- * one at the same time as the bus error (bus errors are
- * always processed in preference to pagefaults should
- * both happen at the same time).
+ /* Mark bus fault as handled.
+ * Note that a bus fault is processed first in the case
+ * where both a bus fault and a page fault occur.
*/
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR),
- (1UL << MMU_REGS_BUS_ERROR_FLAG(as_no)) |
- (1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no)), kctx);
+ bf_bits &= ~(1UL << as_no);
- /* mark as handled (note: bf_bits is already shifted) */
- bf_bits &= ~(1UL << (as_no));
-
- /* remove the queued BFs (and PFs) from the mask */
- new_mask &= ~((1UL << MMU_REGS_BUS_ERROR_FLAG(as_no)) |
- (1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no)));
+ /* remove the queued BF (and PF) from the mask */
+ new_mask &= ~(MMU_BUS_ERROR(as_no) |
+ MMU_PAGE_FAULT(as_no));
} else {
- /*
- * Clear the internal JM mask first before clearing the
- * internal MMU mask
- */
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR),
- 1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no),
- kctx);
-
- /* mark as handled */
+ /* Mark page fault as handled */
pf_bits &= ~(1UL << as_no);
- /* remove the queued PFs from the mask */
- new_mask &= ~(1UL << MMU_REGS_PAGE_FAULT_FLAG(as_no));
+ /* remove the queued PF from the mask */
+ new_mask &= ~MMU_PAGE_FAULT(as_no);
}
/* Process the interrupt for this address space */
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
struct kbase_context *kctx, enum kbase_mmu_fault_type type)
+{
+ u32 pf_bf_mask;
+
+ /* Clear the page (and bus fault IRQ as well in case one occurred) */
+ pf_bf_mask = MMU_PAGE_FAULT(as->number);
+ if (type == KBASE_MMU_FAULT_TYPE_BUS)
+ pf_bf_mask |= MMU_BUS_ERROR(as->number);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx);
+}
+
+void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
+ struct kbase_context *kctx, enum kbase_mmu_fault_type type)
{
unsigned long flags;
- u32 mask;
+ u32 irq_mask;
+ /* Enable the page fault IRQ (and bus fault IRQ as well in case one
+ * occurred) */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx);
- mask |= (1UL << MMU_REGS_PAGE_FAULT_FLAG(as->number));
+ irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) |
+ MMU_PAGE_FAULT(as->number);
+
if (type == KBASE_MMU_FAULT_TYPE_BUS)
- mask |= (1UL << MMU_REGS_BUS_ERROR_FLAG(as->number));
+ irq_mask |= MMU_BUS_ERROR(as->number);
+
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx);
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), mask, kctx);
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
#endif
ktime_t time_period_start;
u32 time_busy;
u32 time_idle;
+ u32 prev_busy;
+ u32 prev_idle;
mali_bool gpu_active;
u32 busy_cl[2];
u32 busy_gl;
u64 shader_poweroff_pending;
/** Set to MALI_TRUE if the poweroff timer is currently running, MALI_FALSE otherwise */
- mali_bool poweroff_timer_running;
+ mali_bool poweroff_timer_needed;
int poweroff_shader_ticks;
#ifdef CONFIG_PM_DEVFREQ
void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
- unsigned long *total, unsigned long *busy, bool reset);
+ unsigned long *total, unsigned long *busy);
+void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev);
#endif
#ifdef CONFIG_MALI_MIDGARD_DVFS
present = kbase_pm_get_present_cores(kbdev, type);
trans = kbase_pm_get_trans_cores(kbdev, type);
ready = kbase_pm_get_ready_cores(kbdev, type);
+ /* mask off ready from trans in case transitions finished between the register reads */
+ trans &= ~ready;
powering_on_trans = trans & *powering_on;
*powering_on = powering_on_trans;
}
desired_l3_state = get_desired_cache_status(kbdev->l3_present_bitmap, desired_l2_state);
- prev_l2_available_bitmap = l2_available_bitmap;
+ prev_l2_available_bitmap = kbdev->l2_available_bitmap;
in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_L3, desired_l3_state, 0, NULL, &kbdev->pm.powering_on_l3_state);
in_desired_state &= kbase_pm_transition_core_type(kbdev, KBASE_PM_CORE_L2, desired_l2_state, 0, &l2_available_bitmap, &kbdev->pm.powering_on_l2_state);
Exceeding this will cause overflow */
#define KBASE_PM_TIME_SHIFT 8
+/* Maximum time between sampling of utilization data, without resetting the
+ * counters. */
+#define MALI_UTILIZATION_MAX_PERIOD 100000 /* ns = 100ms */
+
#ifdef CONFIG_MALI_MIDGARD_DVFS
static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
{
kbdev->pm.metrics.time_period_start = ktime_get();
kbdev->pm.metrics.time_busy = 0;
kbdev->pm.metrics.time_idle = 0;
+ kbdev->pm.metrics.prev_busy = 0;
+ kbdev->pm.metrics.prev_idle = 0;
kbdev->pm.metrics.gpu_active = MALI_TRUE;
kbdev->pm.metrics.active_cl_ctx[0] = 0;
kbdev->pm.metrics.active_cl_ctx[1] = 0;
}
}
-/*caller needs to hold kbdev->pm.metrics.lock before calling this function*/
-static void kbase_pm_get_dvfs_utilisation_reset(struct kbase_device *kbdev, ktime_t now)
+/* Caller needs to hold kbdev->pm.metrics.lock before calling this function. */
+static void kbase_pm_reset_dvfs_utilisation_unlocked(struct kbase_device *kbdev, ktime_t now)
{
+ /* Store previous value */
+ kbdev->pm.metrics.prev_idle = kbdev->pm.metrics.time_idle;
+ kbdev->pm.metrics.prev_busy = kbdev->pm.metrics.time_busy;
+
+ /* Reset current values */
kbdev->pm.metrics.time_period_start = now;
kbdev->pm.metrics.time_idle = 0;
kbdev->pm.metrics.time_busy = 0;
kbdev->pm.metrics.busy_gl = 0;
}
-void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev, unsigned long *total, unsigned long *busy, bool reset)
+void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, ktime_get());
+ spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
+}
+
+void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
+ unsigned long *total_out, unsigned long *busy_out)
{
ktime_t now = ktime_get();
- unsigned long tmp, flags;
+ unsigned long flags, busy, total;
spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
- tmp = kbdev->pm.metrics.busy_gl;
- tmp += kbdev->pm.metrics.busy_cl[0];
- tmp += kbdev->pm.metrics.busy_cl[1];
-
- *busy = tmp;
- *total = tmp + kbdev->pm.metrics.time_idle;
+ busy = kbdev->pm.metrics.time_busy;
+ total = busy + kbdev->pm.metrics.time_idle;
+
+ /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (default
+ * 100ms) */
+ if (total >= MALI_UTILIZATION_MAX_PERIOD) {
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
+ } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) {
+ total += kbdev->pm.metrics.prev_idle +
+ kbdev->pm.metrics.prev_busy;
+ busy += kbdev->pm.metrics.prev_busy;
+ }
- if (reset)
- kbase_pm_get_dvfs_utilisation_reset(kbdev, now);
+ *total_out = total;
+ *busy_out = busy;
spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
}
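A short sketch of how a caller, for instance a devfreq governor, might turn the windowed counters returned above into a utilisation figure (illustrative only):

/* Illustrative only: derive a percentage from the windowed counters. */
unsigned long total, busy, utilisation_pct;

kbase_pm_get_dvfs_utilisation(kbdev, &total, &busy);
utilisation_pct = total ? (100UL * busy) / total : 0;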
}
out:
- kbase_pm_get_dvfs_utilisation_reset(kbdev, now);
+ kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
return utilisation;
}
spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
- hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);
- return HRTIMER_RESTART;
+ if (kbdev->pm.poweroff_timer_needed) {
+ hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);
+
+ return HRTIMER_RESTART;
+ }
+
+ return HRTIMER_NORESTART;
}
static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
if (do_poweroff != MALI_FALSE) {
- kbdev->pm.poweroff_timer_running = MALI_FALSE;
+ kbdev->pm.poweroff_timer_needed = MALI_FALSE;
/* Power off the GPU */
kbase_pm_do_poweroff(kbdev, MALI_FALSE);
hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer);
lockdep_assert_held(&kbdev->pm.lock);
+ kbdev->pm.poweroff_timer_needed = MALI_FALSE;
hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer);
/* If wq is already running but is held off by pm.lock, make sure it has no effect */
return;
}
- if (!kbdev->pm.poweroff_timer_running && !kbdev->pm.gpu_powered) {
- kbdev->pm.poweroff_timer_running = MALI_TRUE;
+ if (!kbdev->pm.poweroff_timer_needed && !kbdev->pm.gpu_powered) {
+ kbdev->pm.poweroff_timer_needed = MALI_TRUE;
hrtimer_start(&kbdev->pm.gpu_poweroff_timer, kbdev->pm.gpu_poweroff_time, HRTIMER_MODE_REL);
}
/* Request power off */
if (kbdev->pm.gpu_powered) {
kbdev->pm.gpu_poweroff_pending = kbdev->pm.poweroff_gpu_ticks;
- if (!kbdev->pm.poweroff_timer_running) {
+ if (!kbdev->pm.poweroff_timer_needed) {
/* Start timer if not running (e.g. if the power policy has been changed from always_on
* to something else). This will ensure the GPU is actually powered off */
- kbdev->pm.poweroff_timer_running = MALI_TRUE;
+ kbdev->pm.poweroff_timer_needed = MALI_TRUE;
hrtimer_start(&kbdev->pm.gpu_poweroff_timer, kbdev->pm.gpu_poweroff_time, HRTIMER_MODE_REL);
}
}
/* Start timer to power off cores */
kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap);
kbdev->pm.shader_poweroff_pending_time = kbdev->pm.poweroff_shader_ticks;
- } else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 && kbdev->pm.poweroff_timer_running) {
- /* If power policy is keeping cores on despite there being no active contexts
- * then disable poweroff timer as it isn't required */
- kbdev->pm.poweroff_timer_running = MALI_FALSE;
- hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer);
+ } else if (kbdev->pm.active_count == 0 && desired_bitmap != 0 && kbdev->pm.poweroff_timer_needed) {
+ /* If power policy is keeping cores on despite there being no
+ * active contexts then disable poweroff timer as it isn't
+ * required.
+ * Only reset poweroff_timer_needed if we're not in the middle
+ * of the power off callback */
+ kbdev->pm.poweroff_timer_needed = MALI_FALSE;
+ hrtimer_try_to_cancel(&kbdev->pm.gpu_poweroff_timer);
}
/* Ensure timer does not power off wanted cores and make sure to power off unwanted cores */
{
struct mali_power_actor *mali_actor = actor->data;
struct kbase_device *kbdev = mali_actor->kbdev;
- struct devfreq_dev_status stat;
+ unsigned long total_time, busy_time;
unsigned long power, temperature;
- int err;
struct dev_pm_opp *opp;
unsigned long voltage;
unsigned long freq;
+ kbase_pm_get_dvfs_utilisation(kbdev, &total_time, &busy_time);
- err = kbdev->devfreq->profile->get_dev_status(kbdev->dev, &stat);
- if (err) {
- dev_err(kbdev->dev, "Failed to get devfreq status (%d)\n", err);
- return 0;
- }
-
- freq = stat.current_frequency;
+ freq = clk_get_rate(kbdev->clock);
rcu_read_lock();
opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
rcu_read_unlock();
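+ /* Scale the full-utilisation dynamic power estimate by the measured
+ * duty cycle (busy_time / total_time) */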
- power = mali_actor->ops->get_dynamic_power(freq);
- power = (power * stat.busy_time) / stat.total_time;
+ power = mali_actor->ops->get_dynamic_power(freq, voltage);
+ power = (power * busy_time) / total_time;
temperature = zone->temperature;
temperature = zone->temperature;
power = mali_actor->ops->get_static_power(voltage, temperature)
- + mali_actor->ops->get_dynamic_power(freq);
+ + mali_actor->ops->get_dynamic_power(freq, voltage);
dev_dbg(kbdev->dev, "get max power = %u\n", power);
struct mali_power_actor *mali_actor = actor->data;
struct kbase_device *kbdev = mali_actor->kbdev;
struct thermal_cooling_device *cdev;
- struct devfreq_dev_status stat;
+ unsigned long total_time, busy_time;
unsigned long freq, state;
unsigned long static_power, normalized_power;
unsigned long voltage, temperature;
dev_dbg(kbdev->dev, "Setting max power %u\n", power);
- err = kbdev->devfreq->profile->get_dev_status(kbdev->dev, &stat);
- if (err) {
- dev_err(kbdev->dev, "Failed to get devfreq status (%d)\n", err);
- return err;
- }
+ kbase_pm_get_dvfs_utilisation(kbdev, &total_time, &busy_time);
- freq = stat.current_frequency;
+ freq = clk_get_rate(kbdev->clock);
rcu_read_lock();
opp = dev_pm_opp_find_freq_exact(kbdev->dev, freq, true);
} else {
unsigned long dyn_power = power - static_power;
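+		/* Normalise the remaining dynamic budget back to full
+		 * utilisation: at a duty cycle of busy_time/total_time the
+		 * GPU can afford a proportionally higher peak power */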
- if (!stat.busy_time)
+ if (!busy_time)
normalized_power = dyn_power;
else
- normalized_power = (dyn_power * stat.total_time) / stat.busy_time;
+ normalized_power = (dyn_power * total_time) / busy_time;
}
/* Find target frequency. Use the lowest OPP if allocated power is too
num_opps = dev_pm_opp_get_opp_count(kbdev->dev);
rcu_read_unlock();
- table = kzalloc(num_opps, sizeof(table[0]), GFP_KERNEL);
+ table = kcalloc(num_opps, sizeof(table[0]), GFP_KERNEL);
if (!table) {
kfree(mali_actor);
return -ENOMEM;
table[i].freq = freq;
- power_dyn = callbacks->get_dynamic_power(freq);
+ power_dyn = callbacks->get_dynamic_power(freq, voltage);
power_static = callbacks->get_static_power(voltage, 85000);
dev_info(kbdev->dev, "Power table: %lu MHz @ %lu mV: %lu + %lu = %lu mW\n",
mali_actor->dyn_table = table;
mali_actor->dyn_table_count = i;
- actor = power_actor_register(&mali_pa_ops, mali_actor);
+ /* Register power actor.
+ * Set default actor weight to 1 (8-bit fixed point). */
+ actor = power_actor_register(1 * 256, &mali_pa_ops, mali_actor);
if (IS_ERR_OR_NULL(actor)) {
kfree(mali_actor->dyn_table);
kfree(mali_actor);
#include <linux/pm_opp.h>
+/**
+ * struct mali_pa_model_ops - Function pointers for the power model
+ *
+ * @get_static_power: Pointer to a function that returns the estimated static
+ * power usage in mW, based on the input voltage in mV and
+ * temperature in millidegrees Celsius.
+ * @get_dynamic_power: Pointer to a function that returns the estimated
+ * dynamic power usage in mW, based on the input frequency
+ * in Hz and voltage in mV.
+ */
struct mali_pa_model_ops {
- unsigned long (*get_static_power)(unsigned long voltage, unsigned long temperature);
- unsigned long (*get_dynamic_power)(unsigned long freq);
+ unsigned long (*get_static_power)(unsigned long voltage,
+ unsigned long temperature);
+ unsigned long (*get_dynamic_power)(unsigned long freq,
+ unsigned long voltage);
};
struct mali_pa_power_table {
bool err = false;
/* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or
- * BASE_JD_EVENT_TERMINATED.
+ * if force_replay is enabled.
*/
- if ((BASE_JD_EVENT_TERMINATED == katom->event_code) ||
- (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code)) {
+ if (BASE_JD_EVENT_TERMINATED == katom->event_code) {
+ return false;
+ } else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) {
+ return true;
+ } else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) {
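+ /* Translate the debug-only force-replay event into a real fault
+ * code so the atom goes through the normal replay path */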
+ katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT;
return true;
} else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) {
/* No replay for faults of type other than
payload->fragment_hierarchy_mask,
payload->fragment_core_req);
#endif
-
/* Process fragment job chain */
job_header = (mali_addr64) payload->fragment_jc;
job_loop_detect = job_header;
} while (hi1 != hi2);
/* Record the CPU's idea of current time */
- getnstimeofday(&ts);
+ getrawmonotonic(&ts);
kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
return 0;
dma_sync_single_for_cpu(katom->kctx->kbdev->dev,
- page_private(pfn_to_page(PFN_DOWN(addr))) +
+ kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
offset, sizeof(data),
DMA_BIDIRECTIONAL);
memcpy(page + offset, &data, sizeof(data));
dma_sync_single_for_device(katom->kctx->kbdev->dev,
- page_private(pfn_to_page(PFN_DOWN(addr))) +
+ kbase_dma_addr(pfn_to_page(PFN_DOWN(addr))) +
offset, sizeof(data),
DMA_BIDIRECTIONAL);
kunmap(pfn_to_page(PFN_DOWN(addr)));
static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
{
- if(!katom)
- {
- pr_err("katom null.forbiden return\n");
- return;
- }
- if(!katom->fence)
- {
- pr_info("katom->fence null.may release out of order.so continue unfinished step\n");
- /*
- if return here,may result in infinite loop?
- we need to delete dep_item[0] from kctx->waiting_soft_jobs?
- jd_done_nolock function move the dep_item[0] to complete job list and then delete?
- */
- goto finish_softjob;
- }
-
if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
/* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
return;
}
/* Wait was cancelled - zap the atoms */
-finish_softjob:
katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
kbase_finish_soft_job(katom);
if (jd_done_nolock(katom))
kbasep_js_try_schedule_head_ctx(katom->kctx->kbdev);
-
- return;
}
#endif /* CONFIG_SYNC */
break;
case BASE_JD_REQ_SOFT_FENCE_WAIT:
/* Release the reference to the fence object */
- if(katom->fence) {
- sync_fence_put(katom->fence);
- katom->fence = NULL;
- }
+ sync_fence_put(katom->fence);
+ katom->fence = NULL;
break;
#endif /* CONFIG_SYNC */
}
u32 padding;
};
+#ifdef BASE_LEGACY_UK7_SUPPORT
+/**
+ * This structure is kept for backward-compatibility reasons.
+ * It shall be removed as soon as KBASE_FUNC_CPU_PROPS_REG_DUMP_OBSOLETE
+ * (previously KBASE_FUNC_CPU_PROPS_REG_DUMP) ioctl call
+ * is removed. Removal of KBASE_FUNC_CPU_PROPS_REG_DUMP is part of having
+ * the function for reading cpu properties moved from base to osu.
+ */
+#define BASE_CPU_PROPERTY_FLAG_LITTLE_ENDIAN F_BIT_0
+struct base_cpu_id_props {
+ /**
+ * CPU ID
+ */
+ u32 id;
+
+ /**
+ * CPU Part number
+ */
+ u16 part;
+ /**
+ * ASCII code of implementer trademark
+ */
+ u8 implementer;
+
+ /**
+ * CPU Variant
+ */
+ u8 variant;
+ /**
+ * CPU Architecture
+ */
+ u8 arch;
+
+ /**
+ * CPU revision
+ */
+ u8 rev;
+
+ /**
+ * Validity of the CPU id: 0 means invalid; 1 means valid, and is set
+ * only if ALL the cpu_id props are valid
+ */
+ u8 valid;
+
+ u8 padding[1];
+};
+
+/**
+ * This structure is kept for backward-compatibility reasons.
+ * It shall be removed as soon as KBASE_FUNC_CPU_PROPS_REG_DUMP_OBSOLETE
+ * (previously KBASE_FUNC_CPU_PROPS_REG_DUMP) ioctl call
+ * is removed. Removal of KBASE_FUNC_CPU_PROPS_REG_DUMP is part of having
+ * the function for reading cpu properties moved from base to osu.
+ */
+struct base_cpu_props {
+ u32 nr_cores; /**< Number of CPU cores */
+
+ /**
+ * CPU page size as a Logarithm to Base 2. The compile-time
+ * equivalent is @ref OSU_CONFIG_CPU_PAGE_SIZE_LOG2
+ */
+ u32 cpu_page_size_log2;
+
+ /**
+ * CPU L1 Data cache line size as a Logarithm to Base 2. The compile-time
+ * equivalent is @ref OSU_CONFIG_CPU_L1_DCACHE_LINE_SIZE_LOG2.
+ */
+ u32 cpu_l1_dcache_line_size_log2;
+
+ /**
+ * CPU L1 Data cache size, in bytes. The compile-time equivalent is
+ * @ref OSU_CONFIG_CPU_L1_DCACHE_SIZE.
+ *
+ * This CPU Property is mainly provided to implement OpenCL's
+ * clGetDeviceInfo(), which allows the CL_DEVICE_GLOBAL_MEM_CACHE_SIZE
+ * hint to be queried.
+ */
+ u32 cpu_l1_dcache_size;
+
+ /**
+ * CPU Property Flags bitpattern.
+ *
+ * This is a combination of bits as specified by the macros prefixed with
+ * 'BASE_CPU_PROPERTY_FLAG_'.
+ */
+ u32 cpu_flags;
+
+ /**
+ * Maximum clock speed in MHz.
+ * @usecase 'Maximum' CPU Clock Speed information is required by OpenCL's
+ * clGetDeviceInfo() function for the CL_DEVICE_MAX_CLOCK_FREQUENCY hint.
+ */
+ u32 max_cpu_clock_speed_mhz;
+
+ /**
+ * @brief Total memory, in bytes.
+ *
+ * This is the theoretical maximum memory available to the CPU. It is
+ * unlikely that a client will be able to allocate all of this memory for
+ * their own purposes, but this at least provides an upper bound on the
+ * memory available to the CPU.
+ *
+ * This is required for OpenCL's clGetDeviceInfo() call when
+ * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL CPU devices.
+ */
+ u64 available_memory_size;
+
+ /**
+ * CPU ID detailed info
+ */
+ struct base_cpu_id_props cpu_id;
+
+ u32 padding;
+};
+
+/**
+ * This structure is kept for backward-compatibility reasons.
+ * It shall be removed as soon as KBASE_FUNC_CPU_PROPS_REG_DUMP_OBSOLETE
+ * (previously KBASE_FUNC_CPU_PROPS_REG_DUMP) ioctl call
+ * is removed. Removal of KBASE_FUNC_CPU_PROPS_REG_DUMP is part of having
+ * the function for reading cpu properties moved from base to osu.
+ */
struct kbase_uk_cpuprops {
union uk_header header;
struct base_cpu_props props;
/* OUT */
};
+#endif /* BASE_LEGACY_UK7_SUPPORT */
struct kbase_uk_gpuprops {
union uk_header header;
char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE];
u32 version_string_size;
u32 padding;
- u32 rk_version;
};
struct kbase_uk_disjoint_query {
enum kbase_uk_function_id {
KBASE_FUNC_MEM_ALLOC = (UK_FUNC_ID + 0),
- KBASE_FUNC_MEM_IMPORT,
- KBASE_FUNC_MEM_COMMIT,
- KBASE_FUNC_MEM_QUERY,
- KBASE_FUNC_MEM_FREE,
- KBASE_FUNC_MEM_FLAGS_CHANGE,
- KBASE_FUNC_MEM_ALIAS,
+ KBASE_FUNC_MEM_IMPORT = (UK_FUNC_ID + 1),
+ KBASE_FUNC_MEM_COMMIT = (UK_FUNC_ID + 2),
+ KBASE_FUNC_MEM_QUERY = (UK_FUNC_ID + 3),
+ KBASE_FUNC_MEM_FREE = (UK_FUNC_ID + 4),
+ KBASE_FUNC_MEM_FLAGS_CHANGE = (UK_FUNC_ID + 5),
+ KBASE_FUNC_MEM_ALIAS = (UK_FUNC_ID + 6),
#ifdef BASE_LEGACY_UK6_SUPPORT
KBASE_FUNC_JOB_SUBMIT_UK6 = (UK_FUNC_ID + 7),
KBASE_FUNC_SYNC = (UK_FUNC_ID + 8),
- KBASE_FUNC_POST_TERM,
+ KBASE_FUNC_POST_TERM = (UK_FUNC_ID + 9),
- KBASE_FUNC_HWCNT_SETUP,
- KBASE_FUNC_HWCNT_DUMP,
- KBASE_FUNC_HWCNT_CLEAR,
+ KBASE_FUNC_HWCNT_SETUP = (UK_FUNC_ID + 10),
+ KBASE_FUNC_HWCNT_DUMP = (UK_FUNC_ID + 11),
+ KBASE_FUNC_HWCNT_CLEAR = (UK_FUNC_ID + 12),
- KBASE_FUNC_CPU_PROPS_REG_DUMP,
- KBASE_FUNC_GPU_PROPS_REG_DUMP,
+#ifdef BASE_LEGACY_UK7_SUPPORT
+ KBASE_FUNC_CPU_PROPS_REG_DUMP_OBSOLETE = (UK_FUNC_ID + 13),
+#endif /* BASE_LEGACY_UK7_SUPPORT */
+ KBASE_FUNC_GPU_PROPS_REG_DUMP = (UK_FUNC_ID + 14),
- KBASE_FUNC_FIND_CPU_OFFSET,
+ KBASE_FUNC_FIND_CPU_OFFSET = (UK_FUNC_ID + 15),
- KBASE_FUNC_GET_VERSION,
- KBASE_FUNC_EXT_BUFFER_LOCK,
- KBASE_FUNC_SET_FLAGS,
+ KBASE_FUNC_GET_VERSION = (UK_FUNC_ID + 16),
+ KBASE_FUNC_EXT_BUFFER_LOCK = (UK_FUNC_ID + 17),
+ KBASE_FUNC_SET_FLAGS = (UK_FUNC_ID + 18),
- KBASE_FUNC_SET_TEST_DATA,
- KBASE_FUNC_INJECT_ERROR,
- KBASE_FUNC_MODEL_CONTROL,
+ KBASE_FUNC_SET_TEST_DATA = (UK_FUNC_ID + 19),
+ KBASE_FUNC_INJECT_ERROR = (UK_FUNC_ID + 20),
+ KBASE_FUNC_MODEL_CONTROL = (UK_FUNC_ID + 21),
- KBASE_FUNC_KEEP_GPU_POWERED,
+ KBASE_FUNC_KEEP_GPU_POWERED = (UK_FUNC_ID + 22),
- KBASE_FUNC_FENCE_VALIDATE,
- KBASE_FUNC_STREAM_CREATE,
- KBASE_FUNC_GET_PROFILING_CONTROLS,
- KBASE_FUNC_SET_PROFILING_CONTROLS, /* to be used only for testing
+ KBASE_FUNC_FENCE_VALIDATE = (UK_FUNC_ID + 23),
+ KBASE_FUNC_STREAM_CREATE = (UK_FUNC_ID + 24),
+ KBASE_FUNC_GET_PROFILING_CONTROLS = (UK_FUNC_ID + 25),
+ KBASE_FUNC_SET_PROFILING_CONTROLS = (UK_FUNC_ID + 26),
+ /* To be used only for testing purposes; otherwise these
+ * controls are set through the gator API */
- KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD,
+ KBASE_FUNC_DEBUGFS_MEM_PROFILE_ADD = (UK_FUNC_ID + 27),
KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 28),
- KBASE_FUNC_DISJOINT_QUERY
+ KBASE_FUNC_DISJOINT_QUERY = (UK_FUNC_ID + 29)
};
MMU_IRQ_CLEAR, MMU_IRQ_MASK, MMU_IRQ_STATUS registers.
*/
-#define MMU_REGS_PAGE_FAULT_FLAGS 16
+#define MMU_PAGE_FAULT_FLAGS 16
-/* Macros return bit number to retrvie page fault or bus eror flag from MMU registers */
-#define MMU_REGS_PAGE_FAULT_FLAG(n) (n)
-#define MMU_REGS_BUS_ERROR_FLAG(n) (n + MMU_REGS_PAGE_FAULT_FLAGS)
+/* Macros returning a bitmask to retrieve page fault or bus error flags from
+ * MMU registers */
+#define MMU_PAGE_FAULT(n) (1UL << (n))
+#define MMU_BUS_ERROR(n) (1UL << ((n) + MMU_PAGE_FAULT_FLAGS))
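+
+/* Illustrative use only (the read/handler names below are placeholders,
+ * not driver API): a handler can test address space n against the
+ * combined IRQ status word, e.g.
+ *
+ *	u32 status = read_mmu_irq_status(kbdev);
+ *
+ *	if (status & MMU_PAGE_FAULT(n))
+ *		handle_page_fault(kbdev, n);
+ *	if (status & MMU_BUS_ERROR(n))
+ *		handle_bus_error(kbdev, n);
+ */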
/*
* Begin MMU TRANSTAB register values
#ifdef MALI_INCLUDE_TFRX
#define GPU_ID_PI_TFRX 0x0880
#endif /* MALI_INCLUDE_TFRX */
-#ifdef MALI_INCLUDE_TF2X
-#define GPU_ID_PI_TF2X 0x0860
-#endif /* MALI_INCLUDE_TF2X */
+#define GPU_ID_PI_T86X 0x0860
/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */
#define GPU_ID_S_15DEV0 0x1
--- /dev/null
+#
+# (C) COPYRIGHT 2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+obj-y += mali_kbase_config_juno_soc.o
+
+
+obj-m += juno_mali_opp.o
--- /dev/null
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/scpi_protocol.h>
+
+
+static int init_juno_opps_from_scpi(struct device *dev)
+{
+ struct scpi_opp *sopp;
+ int i;
+
+ /* Hard-coded for Juno: DVFS domain 2 is the GPU */
+ sopp = scpi_dvfs_get_opps(2);
+ if (IS_ERR_OR_NULL(sopp))
+ return sopp ? PTR_ERR(sopp) : -ENODEV;
+
+ for (i = 0; i < sopp->count; i++) {
+ struct scpi_opp_entry *e = &sopp->opp[i];
+ dev_info(dev, "Mali OPP from SCPI: %u Hz @ %u mV\n",
+ e->freq_hz, e->volt_mv);
+
+ dev_pm_opp_add(dev, e->freq_hz, e->volt_mv * 1000);
+ }
+
+ return 0;
+}
+
+static int juno_setup_opps(void)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+ int err;
+
+ np = of_find_node_by_name(NULL, "gpu");
+ if (!np) {
+ printk(KERN_ERR "Failed to find DT entry for Mali\n");
+ return -EFAULT;
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ printk(KERN_ERR "Failed to find device for Mali\n");
+ of_node_put(np);
+ return -EFAULT;
+ }
+
+ err = init_juno_opps_from_scpi(&pdev->dev);
+
+ of_node_put(np);
+
+ return err;
+}
+
+module_init(juno_setup_opps);
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/ioport.h>
+#include <mali_kbase.h>
+#include <mali_kbase_defs.h>
+#include <mali_kbase_config.h>
+
+#include "../mali_kbase_power_actor.h"
+
+/* Juno Development Platform */
+
+#define HARD_RESET_AT_POWER_OFF 0
+
+#ifndef CONFIG_OF
+static struct kbase_io_resources io_resources = {
+ .job_irq_number = 65,
+ .mmu_irq_number = 66,
+ .gpu_irq_number = 64,
+ .io_memory_region = {
+ .start = 0x2D000000,
+ .end = 0x2D000000 + (4096 * 4) - 1}
+};
+#endif
+
+static int pm_callback_power_on(struct kbase_device *kbdev)
+{
+ /* Nothing is needed on Juno, but we may have destroyed GPU state (if the HARD_RESET code below is active) */
+ return 1;
+}
+
+static void pm_callback_power_off(struct kbase_device *kbdev)
+{
+#if HARD_RESET_AT_POWER_OFF
+ /* Cause a GPU hard reset to test whether we have actually idled the
+ * GPU and that we properly reconfigure the GPU on power up. Usually
+ * this would be dangerous, but if the GPU is working correctly it
+ * should be completely safe, as the GPU should not be active at this
+ * point. However, this is normally disabled because it will most
+ * likely interfere with bus logging etc.
+ */
+ KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
+ kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
+#endif
+}
+
+static struct kbase_pm_callback_conf pm_callbacks = {
+ .power_on_callback = pm_callback_power_on,
+ .power_off_callback = pm_callback_power_off,
+ .power_suspend_callback = NULL,
+ .power_resume_callback = NULL
+};
+
+static unsigned long juno_model_static_power(unsigned long voltage, unsigned long temperature)
+{
+ /* Calculate power, corrected for voltage.
+ * Shifts are done to avoid overflow. */
+ const unsigned long coefficient = (410UL << 20) / (729000000UL >> 10);
+ const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;
+
+ /* Calculate the temperature scaling factor. To be applied to the
+ * voltage scaled power. */
+ const unsigned long temp = temperature / 1000;
+ const unsigned long temp_squared = temp * temp;
+ const unsigned long temp_cubed = temp_squared * temp;
+ const unsigned long temp_scaling_factor =
+ (2 * temp_cubed)
+ - (80 * temp_squared)
+ + (4700 * temp)
+ + 32000;
+
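+ /* Example: at 900 mV and 60000 millidegrees the voltage-scaled term
+ * evaluates back to ~410 and temp_scaling_factor = 458000, giving
+ * roughly (410 * 458000) / 1000000 ~= 187 mW */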
+ return (((coefficient * voltage_cubed) >> 20) * temp_scaling_factor) / 1000000;
+}
+
+static unsigned long juno_model_dynamic_power(unsigned long freq,
+ unsigned long voltage)
+{
+ /* The inputs: freq (f) is in Hz, and voltage (v) in mV.
+ * The coefficient (c) is in mW/(MHz mV mV).
+ *
+ * This function calculates the dynamic power using the formula:
+ * Pdyn (mW) = c (mW/(MHz*mV*mV)) * v (mV) * v (mV) * f (MHz)
+ */
+ const unsigned long v2 = (voltage * voltage) / 1000; /* m*(V*V) */
+ const unsigned long f_mhz = freq / 1000000; /* MHz */
+ const unsigned long coefficient = 3600; /* mW/(MHz*mV*mV) */
+
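+ /* Example: 900 mV at 600 MHz gives v2 = 810 and f_mhz = 600, so
+ * (3600 * 810 * 600) / 1000000 = 1749 mW (integer maths) */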
+ return (coefficient * v2 * f_mhz) / 1000000; /* mW */
+}
+
+static struct mali_pa_model_ops juno_model_ops = {
+ .get_static_power = juno_model_static_power,
+ .get_dynamic_power = juno_model_dynamic_power,
+};
+
+static struct kbase_attribute config_attributes[] = {
+ { KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS, 500 },
+ { KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS, ((uintptr_t)&pm_callbacks) },
+ { KBASE_CONFIG_ATTR_POWER_MODEL_CALLBACKS, ((uintptr_t)&juno_model_ops) },
+
+ { KBASE_CONFIG_ATTR_END, 0 }
+};
+
+static struct kbase_platform_config versatile_platform_config = {
+ .attributes = config_attributes,
+#ifndef CONFIG_OF
+ .io_resources = &io_resources
+#endif
+};
+
+struct kbase_platform_config *kbase_get_platform_config(void)
+{
+ return &versatile_platform_config;
+}
+
+int kbase_platform_early_init(void)
+{
+ /* Nothing needed at this stage */
+ return 0;
+}
--- /dev/null
+/*
+ *
+ * (C) COPYRIGHT ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+/**
+ * Maximum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MAX 600000
+/**
+ * Minimum frequency GPU will be clocked at. Given in kHz.
+ * This must be specified as there is no default value.
+ *
+ * Attached value: number in kHz
+ * Default value: NA
+ */
+#define GPU_FREQ_KHZ_MIN 600000
+++ /dev/null
-#
-# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
-#
-# This program is free software and is provided to you under the terms of the
-# GNU General Public License version 2 as published by the Free Software
-# Foundation, and any use by you of this program is subject to the terms
-# of such GNU licence.
-#
-# A copy of the licence is included with the program, and can also be obtained
-# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
-# Boston, MA 02110-1301, USA.
-#
-#
-
-ccflags-y += -I$(srctree)/drivers/staging/android
-ifeq ($(CONFIG_MALI_MIDGARD),y)
-obj-y += mali_kbase_config_rk.o
-obj-y += mali_kbase_dvfs.o
-obj-y += mali_kbase_platform.o
-else ifeq ($(CONFIG_MALI_MIDGARD),m)
-SRC += platform/rk/mali_kbase_config_rk.c
-SRC += platform/rk/mali_kbase_dvfs.c
-SRC += platform/rk/mali_kbase_platform.c
-endif
+++ /dev/null
-/*
- *
- * (C) COPYRIGHT ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained
- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-
-
-
-
-#include <linux/ioport.h>
-#include <mali_kbase.h>
-#include <mali_kbase_defs.h>
-#include <mali_kbase_config.h>
-#ifdef CONFIG_UMP
-#include <linux/ump-common.h>
-#endif /* CONFIG_UMP */
-#include <platform/rk/mali_kbase_platform.h>
-#include <platform/rk/mali_kbase_dvfs.h>
-#include <linux/pm_runtime.h>
-#include <linux/suspend.h>
-#include <linux/reboot.h>
-
-int get_cpu_clock_speed(u32 *cpu_clock);
-
-#define HZ_IN_MHZ (1000000)
-#ifdef CONFIG_MALI_MIDGARD_RT_PM
-#define RUNTIME_PM_DELAY_TIME 50
-#endif
-
-/* Versatile Express (VE) configuration defaults shared between config_attributes[]
- * and config_attributes_hw_issue_8408[]. Settings are not shared for
- * JS_HARD_STOP_TICKS_SS and JS_RESET_TICKS_SS.
- */
-#define KBASE_VE_MEMORY_PER_PROCESS_LIMIT (512 * 1024 * 1024UL) /* 512MB */
-#define KBASE_VE_MEMORY_OS_SHARED_MAX (2048 * 1024 * 1024UL) /* 768MB */
-#define KBASE_VE_MEMORY_OS_SHARED_PERF_GPU KBASE_MEM_PERF_FAST/*KBASE_MEM_PERF_SLOW*/
-#define KBASE_VE_GPU_FREQ_KHZ_MAX 500000
-#define KBASE_VE_GPU_FREQ_KHZ_MIN 100000
-#ifdef CONFIG_UMP
-#define KBASE_VE_UMP_DEVICE UMP_DEVICE_Z_SHIFT
-#endif /* CONFIG_UMP */
-
-#define KBASE_VE_JS_SCHEDULING_TICK_NS_DEBUG 15000000u /* 15ms, an agressive tick for testing purposes. This will reduce performance significantly */
-#define KBASE_VE_JS_SOFT_STOP_TICKS_DEBUG 1 /* between 15ms and 30ms before soft-stop a job */
-#define KBASE_VE_JS_HARD_STOP_TICKS_SS_DEBUG 333 /* 5s before hard-stop */
-#define KBASE_VE_JS_HARD_STOP_TICKS_SS_8401_DEBUG 2000 /* 30s before hard-stop, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
-#define KBASE_VE_JS_HARD_STOP_TICKS_NSS_DEBUG 100000 /* 1500s (25mins) before NSS hard-stop */
-#define KBASE_VE_JS_RESET_TICKS_SS_DEBUG 500 /* 45s before resetting GPU, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) */
-#define KBASE_VE_JS_RESET_TICKS_SS_8401_DEBUG 3000 /* 7.5s before resetting GPU - for issue 8401 */
-#define KBASE_VE_JS_RESET_TICKS_NSS_DEBUG 100166 /* 1502s before resetting GPU */
-
-#define KBASE_VE_JS_SCHEDULING_TICK_NS 2500000000u /* 2.5s */
-#define KBASE_VE_JS_SOFT_STOP_TICKS 1 /* 2.5s before soft-stop a job */
-#define KBASE_VE_JS_HARD_STOP_TICKS_SS 2 /* 5s before hard-stop */
-#define KBASE_VE_JS_HARD_STOP_TICKS_SS_8401 12 /* 30s before hard-stop, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
-#define KBASE_VE_JS_HARD_STOP_TICKS_NSS 600 /* 1500s before NSS hard-stop */
-#define KBASE_VE_JS_RESET_TICKS_SS 3 /* 7.5s before resetting GPU */
-#define KBASE_VE_JS_RESET_TICKS_SS_8401 18 /* 45s before resetting GPU, for a certain GLES2 test at 128x128 (bound by combined vertex+tiler job) - for issue 8401 */
-#define KBASE_VE_JS_RESET_TICKS_NSS 601 /* 1502s before resetting GPU */
-
-#define KBASE_VE_JS_RESET_TIMEOUT_MS 500 /* 3s before cancelling stuck jobs */
-#define KBASE_VE_JS_CTX_TIMESLICE_NS 1000000 /* 1ms - an agressive timeslice for testing purposes (causes lots of scheduling out for >4 ctxs) */
-#define KBASE_VE_SECURE_BUT_LOSS_OF_PERFORMANCE ((uintptr_t)MALI_FALSE) /* By default we prefer performance over security on r0p0-15dev0 and KBASE_CONFIG_ATTR_ earlier */
-/*#define KBASE_VE_POWER_MANAGEMENT_CALLBACKS ((uintptr_t)&pm_callbacks)*/
-#define KBASE_VE_CPU_SPEED_FUNC ((uintptr_t)&get_cpu_clock_speed)
-
-static int mali_pm_notifier(struct notifier_block *nb,unsigned long event,void* cmd);
-static struct notifier_block mali_pm_nb = {
- .notifier_call = mali_pm_notifier
-};
-static int mali_reboot_notifier_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
-
- pr_info("%s enter\n",__func__);
- if (kbase_platform_dvfs_enable(false, MALI_DVFS_CURRENT_FREQ)!= MALI_TRUE)
- return -EPERM;
- pr_info("%s exit\n",__func__);
- return NOTIFY_OK;
-}
-
-static struct notifier_block mali_reboot_notifier = {
- .notifier_call = mali_reboot_notifier_event,
-};
-
-#ifndef CONFIG_OF
-static kbase_io_resources io_resources = {
- .job_irq_number = 68,
- .mmu_irq_number = 69,
- .gpu_irq_number = 70,
- .io_memory_region = {
- .start = 0xFC010000,
- .end = 0xFC010000 + (4096 * 5) - 1}
-};
-#endif
-int get_cpu_clock_speed(u32 *cpu_clock)
-{
-#if 0
- struct clk *cpu_clk;
- u32 freq = 0;
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return 1;
- freq = clk_get_rate(cpu_clk);
- *cpu_clock = (freq / HZ_IN_MHZ);
-#endif
- return 0;
-}
-
-static int mali_pm_notifier(struct notifier_block *nb,unsigned long event,void* cmd)
-{
- int err = NOTIFY_OK;
- switch (event) {
- case PM_SUSPEND_PREPARE:
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- /*
- pr_info("%s,PM_SUSPEND_PREPARE\n",__func__);
- */
- if (kbase_platform_dvfs_enable(false, p_mali_dvfs_infotbl[0].clock)!= MALI_TRUE)
- err = NOTIFY_BAD;
-#endif
- break;
- case PM_POST_SUSPEND:
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- /*
- pr_info("%s,PM_POST_SUSPEND\n",__func__);
- */
- if (kbase_platform_dvfs_enable(true, p_mali_dvfs_infotbl[0].clock)!= MALI_TRUE)
- err = NOTIFY_BAD;
-#endif
- break;
- default:
- break;
- }
- return err;
-}
-
-/*
- rk3288 hardware specific initialization
- */
-mali_bool kbase_platform_rk_init(struct kbase_device *kbdev)
-{
- if(MALI_ERROR_NONE == kbase_platform_init(kbdev))
- {
- if (register_pm_notifier(&mali_pm_nb)) {
- return MALI_FALSE;
- }
- pr_info("%s,register_reboot_notifier\n",__func__);
- register_reboot_notifier(&mali_reboot_notifier);
- return MALI_TRUE;
- }
- return MALI_FALSE;
-}
-
-/*
- rk3288 hardware specific termination
-*/
-void kbase_platform_rk_term(struct kbase_device *kbdev)
-{
- unregister_pm_notifier(&mali_pm_nb);
-#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
- kbase_platform_remove_sysfs_file(kbdev->dev);
-#endif /* CONFIG_MALI_MIDGARD_DEBUG_SYS */
- kbase_platform_term(kbdev);
-}
-
-kbase_platform_funcs_conf platform_funcs = {
- .platform_init_func = &kbase_platform_rk_init,
- .platform_term_func = &kbase_platform_rk_term,
-};
-
-#ifdef CONFIG_MALI_MIDGARD_RT_PM
-static int pm_callback_power_on(struct kbase_device *kbdev)
-{
- int result;
- int ret_val;
- struct device *dev = kbdev->dev;
- struct rk_context *platform;
- platform = (struct rk_context *)kbdev->platform_context;
-
- if (pm_runtime_status_suspended(dev))
- ret_val = 1;
- else
- ret_val = 0;
-
- if(dev->power.disable_depth > 0) {
- if(platform->cmu_pmu_status == 0)
- kbase_platform_cmu_pmu_control(kbdev, 1);
- return ret_val;
- }
- result = pm_runtime_resume(dev);
-
- if (result < 0 && result == -EAGAIN)
- kbase_platform_cmu_pmu_control(kbdev, 1);
- else if (result < 0)
- printk(KERN_ERR "pm_runtime_get_sync failed (%d)\n", result);
-
- return ret_val;
-}
-
-static void pm_callback_power_off(struct kbase_device *kbdev)
-{
- struct device *dev = kbdev->dev;
- pm_schedule_suspend(dev, RUNTIME_PM_DELAY_TIME);
-}
-
-mali_error kbase_device_runtime_init(struct kbase_device *kbdev)
-{
- pm_suspend_ignore_children(kbdev->dev, true);
- pm_runtime_enable(kbdev->dev);
-#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
- if (kbase_platform_create_sysfs_file(kbdev->dev))
- return MALI_ERROR_FUNCTION_FAILED;
-#endif /* CONFIG_MALI_MIDGARD_DEBUG_SYS */
- return MALI_ERROR_NONE;
-}
-
-void kbase_device_runtime_disable(struct kbase_device *kbdev)
-{
- pm_runtime_disable(kbdev->dev);
-}
-
-static int pm_callback_runtime_on(struct kbase_device *kbdev)
-{
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
- unsigned long flags;
- unsigned int clock;
-#endif
-
- kbase_platform_power_on(kbdev);
-
- kbase_platform_clock_on(kbdev);
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- if (platform->dvfs_enabled) {
- if(platform->gpu_in_touch) {
- clock = p_mali_dvfs_infotbl[MALI_DVFS_STEP-1].clock;
- spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
- platform->gpu_in_touch = false;
- spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
- } else {
- clock = MALI_DVFS_CURRENT_FREQ;
- }
- /*
- pr_info("%s,clock = %d\n",__func__,clock);
- */
- if (kbase_platform_dvfs_enable(true, clock)!= MALI_TRUE)
- return -EPERM;
-
- } else {
- if (kbase_platform_dvfs_enable(false, MALI_DVFS_CURRENT_FREQ)!= MALI_TRUE)
- return -EPERM;
- }
-#endif
- return 0;
-}
-
-static void pm_callback_runtime_off(struct kbase_device *kbdev)
-{
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
- unsigned long flags;
-#endif
-
- kbase_platform_clock_off(kbdev);
- kbase_platform_power_off(kbdev);
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- if (platform->dvfs_enabled)
- {
- /*printk("%s\n",__func__);*/
- if (kbase_platform_dvfs_enable(false, p_mali_dvfs_infotbl[0].clock)!= MALI_TRUE)
- printk("[err] disabling dvfs is faled\n");
- spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
- platform->gpu_in_touch = false;
- spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
- }
-#endif
-}
-
-static kbase_pm_callback_conf pm_callbacks = {
- .power_on_callback = pm_callback_power_on,
- .power_off_callback = pm_callback_power_off,
-#ifdef CONFIG_PM_RUNTIME
- .power_runtime_init_callback = kbase_device_runtime_init,
- .power_runtime_term_callback = kbase_device_runtime_disable,
- .power_runtime_on_callback = pm_callback_runtime_on,
- .power_runtime_off_callback = pm_callback_runtime_off,
-
-#else /* CONFIG_PM_RUNTIME */
- .power_runtime_init_callback = NULL,
- .power_runtime_term_callback = NULL,
- .power_runtime_on_callback = NULL,
- .power_runtime_off_callback = NULL,
-
-#endif /* CONFIG_PM_RUNTIME */
-};
-#endif
-
-
-/* Please keep table config_attributes in sync with config_attributes_hw_issue_8408 */
-static kbase_attribute config_attributes[] = {
-#ifdef CONFIG_UMP
- {
- KBASE_CONFIG_ATTR_UMP_DEVICE,
- KBASE_VE_UMP_DEVICE},
-#endif /* CONFIG_UMP */
-#ifdef CONFIG_MALI_MIDGARD_RT_PM
- {
- KBASE_CONFIG_ATTR_POWER_MANAGEMENT_CALLBACKS,
- (uintptr_t)&pm_callbacks},
-#endif
- {
- KBASE_CONFIG_ATTR_PLATFORM_FUNCS,
- (uintptr_t) &platform_funcs},
-
- {
- KBASE_CONFIG_ATTR_JS_RESET_TIMEOUT_MS,
- KBASE_VE_JS_RESET_TIMEOUT_MS},
- {
- KBASE_CONFIG_ATTR_END,
- 0}
-};
-
-static kbase_platform_config rk_platform_config = {
- .attributes = config_attributes,
-#ifndef CONFIG_OF
- .io_resources = &io_resources
-#endif
-};
-#if 1
-kbase_platform_config *kbase_get_platform_config(void)
-{
- return &rk_platform_config;
-}
-#endif
-int kbase_platform_early_init(void)
-{
- /* Nothing needed at this stage */
- return 0;
-}
+++ /dev/null
-/* drivers/gpu/t6xx/kbase/src/platform/manta/mali_kbase_dvfs.c
- *
- *
- * Rockchip SoC Mali-T764 DVFS driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software FoundatIon.
- */
-
-/**
- * @file mali_kbase_dvfs.c
- * DVFS
- */
-
-#include <mali_kbase.h>
-#include <mali_kbase_uku.h>
-#include <mali_kbase_mem.h>
-#include <mali_midg_regmap.h>
-#include <mali_kbase_mem_linux.h>
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/list.h>
-#include <linux/semaphore.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/cpufreq.h>
-#include <linux/fb.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/regulator/consumer.h>
-#include <linux/regulator/driver.h>
-#include <linux/rk_fb.h>
-#include <linux/input.h>
-#include <linux/rockchip/common.h>
-
-#include <platform/rk/mali_kbase_platform.h>
-#include <platform/rk/mali_kbase_dvfs.h>
-#include <mali_kbase_gator.h>
-#include <linux/rockchip/dvfs.h>
-/***********************************************************/
-/* This table and variable are using the check time share of GPU Clock */
-/***********************************************************/
-extern int rockchip_tsadc_get_temp(int chn);
-#define gpu_temp_limit 110
-#define gpu_temp_statis_time 1
-#define level0_min 0
-#define level0_max 70
-#define levelf_max 100
-static u32 div_dvfs = 0 ;
-
-static mali_dvfs_info mali_dvfs_infotbl[] = {
- {925000, 100000, 0, 70, 0},
- {925000, 160000, 50, 65, 0},
- {1025000, 266000, 60, 78, 0},
- {1075000, 350000, 65, 75, 0},
- {1125000, 400000, 70, 75, 0},
- {1200000, 500000, 90, 100, 0},
-};
-mali_dvfs_info *p_mali_dvfs_infotbl = NULL;
-
-unsigned int MALI_DVFS_STEP = ARRAY_SIZE(mali_dvfs_infotbl);
-
-static struct cpufreq_frequency_table *mali_freq_table = NULL;
-#ifdef CONFIG_MALI_MIDGARD_DVFS
-typedef struct _mali_dvfs_status_type {
- struct kbase_device *kbdev;
- int step;
- int utilisation;
- u32 temperature;
- u32 temperature_time;
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- int upper_lock;
- int under_lock;
-#endif
-
-} mali_dvfs_status;
-
-static struct workqueue_struct *mali_dvfs_wq = 0;
-spinlock_t mali_dvfs_spinlock;
-struct mutex mali_set_clock_lock;
-struct mutex mali_enable_clock_lock;
-
-#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
-static void update_time_in_state(int level);
-#endif
-/*dvfs status*/
-static mali_dvfs_status mali_dvfs_status_current;
-
-#define LIMIT_FPS 60
-#define LIMIT_FPS_POWER_SAVE 50
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
-static void gpufreq_input_event(struct input_handle *handle, unsigned int type,
- unsigned int code, int value)
-{
- mali_dvfs_status *dvfs_status;
- struct rk_context *platform;
- unsigned long flags;
-
- if (type != EV_ABS)
- return;
-
- dvfs_status = &mali_dvfs_status_current;
- platform = (struct rk_context *)dvfs_status->kbdev->platform_context;
-
- spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
- platform->gpu_in_touch = true;
- spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
-}
-
-static int gpufreq_input_connect(struct input_handler *handler,
- struct input_dev *dev, const struct input_device_id *id)
-{
- struct input_handle *handle;
- int error;
-
- handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->dev = dev;
- handle->handler = handler;
- handle->name = "gpufreq";
-
- error = input_register_handle(handle);
- if (error)
- goto err2;
-
- error = input_open_device(handle);
- if (error)
- goto err1;
- pr_info("%s\n",__func__);
- return 0;
-err1:
- input_unregister_handle(handle);
-err2:
- kfree(handle);
- return error;
-}
-
-static void gpufreq_input_disconnect(struct input_handle *handle)
-{
- input_close_device(handle);
- input_unregister_handle(handle);
- kfree(handle);
- pr_info("%s\n",__func__);
-}
-
-static const struct input_device_id gpufreq_ids[] = {
- {
- .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
- INPUT_DEVICE_ID_MATCH_ABSBIT,
- .evbit = { BIT_MASK(EV_ABS) },
- .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
- BIT_MASK(ABS_MT_POSITION_X) |
- BIT_MASK(ABS_MT_POSITION_Y) },
- },
- {
- .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
- INPUT_DEVICE_ID_MATCH_ABSBIT,
- .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
- .absbit = { [BIT_WORD(ABS_X)] =
- BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
- },
- { },
-};
-
-static struct input_handler gpufreq_input_handler = {
- .event = gpufreq_input_event,
- .connect = gpufreq_input_connect,
- .disconnect = gpufreq_input_disconnect,
- .name = "gpufreq",
- .id_table = gpufreq_ids,
-};
-#endif
-
-static void mali_dvfs_event_proc(struct work_struct *w)
-{
- unsigned long flags;
- mali_dvfs_status *dvfs_status;
- static int level_down_time = 0;
- static int level_up_time = 0;
- static u32 temp_tmp;
- struct rk_context *platform;
- u32 fps=0;
- u32 fps_limit;
- u32 policy;
- mutex_lock(&mali_enable_clock_lock);
- dvfs_status = &mali_dvfs_status_current;
-
- if (!kbase_platform_dvfs_get_enable_status()) {
- mutex_unlock(&mali_enable_clock_lock);
- return;
- }
- platform = (struct rk_context *)dvfs_status->kbdev->platform_context;
-
- fps = rk_get_real_fps(0);
-
- dvfs_status->temperature_time++;
-
- temp_tmp += rockchip_tsadc_get_temp(1);
-
- if(dvfs_status->temperature_time >= gpu_temp_statis_time) {
- dvfs_status->temperature_time = 0;
- dvfs_status->temperature = temp_tmp / gpu_temp_statis_time;
- temp_tmp = 0;
- }
-
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- /*
- policy = rockchip_pm_get_policy();
- */
- policy = ROCKCHIP_PM_POLICY_NORMAL;
-
- if (ROCKCHIP_PM_POLICY_PERFORMANCE == policy) {
- dvfs_status->step = MALI_DVFS_STEP - 1;
- } else {
- fps_limit = (ROCKCHIP_PM_POLICY_NORMAL == policy)?LIMIT_FPS : LIMIT_FPS_POWER_SAVE;
- /*
- printk("policy : %d , fps_limit = %d\n",policy,fps_limit);
- */
-
- /*give priority to temperature unless in performance mode */
- if (dvfs_status->temperature > gpu_temp_limit) {
- if(dvfs_status->step > 0)
- dvfs_status->step--;
-
- if(gpu_temp_statis_time > 1)
- dvfs_status->temperature = 0;
- /*
- pr_info("decrease step for temperature over %d,next clock = %d\n",
- gpu_temp_limit, mali_dvfs_infotbl[dvfs_status->step].clock);
- */
- } else if ((dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) &&
- (dvfs_status->step < MALI_DVFS_STEP-1) && fps < fps_limit) {
- level_up_time++;
- if (level_up_time == MALI_DVFS_UP_TIME_INTERVAL) {
- /*
- printk("up,utilisation=%d,current clock=%d,fps = %d,temperature = %d",
- dvfs_status->utilisation, mali_dvfs_infotbl[dvfs_status->step].clock,
- fps,dvfs_status->temperature);
- */
- dvfs_status->step++;
- level_up_time = 0;
- /*
- printk(" next clock=%d\n",mali_dvfs_infotbl[dvfs_status->step].clock);
- */
- BUG_ON(dvfs_status->step >= MALI_DVFS_STEP);
- }
- level_down_time = 0;
- } else if ((dvfs_status->step > 0) &&
- (dvfs_status->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) {
- level_down_time++;
- if (level_down_time==MALI_DVFS_DOWN_TIME_INTERVAL) {
- /*
- printk("down,utilisation=%d,current clock=%d,fps = %d,temperature = %d",
- dvfs_status->utilisation,
- mali_dvfs_infotbl[dvfs_status->step].clock,fps,dvfs_status->temperature);
- */
- BUG_ON(dvfs_status->step <= 0);
- dvfs_status->step--;
- level_down_time = 0;
- /*
- printk(" next clock=%d\n",mali_dvfs_infotbl[dvfs_status->step].clock);
- */
- }
- level_up_time = 0;
- } else {
- level_down_time = 0;
- level_up_time = 0;
- /*
- printk("keep,utilisation=%d,current clock=%d,fps = %d,temperature = %d\n",
- dvfs_status->utilisation,
- mali_dvfs_infotbl[dvfs_status->step].clock,fps,dvfs_status->temperature);
- */
- }
- }
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- if ((dvfs_status->upper_lock >= 0) && (dvfs_status->step > dvfs_status->upper_lock))
- dvfs_status->step = dvfs_status->upper_lock;
-
- if (dvfs_status->under_lock > 0) {
- if (dvfs_status->step < dvfs_status->under_lock)
- dvfs_status->step = dvfs_status->under_lock;
- }
-#endif
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
- kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
-
- mutex_unlock(&mali_enable_clock_lock);
-}
-
-static DECLARE_WORK(mali_dvfs_work, mali_dvfs_event_proc);
-
-int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
- u32 util_gl_share_no_use,u32 util_cl_share_no_use[2])
-{
- unsigned long flags;
- struct rk_context *platform;
-
- BUG_ON(!kbdev);
- platform = (struct rk_context *)kbdev->platform_context;
-
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- if (platform->time_tick < MALI_DVFS_UP_TIME_INTERVAL) {
- platform->time_tick++;
- platform->time_busy += kbdev->pm.metrics.time_busy;
- platform->time_idle += kbdev->pm.metrics.time_idle;
- } else {
- platform->time_busy = kbdev->pm.metrics.time_busy;
- platform->time_idle = kbdev->pm.metrics.time_idle;
- platform->time_tick = 0;
- }
-
- if ((platform->time_tick == MALI_DVFS_UP_TIME_INTERVAL) &&
- (platform->time_idle + platform->time_busy > 0))
- platform->utilisation = (100 * platform->time_busy) /
- (platform->time_idle + platform->time_busy);
-
- mali_dvfs_status_current.utilisation = utilisation;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-
- queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
- /*add error handle here */
- return MALI_TRUE;
-}
-
-int kbase_platform_dvfs_get_utilisation(void)
-{
- unsigned long flags;
- int utilisation = 0;
-
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- utilisation = mali_dvfs_status_current.utilisation;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-
- return utilisation;
-}
-
-int kbase_platform_dvfs_get_enable_status(void)
-{
- struct kbase_device *kbdev;
- unsigned long flags;
- int enable;
-
- kbdev = mali_dvfs_status_current.kbdev;
- spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
- enable = kbdev->pm.metrics.timer_active;
- spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
-
- return enable;
-}
-
-int kbase_platform_dvfs_enable(bool enable, int freq)
-{
- mali_dvfs_status *dvfs_status;
- struct kbase_device *kbdev;
- unsigned long flags;
- struct rk_context *platform;
-
- dvfs_status = &mali_dvfs_status_current;
- kbdev = mali_dvfs_status_current.kbdev;
-
- BUG_ON(kbdev == NULL);
- platform = (struct rk_context *)kbdev->platform_context;
-
- mutex_lock(&mali_enable_clock_lock);
-
- if (enable != kbdev->pm.metrics.timer_active) {
- if (enable) {
- spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
- kbdev->pm.metrics.timer_active = MALI_TRUE;
- spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
- hrtimer_start(&kbdev->pm.metrics.timer,
- HR_TIMER_DELAY_MSEC(KBASE_PM_DVFS_FREQUENCY),
- HRTIMER_MODE_REL);
- } else {
- spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
- kbdev->pm.metrics.timer_active = MALI_FALSE;
- spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
- hrtimer_cancel(&kbdev->pm.metrics.timer);
- }
- }
-
- if (freq != MALI_DVFS_CURRENT_FREQ) {
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- platform->time_tick = 0;
- platform->time_busy = 0;
- platform->time_idle = 0;
- platform->utilisation = 0;
- dvfs_status->step = kbase_platform_dvfs_get_level(freq);
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
- kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
- }
-
- mutex_unlock(&mali_enable_clock_lock);
-
- return MALI_TRUE;
-}
-#define dividend 7
-#define fix_float(a) ((((a)*dividend)%10)?((((a)*dividend)/10)+1):(((a)*dividend)/10))
-static bool calculate_dvfs_max_min_threshold(u32 level)
-{
- u32 pre_level;
- u32 tmp ;
- if (0 == level) {
- if ((MALI_DVFS_STEP-1) == level) {
- mali_dvfs_infotbl[level].min_threshold = level0_min;
- mali_dvfs_infotbl[level].max_threshold = levelf_max;
- } else {
- mali_dvfs_infotbl[level].min_threshold = level0_min;
- mali_dvfs_infotbl[level].max_threshold = level0_max;
- }
- } else {
- pre_level = level - 1;
- if ((MALI_DVFS_STEP-1) == level) {
- mali_dvfs_infotbl[level].max_threshold = levelf_max;
- } else {
- mali_dvfs_infotbl[level].max_threshold = mali_dvfs_infotbl[pre_level].max_threshold +
- div_dvfs;
- }
- mali_dvfs_infotbl[level].min_threshold = (mali_dvfs_infotbl[pre_level].max_threshold *
- (mali_dvfs_infotbl[pre_level].clock/1000)) /
- (mali_dvfs_infotbl[level].clock/1000);
-
- tmp = mali_dvfs_infotbl[level].max_threshold - mali_dvfs_infotbl[level].min_threshold;
-
- mali_dvfs_infotbl[level].min_threshold += fix_float(tmp);
- }
- pr_info("mali_dvfs_infotbl[%d].clock=%d,min_threshold=%d,max_threshold=%d\n",
- level,mali_dvfs_infotbl[level].clock, mali_dvfs_infotbl[level].min_threshold,
- mali_dvfs_infotbl[level].max_threshold);
- return MALI_TRUE;
-}
-
-int kbase_platform_dvfs_init(struct kbase_device *kbdev)
-{
- unsigned long flags;
- /*default status
- add here with the right function to get initilization value.
- */
- struct rk_context *platform;
- int i;
- int rc;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (NULL == platform)
- panic("oops");
-
- mali_freq_table = dvfs_get_freq_volt_table(platform->mali_clk_node);
-
- if (mali_freq_table == NULL) {
- printk("mali freq table not assigned yet,use default\n");
- goto not_assigned ;
- } else {
- /*recalculte step*/
- MALI_DVFS_STEP = 0;
- for (i = 0; mali_freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
- mali_dvfs_infotbl[i].clock = mali_freq_table[i].frequency;
- MALI_DVFS_STEP++;
- }
- if(MALI_DVFS_STEP > 1)
- div_dvfs = round_up(((levelf_max - level0_max)/(MALI_DVFS_STEP-1)),1);
- printk("MALI_DVFS_STEP=%d,div_dvfs=%d\n",MALI_DVFS_STEP,div_dvfs);
-
- for(i=0;i<MALI_DVFS_STEP;i++)
- calculate_dvfs_max_min_threshold(i);
- p_mali_dvfs_infotbl = mali_dvfs_infotbl;
- }
-not_assigned :
- if (!mali_dvfs_wq)
- mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs");
-
- spin_lock_init(&mali_dvfs_spinlock);
- mutex_init(&mali_set_clock_lock);
- mutex_init(&mali_enable_clock_lock);
-
- spin_lock_init(&platform->gpu_in_touch_lock);
- rc = input_register_handler(&gpufreq_input_handler);
-
- /*add a error handling here */
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- mali_dvfs_status_current.kbdev = kbdev;
- mali_dvfs_status_current.utilisation = 0;
- mali_dvfs_status_current.step = 0;
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- mali_dvfs_status_current.upper_lock = -1;
- mali_dvfs_status_current.under_lock = -1;
-#endif
-
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-
- return MALI_TRUE;
-}
-
-void kbase_platform_dvfs_term(void)
-{
- if (mali_dvfs_wq)
- destroy_workqueue(mali_dvfs_wq);
-
- mali_dvfs_wq = NULL;
-
- input_unregister_handler(&gpufreq_input_handler);
-}
-#endif /*CONFIG_MALI_MIDGARD_DVFS*/
-
-int mali_get_dvfs_upper_locked_freq(void)
-{
- unsigned long flags;
- int locked_level = -1;
-
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- if (mali_dvfs_status_current.upper_lock >= 0)
- locked_level = mali_dvfs_infotbl[mali_dvfs_status_current.upper_lock].clock;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-#endif
- return locked_level;
-}
-
-int mali_get_dvfs_under_locked_freq(void)
-{
- unsigned long flags;
- int locked_level = -1;
-
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- if (mali_dvfs_status_current.under_lock >= 0)
- locked_level = mali_dvfs_infotbl[mali_dvfs_status_current.under_lock].clock;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-#endif
- return locked_level;
-}
-
-int mali_get_dvfs_current_level(void)
-{
- unsigned long flags;
- int current_level = -1;
-
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- current_level = mali_dvfs_status_current.step;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-#endif
- return current_level;
-}
-
-int mali_dvfs_freq_lock(int level)
-{
- unsigned long flags;
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- if (mali_dvfs_status_current.under_lock >= 0 &&
- mali_dvfs_status_current.under_lock > level) {
- printk(KERN_ERR " Upper lock Error : Attempting to set upper lock to below under lock\n");
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
- return -1;
- }
- mali_dvfs_status_current.upper_lock = level;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-
- printk(KERN_DEBUG " Upper Lock Set : %d\n", level);
-#endif
- return 0;
-}
-
-void mali_dvfs_freq_unlock(void)
-{
- unsigned long flags;
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- mali_dvfs_status_current.upper_lock = -1;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-#endif
- printk(KERN_DEBUG "mali Upper Lock Unset\n");
-}
-
-int mali_dvfs_freq_under_lock(int level)
-{
- unsigned long flags;
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- if (mali_dvfs_status_current.upper_lock >= 0 &&
- mali_dvfs_status_current.upper_lock < level) {
- printk(KERN_ERR "mali Under lock Error : Attempting to set under lock to above upper lock\n");
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
- return -1;
- }
- mali_dvfs_status_current.under_lock = level;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-
- printk(KERN_DEBUG "mali Under Lock Set : %d\n", level);
-#endif
- return 0;
-}
-
-void mali_dvfs_freq_under_unlock(void)
-{
- unsigned long flags;
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- spin_lock_irqsave(&mali_dvfs_spinlock, flags);
- mali_dvfs_status_current.under_lock = -1;
- spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
-#endif
- printk(KERN_DEBUG " mali clock Under Lock Unset\n");
-}
-
-void kbase_platform_dvfs_set_clock(struct kbase_device *kbdev, int freq)
-{
- struct rk_context *platform;
-
- if (!kbdev)
- panic("oops");
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (NULL == platform)
- panic("oops");
-
- if (!platform->mali_clk_node) {
- printk("mali_clk_node not init\n");
- return;
- }
- mali_dvfs_clk_set(platform->mali_clk_node,freq);
-
- return;
-}
-
-
-int kbase_platform_dvfs_get_level(int freq)
-{
- int i;
- for (i = 0; i < MALI_DVFS_STEP; i++) {
- if (mali_dvfs_infotbl[i].clock == freq)
- return i;
- }
- return -1;
-}
-void kbase_platform_dvfs_set_level(struct kbase_device *kbdev, int level)
-{
- static int prev_level = -1;
-
- if (level == prev_level)
- return;
-
- if (WARN_ON((level >= MALI_DVFS_STEP) || (level < 0))) {
- printk("unkown mali dvfs level:level = %d,set clock not done \n",level);
- return ;
- }
- /*panic("invalid level");*/
-#ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK
- if (mali_dvfs_status_current.upper_lock >= 0 &&
- level > mali_dvfs_status_current.upper_lock)
- level = mali_dvfs_status_current.upper_lock;
- if (mali_dvfs_status_current.under_lock >= 0 &&
- level < mali_dvfs_status_current.under_lock)
- level = mali_dvfs_status_current.under_lock;
-#endif
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- mutex_lock(&mali_set_clock_lock);
-#endif
-
- kbase_platform_dvfs_set_clock(kbdev, mali_dvfs_infotbl[level].clock);
-#if defined(CONFIG_MALI_MIDGARD_DEBUG_SYS) && defined(CONFIG_MALI_MIDGARD_DVFS)
- update_time_in_state(prev_level);
-#endif
- prev_level = level;
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- mutex_unlock(&mali_set_clock_lock);
-#endif
-}
-
-#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
-#ifdef CONFIG_MALI_MIDGARD_DVFS
-static void update_time_in_state(int level)
-{
- u64 current_time;
- static u64 prev_time=0;
-
- if (level < 0)
- return;
-
- if (!kbase_platform_dvfs_get_enable_status())
- return;
-
- if (prev_time ==0)
- prev_time=get_jiffies_64();
-
- current_time = get_jiffies_64();
- mali_dvfs_infotbl[level].time += current_time-prev_time;
-
- prev_time = current_time;
-}
-#endif
-
-ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct kbase_device *kbdev;
- ssize_t ret = 0;
- int i;
-
- kbdev = dev_get_drvdata(dev);
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- update_time_in_state(mali_dvfs_status_current.step);
-#endif
- if (!kbdev)
- return -ENODEV;
-
- for (i = 0; i < MALI_DVFS_STEP; i++)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- "%d %llu\n",
- mali_dvfs_infotbl[i].clock, mali_dvfs_infotbl[i].time);
-
- if (ret < PAGE_SIZE - 1)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
- else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
-
- return ret;
-}
-
-ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int i;
-
- for (i = 0; i < MALI_DVFS_STEP; i++)
- mali_dvfs_infotbl[i].time = 0;
-
- printk(KERN_DEBUG "time_in_state value is reset complete.\n");
- return count;
-}
-#endif
+++ /dev/null
-/* drivers/gpu/midgard/platform/rk/mali_kbase_dvfs.h
- *
- * Rockchip SoC Mali-T764 DVFS driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software FoundatIon.
- */
-
-/**
- * @file mali_kbase_dvfs.h
- * DVFS
- */
-
-#ifndef _KBASE_DVFS_H_
-#define _KBASE_DVFS_H_
-
-/* Frequency that DVFS clock frequency decisions should be made */
-#define KBASE_PM_DVFS_FREQUENCY 100
-
-#define MALI_DVFS_KEEP_STAY_CNT 10
-#define MALI_DVFS_UP_TIME_INTERVAL 1
-#define MALI_DVFS_DOWN_TIME_INTERVAL 2
-#define MALI_DVFS_CURRENT_FREQ 0
-#if 0
-#define MALI_DVFS_BL_CONFIG_FREQ 500
-#define MALI_DVFS_START_FREQ 400
-#endif
-typedef struct _mali_dvfs_info {
- unsigned int voltage;
- unsigned int clock;
- int min_threshold;
- int max_threshold;
- unsigned long long time;
-} mali_dvfs_info;
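-
-/*
- * Each entry pairs an operating point with the utilisation band that keeps
- * the governor at that level. Hypothetical entry (all values illustrative,
- * not taken from the shipped table):
- *
- *   { .voltage = 1100000, .clock = 400000,
- *     .min_threshold = 50, .max_threshold = 90, .time = 0 }
- */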
-
-#define MALI_KHZ 1000 /* DVFS table clocks are stored in kHz */
-extern mali_dvfs_info *p_mali_dvfs_infotbl;
-extern unsigned int MALI_DVFS_STEP;
-#ifdef CONFIG_MALI_MIDGARD_DVFS
-#define CONFIG_MALI_MIDGARD_FREQ_LOCK
-#endif
-
-void kbase_platform_dvfs_set_clock(struct kbase_device *kbdev, int freq);
-void kbase_platform_dvfs_set_level(struct kbase_device *kbdev, int level);
-int kbase_platform_dvfs_get_level(int freq);
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
-int kbase_platform_dvfs_init(struct kbase_device *dev);
-void kbase_platform_dvfs_term(void);
-/*int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation);*/
-/*int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,u32 util_gl_share, u32 util_cl_share[2]);*/
-int kbase_platform_dvfs_get_enable_status(void);
-int kbase_platform_dvfs_enable(bool enable, int freq);
-int kbase_platform_dvfs_get_utilisation(void);
-#endif
-
-int mali_get_dvfs_current_level(void);
-int mali_get_dvfs_upper_locked_freq(void);
-int mali_get_dvfs_under_locked_freq(void);
-int mali_dvfs_freq_lock(int level);
-void mali_dvfs_freq_unlock(void);
-int mali_dvfs_freq_under_lock(int level);
-void mali_dvfs_freq_under_unlock(void);
-
-ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf);
-ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
-
-#endif /* _KBASE_DVFS_H_ */
+++ /dev/null
-/* drivers/gpu/t6xx/kbase/src/platform/rk/mali_kbase_platform.c
- *
- * Rockchip SoC Mali-T764 platform-dependent codes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/**
- * @file mali_kbase_platform.c
- * Platform-dependent init.
- */
-#include <mali_kbase.h>
-#include <mali_kbase_pm.h>
-#include <mali_kbase_uku.h>
-#include <mali_kbase_mem.h>
-#include <mali_midg_regmap.h>
-#include <mali_kbase_mem_linux.h>
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/list.h>
-#include <linux/semaphore.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <linux/fb.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <platform/rk/mali_kbase_platform.h>
-#include <platform/rk/mali_kbase_dvfs.h>
-
-#include <mali_kbase_gator.h>
-
-#include <linux/rockchip/dvfs.h>
-
-#define MALI_T7XX_DEFAULT_CLOCK 100000 /* kHz (100 MHz) */
-
-
-static int mali_clk_status = 0;
-static int mali_pd_status = 0;
-
-u32 kbase_group_error = 0;
-static struct kobject *rk_gpu;
-
-int mali_dvfs_clk_set(struct dvfs_node *node, unsigned long rate)
-{
- int ret;
-
- if (!node) {
- printk(KERN_ERR "clk_get_dvfs_node error\n");
- return -EINVAL;
- }
-
- ret = dvfs_clk_set_rate(node, rate * MALI_KHZ);
- if (ret)
- printk(KERN_ERR "dvfs_clk_set_rate error\n");
-
- return ret;
-}
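-
-/*
- * Table clocks are stored in kHz, so mali_dvfs_clk_set() scales by MALI_KHZ
- * before calling dvfs_clk_set_rate(), which takes Hz; for example,
- * mali_dvfs_clk_set(node, 400000) requests 400 MHz.
- */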
-static int kbase_platform_power_clock_init(struct kbase_device *kbdev)
-{
- struct rk_context *platform;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (NULL == platform)
- panic("%s: platform_context is NULL", __func__);
-
- /* enable the mali t760 power domain */
- platform->mali_pd = clk_get(NULL, "pd_gpu");
- if (IS_ERR_OR_NULL(platform->mali_pd)) {
- platform->mali_pd = NULL;
- printk(KERN_ERR "%s, %s(): failed to get [platform->mali_pd]\n", __FILE__, __func__);
- goto out;
- }
- clk_prepare_enable(platform->mali_pd);
- printk(KERN_INFO "mali pd enabled\n");
- mali_pd_status = 1;
-
- /* enable the mali t760 clock */
- platform->mali_clk_node = clk_get_dvfs_node("clk_gpu");
- if (IS_ERR_OR_NULL(platform->mali_clk_node)) {
- platform->mali_clk_node = NULL;
- printk(KERN_ERR "%s, %s(): failed to get [platform->mali_clk_node]\n", __FILE__, __func__);
- goto out;
- }
- dvfs_clk_prepare_enable(platform->mali_clk_node);
- printk(KERN_INFO "clk enabled\n");
- mali_dvfs_clk_set(platform->mali_clk_node, MALI_T7XX_DEFAULT_CLOCK);
- mali_clk_status = 1;
-
- return 0;
-
-out:
- if (platform->mali_pd) {
- clk_disable_unprepare(platform->mali_pd);
- clk_put(platform->mali_pd);
- platform->mali_pd = NULL;
- mali_pd_status = 0;
- }
-
- return -EPERM;
-}
-int kbase_platform_clock_off(struct kbase_device *kbdev)
-{
- struct rk_context *platform;
- if (!kbdev)
- return -ENODEV;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (!platform)
- return -ENODEV;
-
- if (mali_clk_status == 0)
- return 0;
-
- if (platform->mali_clk_node)
- dvfs_clk_disable_unprepare(platform->mali_clk_node);
-
- mali_clk_status = 0;
-
- return 0;
-}
-
-int kbase_platform_clock_on(struct kbase_device *kbdev)
-{
- struct rk_context *platform;
- if (!kbdev)
- return -ENODEV;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (!platform)
- return -ENODEV;
-
- if (mali_clk_status == 1)
- return 0;
-
- if (platform->mali_clk_node)
- dvfs_clk_prepare_enable(platform->mali_clk_node);
-
- mali_clk_status = 1;
-
- return 0;
-}
-int kbase_platform_is_power_on(void)
-{
- return mali_pd_status;
-}
-
-/*turn on power domain*/
-int kbase_platform_power_on(struct kbase_device *kbdev)
-{
- struct rk_context *platform;
- if (!kbdev)
- return -ENODEV;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (!platform)
- return -ENODEV;
-
- if (mali_pd_status == 1)
- return 0;
- if (platform->mali_pd)
- clk_prepare_enable(platform->mali_pd);
- mali_pd_status = 1;
- KBASE_TIMELINE_GPU_POWER(kbdev, 1);
-
- return 0;
-}
-
-/*turn off power domain*/
-int kbase_platform_power_off(struct kbase_device *kbdev)
-{
- struct rk_context *platform;
- if (!kbdev)
- return -ENODEV;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (!platform)
- return -ENODEV;
-
- if (mali_pd_status == 0)
- return 0;
-
- if (platform->mali_pd)
- clk_disable_unprepare(platform->mali_pd);
- mali_pd_status = 0;
- KBASE_TIMELINE_GPU_POWER(kbdev, 0);
-
- return 0;
-}
-
-int kbase_platform_cmu_pmu_control(struct kbase_device *kbdev, int control)
-{
- unsigned long flags;
- struct rk_context *platform;
- if (!kbdev)
- return -ENODEV;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (!platform)
- return -ENODEV;
-
- spin_lock_irqsave(&platform->cmu_pmu_lock, flags);
-
- /* off */
- if (control == 0) {
- if (platform->cmu_pmu_status == 0) {
- spin_unlock_irqrestore(&platform->cmu_pmu_lock, flags);
- return 0;
- }
-
- if (kbase_platform_power_off(kbdev))
- panic("failed to turn off mali power domain\n");
- if (kbase_platform_clock_off(kbdev))
- panic("failed to turn off mali clock\n");
-
- platform->cmu_pmu_status = 0;
- printk(KERN_INFO "turned off mali power\n");
- } else {
- /* on */
- if (platform->cmu_pmu_status == 1) {
- spin_unlock_irqrestore(&platform->cmu_pmu_lock, flags);
- return 0;
- }
-
- if (kbase_platform_power_on(kbdev))
- panic("failed to turn on mali power domain\n");
- if (kbase_platform_clock_on(kbdev))
- panic("failed to turn on mali clock\n");
-
- platform->cmu_pmu_status = 1;
- printk(KERN_INFO "turned on mali power\n");
- }
-
- spin_unlock_irqrestore(&platform->cmu_pmu_lock, flags);
-
- return 0;
-}
-
-static ssize_t error_count_show(struct device *dev,struct device_attribute *attr, char *buf)
-{
- struct kbase_device *kbdev = dev_get_drvdata(dev);
- ssize_t ret;
-
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->kbase_group_error);
- return ret;
-}
-static DEVICE_ATTR(error_count, S_IRUGO, error_count_show, NULL);
-
-#ifdef CONFIG_MALI_MIDGARD_DEBUG_SYS
-static ssize_t show_clock(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct kbase_device *kbdev;
- struct rk_context *platform;
- ssize_t ret = 0;
- unsigned int clkrate;
- int i;
- kbdev = dev_get_drvdata(dev);
-
- if (!kbdev)
- return -ENODEV;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (!platform)
- return -ENODEV;
-
- if (!platform->mali_clk_node) {
- printk(KERN_ERR "mali_clk_node not initialised\n");
- return -ENODEV;
- }
- clkrate = dvfs_clk_get_rate(platform->mali_clk_node);
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current clk mali = %dMHz", clkrate / 1000000);
-
- /* To be revised */
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings:");
- for (i = 0; i < MALI_DVFS_STEP; i++)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ", p_mali_dvfs_infotbl[i].clock / 1000);
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "MHz");
-
- if (ret < PAGE_SIZE - 1)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
- else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
-
- return ret;
-}
-
-static ssize_t set_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct kbase_device *kbdev;
- struct rk_context *platform;
- unsigned int freq = 0;
- int level;
-
- kbdev = dev_get_drvdata(dev);
- if (!kbdev)
- return -ENODEV;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (!platform)
- return -ENODEV;
-
- if (!platform->mali_clk_node)
- return -ENODEV;
- freq = simple_strtoul(buf, NULL, 10);
-
- level = kbase_platform_dvfs_get_level(freq);
- if (level < 0) {
- dev_err(dev, "set_clock: invalid frequency %u kHz\n", freq);
- return -ENOENT;
- }
- kbase_platform_dvfs_set_level(kbdev, level);
- return count;
-}
-
-static ssize_t show_fbdev(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct kbase_device *kbdev;
- ssize_t ret = 0;
- int i;
-
- kbdev = dev_get_drvdata(dev);
-
- if (!kbdev)
- return -ENODEV;
-
- for (i = 0; i < num_registered_fb; i++)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "fb[%d] xres=%d, yres=%d, addr=0x%lx\n", i, registered_fb[i]->var.xres, registered_fb[i]->var.yres, registered_fb[i]->fix.smem_start);
-
- if (ret < PAGE_SIZE - 1)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
- else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
-
- return ret;
-}
-
-typedef enum {
- L1_I_tag_RAM = 0x00,
- L1_I_data_RAM = 0x01,
- L1_I_BTB_RAM = 0x02,
- L1_I_GHB_RAM = 0x03,
- L1_I_TLB_RAM = 0x04,
- L1_I_indirect_predictor_RAM = 0x05,
- L1_D_tag_RAM = 0x08,
- L1_D_data_RAM = 0x09,
- L1_D_load_TLB_array = 0x0A,
- L1_D_store_TLB_array = 0x0B,
- L2_tag_RAM = 0x10,
- L2_data_RAM = 0x11,
- L2_snoop_tag_RAM = 0x12,
- L2_data_ECC_RAM = 0x13,
- L2_dirty_RAM = 0x14,
- L2_TLB_RAM = 0x18
-} RAMID_type;
-
-static inline void asm_ramindex_mrc(u32 *DL1Data0, u32 *DL1Data1, u32 *DL1Data2, u32 *DL1Data3)
-{
- u32 val;
-
- if (DL1Data0) {
- asm volatile ("mrc p15, 0, %0, c15, c1, 0" : "=r" (val));
- *DL1Data0 = val;
- }
- if (DL1Data1) {
- asm volatile ("mrc p15, 0, %0, c15, c1, 1" : "=r" (val));
- *DL1Data1 = val;
- }
- if (DL1Data2) {
- asm volatile ("mrc p15, 0, %0, c15, c1, 2" : "=r" (val));
- *DL1Data2 = val;
- }
- if (DL1Data3) {
- asm volatile ("mrc p15, 0, %0, c15, c1, 3" : "=r" (val));
- *DL1Data3 = val;
- }
-}
-
-static inline void asm_ramindex_mcr(u32 val)
-{
- asm volatile ("mcr p15, 0, %0, c15, c4, 0" : : "r" (val));
- asm volatile ("dsb");
- asm volatile ("isb");
-}
-
-static void get_tlb_array(u32 val, u32 *DL1Data0, u32 *DL1Data1, u32 *DL1Data2, u32 *DL1Data3)
-{
- asm_ramindex_mcr(val);
- asm_ramindex_mrc(DL1Data0, DL1Data1, DL1Data2, DL1Data3);
-}
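-
-/*
- * The helpers above use the implementation-defined cp15 RAMINDEX interface
- * (as on Cortex-A15-class cores): a write through c15, c4, 0 selects a RAM
- * and entry, and the selected data is then read back through the DL1DATA
- * registers at c15, c1, {0..3}. Debug-only and CPU-specific.
- */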
-
-static RAMID_type ramindex = L1_D_load_TLB_array;
-static ssize_t show_dtlb(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct kbase_device *kbdev;
- ssize_t ret = 0;
- int entries, ways;
- u32 DL1Data0 = 0, DL1Data1 = 0, DL1Data2 = 0, DL1Data3 = 0;
-
- kbdev = dev_get_drvdata(dev);
-
- if (!kbdev)
- return -ENODEV;
-
- /* L1-I tag RAM */
- if (ramindex == L1_I_tag_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L1-I data RAM */
- else if (ramindex == L1_I_data_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L1-I BTB RAM */
- else if (ramindex == L1_I_BTB_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L1-I GHB RAM */
- else if (ramindex == L1_I_GHB_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L1-I TLB RAM */
- else if (ramindex == L1_I_TLB_RAM) {
- printk(KERN_DEBUG "L1-I TLB RAM\n");
- for (entries = 0; entries < 32; entries++) {
- get_tlb_array((((u8) ramindex) << 24) + entries, &DL1Data0, &DL1Data1, &DL1Data2, NULL);
- printk(KERN_DEBUG "entries[%d], DL1Data0=%08x, DL1Data1=%08x DL1Data2=%08x\n", entries, DL1Data0, DL1Data1 & 0xffff, 0x0);
- }
- }
- /* L1-I indirect predictor RAM */
- else if (ramindex == L1_I_indirect_predictor_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L1-D tag RAM */
- else if (ramindex == L1_D_tag_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L1-D data RAM */
- else if (ramindex == L1_D_data_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L1-D load TLB array */
- else if (ramindex == L1_D_load_TLB_array) {
- printk(KERN_DEBUG "L1-D load TLB array\n");
- for (entries = 0; entries < 32; entries++) {
- get_tlb_array((((u8) ramindex) << 24) + entries, &DL1Data0, &DL1Data1, &DL1Data2, &DL1Data3);
- printk(KERN_DEBUG "entries[%d], DL1Data0=%08x, DL1Data1=%08x, DL1Data2=%08x, DL1Data3=%08x\n", entries, DL1Data0, DL1Data1, DL1Data2, DL1Data3 & 0x3f);
- }
- }
- /* L1-D store TLB array */
- else if (ramindex == L1_D_store_TLB_array) {
- printk(KERN_DEBUG "\nL1-D store TLB array\n");
- for (entries = 0; entries < 32; entries++) {
- get_tlb_array((((u8) ramindex) << 24) + entries, &DL1Data0, &DL1Data1, &DL1Data2, &DL1Data3);
- printk(KERN_DEBUG "entries[%d], DL1Data0=%08x, DL1Data1=%08x, DL1Data2=%08x, DL1Data3=%08x\n", entries, DL1Data0, DL1Data1, DL1Data2, DL1Data3 & 0x3f);
- }
- }
- /* L2 tag RAM */
- else if (ramindex == L2_tag_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L2 data RAM */
- else if (ramindex == L2_data_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L2 snoop tag RAM */
- else if (ramindex == L2_snoop_tag_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L2 data ECC RAM */
- else if (ramindex == L2_data_ECC_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
- /* L2 dirty RAM */
- else if (ramindex == L2_dirty_RAM)
- printk(KERN_DEBUG "Not implemented yet\n");
-
- /* L2 TLB array */
- else if (ramindex == L2_TLB_RAM) {
- printk(KERN_DEBUG "\nL2 TLB array\n");
- for (ways = 0; ways < 4; ways++) {
- for (entries = 0; entries < 512; entries++) {
- get_tlb_array((ramindex << 24) + (ways << 18) + entries, &DL1Data0, &DL1Data1, &DL1Data2, &DL1Data3);
- printk(KERN_DEBUG "ways[%d]:entries[%d], DL1Data0=%08x, DL1Data1=%08x, DL1Data2=%08x, DL1Data3=%08x\n", ways, entries, DL1Data0, DL1Data1, DL1Data2, DL1Data3);
- }
- }
- }
-
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "Succeeded...\n");
-
- if (ret < PAGE_SIZE - 1)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
- else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
- return ret;
-}
-
-static ssize_t set_dtlb(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct kbase_device *kbdev;
- kbdev = dev_get_drvdata(dev);
-
- if (!kbdev)
- return -ENODEV;
-
- if (sysfs_streq("L1_I_tag_RAM", buf)) {
- ramindex = L1_I_tag_RAM;
- } else if (sysfs_streq("L1_I_data_RAM", buf)) {
- ramindex = L1_I_data_RAM;
- } else if (sysfs_streq("L1_I_BTB_RAM", buf)) {
- ramindex = L1_I_BTB_RAM;
- } else if (sysfs_streq("L1_I_GHB_RAM", buf)) {
- ramindex = L1_I_GHB_RAM;
- } else if (sysfs_streq("L1_I_TLB_RAM", buf)) {
- ramindex = L1_I_TLB_RAM;
- } else if (sysfs_streq("L1_I_indirect_predictor_RAM", buf)) {
- ramindex = L1_I_indirect_predictor_RAM;
- } else if (sysfs_streq("L1_D_tag_RAM", buf)) {
- ramindex = L1_D_tag_RAM;
- } else if (sysfs_streq("L1_D_data_RAM", buf)) {
- ramindex = L1_D_data_RAM;
- } else if (sysfs_streq("L1_D_load_TLB_array", buf)) {
- ramindex = L1_D_load_TLB_array;
- } else if (sysfs_streq("L1_D_store_TLB_array", buf)) {
- ramindex = L1_D_store_TLB_array;
- } else if (sysfs_streq("L2_tag_RAM", buf)) {
- ramindex = L2_tag_RAM;
- } else if (sysfs_streq("L2_data_RAM", buf)) {
- ramindex = L2_data_RAM;
- } else if (sysfs_streq("L2_snoop_tag_RAM", buf)) {
- ramindex = L2_snoop_tag_RAM;
- } else if (sysfs_streq("L2_data_ECC_RAM", buf)) {
- ramindex = L2_data_ECC_RAM;
- } else if (sysfs_streq("L2_dirty_RAM", buf)) {
- ramindex = L2_dirty_RAM;
- } else if (sysfs_streq("L2_TLB_RAM", buf)) {
- ramindex = L2_TLB_RAM;
- } else {
- printk(KERN_DEBUG "Invalid value....\n\n");
- printk(KERN_DEBUG "Available options are one of below\n");
- printk(KERN_DEBUG "L1_I_tag_RAM, L1_I_data_RAM, L1_I_BTB_RAM\n");
- printk(KERN_DEBUG "L1_I_GHB_RAM, L1_I_TLB_RAM, L1_I_indirect_predictor_RAM\n");
- printk(KERN_DEBUG "L1_D_tag_RAM, L1_D_data_RAM, L1_D_load_TLB_array, L1_D_store_TLB_array\n");
- printk(KERN_DEBUG "L2_tag_RAM, L2_data_RAM, L2_snoop_tag_RAM, L2_data_ECC_RAM\n");
- printk(KERN_DEBUG "L2_dirty_RAM, L2_TLB_RAM\n");
- }
-
- return count;
-}
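-
-/*
- * Example sysfs usage (path is an assumption):
- *
- *   echo L1_D_load_TLB_array > /sys/devices/.../dtlb   # select a RAM
- *   cat /sys/devices/.../dtlb                          # dump to kernel log
- */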
-
-static ssize_t show_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct kbase_device *kbdev;
- struct rk_context *platform;
- ssize_t ret = 0;
- unsigned int clkrate;
-
- kbdev = dev_get_drvdata(dev);
-
- if (!kbdev)
- return -ENODEV;
-
- platform = (struct rk_context *)kbdev->platform_context;
- if (!platform)
- return -ENODEV;
-
- clkrate = dvfs_clk_get_rate(platform->mali_clk_node);
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- if (kbase_platform_dvfs_get_enable_status())
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is on\nutilisation:%d\ncurrent clock:%dMHz", kbase_platform_dvfs_get_utilisation(), clkrate / 1000000);
- else
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is off, clock:%dMHz", clkrate / 1000000);
-#else
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is disabled");
-#endif
-
- if (ret < PAGE_SIZE - 1)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
- else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
-
- return ret;
-}
-
-static ssize_t set_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct kbase_device *kbdev = dev_get_drvdata(dev);
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- struct rk_context *platform;
-#endif
-
- if (!kbdev)
- return -ENODEV;
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- platform = (struct rk_context *)kbdev->platform_context;
- if (sysfs_streq("off", buf)) {
- /*kbase_platform_dvfs_enable(false, MALI_DVFS_BL_CONFIG_FREQ);*/
- kbase_platform_dvfs_enable(false, p_mali_dvfs_infotbl[MALI_DVFS_STEP-1].clock);
- platform->dvfs_enabled = false;
- } else if (sysfs_streq("on", buf)) {
- /*kbase_platform_dvfs_enable(true, MALI_DVFS_START_FREQ);*/
- kbase_platform_dvfs_enable(true, p_mali_dvfs_infotbl[0].clock);
- platform->dvfs_enabled = true;
- } else {
- printk(KERN_DEBUG "invalid value - only \"on\" and \"off\" are accepted\n");
- }
-#else
- printk(KERN_DEBUG "mali DVFS is disabled\n");
-#endif
- return count;
-}
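-
-/*
- * Example sysfs usage (path is an assumption):
- *
- *   echo off > /sys/devices/.../dvfs   # disable DVFS at the last table entry
- *   echo on  > /sys/devices/.../dvfs   # re-enable DVFS from the first entry
- *   cat /sys/devices/.../dvfs          # report state and current clock
- */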
-
-static ssize_t show_upper_lock_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct kbase_device *kbdev;
- ssize_t ret = 0;
- int i;
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- int locked_level = -1;
-#endif
-
- kbdev = dev_get_drvdata(dev);
-
- if (!kbdev)
- return -ENODEV;
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- locked_level = mali_get_dvfs_upper_locked_freq();
- if (locked_level > 0)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current Upper Lock Level = %dMHz", locked_level);
- else
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "Upper Lock Level is not set");
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings:");
- for (i = 0; i < MALI_DVFS_STEP; i++)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ", p_mali_dvfs_infotbl[i].clock / 1000);
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "MHz");
- ret += snprintf(buf + ret, PAGE_SIZE - ret, ", write \"off\" to unlock");
-
-#else
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is disabled; the lock cannot be set");
-#endif
-
- if (ret < PAGE_SIZE - 1)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
- else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
-
- return ret;
-}
-
-static ssize_t set_upper_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct kbase_device *kbdev;
- int i;
- unsigned int freq;
- kbdev = dev_get_drvdata(dev);
- freq = 0;
-
- if (!kbdev)
- return -ENODEV;
-
- freq = simple_strtoul(buf, NULL, 10);
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- if (sysfs_streq("off", buf)) {
- mali_dvfs_freq_unlock();
- } else {
- for (i = 0; i < MALI_DVFS_STEP; i++) {
- if (p_mali_dvfs_infotbl[i].clock == freq) {
- mali_dvfs_freq_lock(i);
- break;
- }
- }
- if (i == MALI_DVFS_STEP) {
- dev_err(dev, "set_upper_lock_dvfs: invalid value\n");
- return -ENOENT;
- }
- }
-#else /* CONFIG_MALI_MIDGARD_DVFS */
- printk(KERN_DEBUG "mali DVFS is disabled; the lock cannot be set\n");
-#endif
-
- return count;
-}
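-
-/*
- * Example sysfs usage (path and frequency are assumptions; valid values are
- * the table clocks in kHz, as matched by the handler above):
- *
- *   echo 400000 > /sys/devices/.../dvfs_upper_lock   # cap at that level
- *   echo off    > /sys/devices/.../dvfs_upper_lock   # remove the cap
- */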
-
-static ssize_t show_under_lock_dvfs(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct kbase_device *kbdev;
- ssize_t ret = 0;
- int i;
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- int locked_level = -1;
-#endif
-
- kbdev = dev_get_drvdata(dev);
-
- if (!kbdev)
- return -ENODEV;
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- locked_level = mali_get_dvfs_under_locked_freq();
- if (locked_level > 0)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "Current Under Lock Level = %dMHz", locked_level);
- else
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "Under Lock Level is not set");
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\nPossible settings:");
- for (i = 0; i < MALI_DVFS_STEP; i++)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d ", p_mali_dvfs_infotbl[i].clock / 1000);
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "MHz");
- ret += snprintf(buf + ret, PAGE_SIZE - ret, ", write \"off\" to unlock");
-
-#else
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "mali DVFS is disabled; the lock cannot be set");
-#endif
-
- if (ret < PAGE_SIZE - 1)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
- else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
-
- return ret;
-}
-
-static ssize_t set_under_lock_dvfs(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- int i;
- unsigned int freq;
- struct kbase_device *kbdev;
-
- kbdev = dev_get_drvdata(dev);
- if (!kbdev)
- return -ENODEV;
-
- freq = simple_strtoul(buf, NULL, 10);
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- if (sysfs_streq("off", buf)) {
- mali_dvfs_freq_under_unlock();
- } else {
- for (i = 0; i < MALI_DVFS_STEP; i++) {
- if (p_mali_dvfs_infotbl[i].clock == freq) {
- mali_dvfs_freq_under_lock(i);
- break;
- }
- }
- if (i == MALI_DVFS_STEP) {
- dev_err(dev, "set_under_lock_dvfs: invalid value\n");
- return -ENOENT;
- }
- }
-#else /* CONFIG_MALI_MIDGARD_DVFS */
- printk(KERN_DEBUG "mali DVFS is disabled; the lock cannot be set\n");
-#endif
- return count;
-}
-
-/** The sysfs files @c clock, @c fbdev, etc.
- *
- * These are used for obtaining information about the Mali operating clock, framebuffer address, and DVFS state.
- */
-DEVICE_ATTR(clock, S_IRUGO | S_IWUSR, show_clock, set_clock);
-DEVICE_ATTR(fbdev, S_IRUGO, show_fbdev, NULL);
-DEVICE_ATTR(dtlb, S_IRUGO | S_IWUSR, show_dtlb, set_dtlb);
-DEVICE_ATTR(dvfs, S_IRUGO | S_IWUSR, show_dvfs, set_dvfs);
-DEVICE_ATTR(dvfs_upper_lock, S_IRUGO | S_IWUSR, show_upper_lock_dvfs, set_upper_lock_dvfs);
-DEVICE_ATTR(dvfs_under_lock, S_IRUGO | S_IWUSR, show_under_lock_dvfs, set_under_lock_dvfs);
-DEVICE_ATTR(time_in_state, S_IRUGO | S_IWUSR, show_time_in_state, set_time_in_state);
-
-int kbase_platform_create_sysfs_file(struct device *dev)
-{
- if (device_create_file(dev, &dev_attr_clock)) {
- dev_err(dev, "Couldn't create sysfs file [clock]\n");
- goto out;
- }
-
- if (device_create_file(dev, &dev_attr_fbdev)) {
- dev_err(dev, "Couldn't create sysfs file [fbdev]\n");
- goto out;
- }
-
- if (device_create_file(dev, &dev_attr_dtlb)) {
- dev_err(dev, "Couldn't create sysfs file [dtlb]\n");
- goto out;
- }
-
- if (device_create_file(dev, &dev_attr_dvfs)) {
- dev_err(dev, "Couldn't create sysfs file [dvfs]\n");
- goto out;
- }
-
- if (device_create_file(dev, &dev_attr_dvfs_upper_lock)) {
- dev_err(dev, "Couldn't create sysfs file [dvfs_upper_lock]\n");
- goto out;
- }
-
- if (device_create_file(dev, &dev_attr_dvfs_under_lock)) {
- dev_err(dev, "Couldn't create sysfs file [dvfs_under_lock]\n");
- goto out;
- }
-
- if (device_create_file(dev, &dev_attr_time_in_state)) {
- dev_err(dev, "Couldn't create sysfs file [time_in_state]\n");
- goto out;
- }
- return 0;
-out:
- return -ENOENT;
-}
-
-void kbase_platform_remove_sysfs_file(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_clock);
- device_remove_file(dev, &dev_attr_fbdev);
- device_remove_file(dev, &dev_attr_dtlb);
- device_remove_file(dev, &dev_attr_dvfs);
- device_remove_file(dev, &dev_attr_dvfs_upper_lock);
- device_remove_file(dev, &dev_attr_dvfs_under_lock);
- device_remove_file(dev, &dev_attr_time_in_state);
-}
-#endif /* CONFIG_MALI_MIDGARD_DEBUG_SYS */
-
-mali_error kbase_platform_init(struct kbase_device *kbdev)
-{
- struct rk_context *platform;
- int ret;
-
- platform = kzalloc(sizeof(struct rk_context), GFP_KERNEL);
-
- if (NULL == platform)
- return MALI_ERROR_OUT_OF_MEMORY;
-
- kbdev->platform_context = (void *)platform;
-
- platform->cmu_pmu_status = 0;
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- platform->utilisation = 0;
- platform->time_busy = 0;
- platform->time_idle = 0;
- platform->time_tick = 0;
- platform->dvfs_enabled = true;
-#endif
-
- rk_gpu = kobject_create_and_add("rk_gpu", NULL);
- if (!rk_gpu) {
- kfree(platform);
- return MALI_ERROR_FUNCTION_FAILED;
- }
-
- ret = sysfs_create_file(rk_gpu, &dev_attr_error_count.attr);
- if (ret) {
- kfree(platform);
- return MALI_ERROR_FUNCTION_FAILED;
- }
-
- spin_lock_init(&platform->cmu_pmu_lock);
-
- if (kbase_platform_power_clock_init(kbdev))
- goto clock_init_fail;
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- kbase_platform_dvfs_init(kbdev);
-#endif /* CONFIG_MALI_MIDGARD_DVFS */
-
- /* Enable power */
- kbase_platform_cmu_pmu_control(kbdev, 1);
- return MALI_ERROR_NONE;
-
- clock_init_fail:
- kfree(platform);
-
- return MALI_ERROR_FUNCTION_FAILED;
-}
-
-void kbase_platform_term(struct kbase_device *kbdev)
-{
- struct rk_context *platform;
-
- platform = (struct rk_context *)kbdev->platform_context;
-
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- kbase_platform_dvfs_term();
-#endif /* CONFIG_MALI_MIDGARD_DVFS */
-
- /* Disable power */
- kbase_platform_cmu_pmu_control(kbdev, 0);
- kfree(kbdev->platform_context);
- kbdev->platform_context = NULL;
-}
+++ /dev/null
-/* drivers/gpu/t6xx/kbase/src/platform/rk/mali_kbase_platform.h
- * Rockchip SoC Mali-T764 platform-dependent codes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/**
- * @file mali_kbase_platform.h
- * Platform-dependent init
- */
-
-#ifndef _KBASE_PLATFORM_H_
-#define _KBASE_PLATFORM_H_
-
-struct rk_context {
- /** Indicator of whether the system clock to the mali-t764 is active */
- int cmu_pmu_status;
- /** cmu & pmu lock */
- spinlock_t cmu_pmu_lock;
- struct clk *mali_pd;
- struct dvfs_node *mali_clk_node;
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- /* for calculating utilisation over a fixed period */
- int time_tick;
- int utilisation;
- u32 time_busy;
- u32 time_idle;
- bool dvfs_enabled;
- bool gpu_in_touch;
- spinlock_t gpu_in_touch_lock;
-#endif
-};
-int mali_dvfs_clk_set(struct dvfs_node *node, unsigned long rate);
-
-/* All things that are needed for the Linux port. */
-int kbase_platform_cmu_pmu_control(struct kbase_device *kbdev, int control);
-int kbase_platform_create_sysfs_file(struct device *dev);
-void kbase_platform_remove_sysfs_file(struct device *dev);
-int kbase_platform_is_power_on(void);
-mali_error kbase_platform_init(struct kbase_device *kbdev);
-void kbase_platform_term(struct kbase_device *kbdev);
-
-int kbase_platform_clock_on(struct kbase_device *kbdev);
-int kbase_platform_clock_off(struct kbase_device *kbdev);
-int kbase_platform_power_off(struct kbase_device *kbdev);
-int kbase_platform_power_on(struct kbase_device *kbdev);
-
-#endif /* _KBASE_PLATFORM_H_ */