From 6a805bcf2b5a839e6ae0313047c53f99c16baff6 Mon Sep 17 00:00:00 2001
From: xxm <xxm@rock-chips.com>
Date: Tue, 29 Jul 2014 14:15:25 +0800
Subject: [PATCH] rockchip: midgard: 1, update gpu version to r4p1-00rel0; 2, add
 input handler when runtime on/off; 3, bump version to 0x09

---
 drivers/gpu/arm/midgard/Kbuild                |   2 +-
 drivers/gpu/arm/midgard/mali_base_hwconfig.h  |   7 +
 drivers/gpu/arm/midgard/mali_base_kernel.h    |  64 +++++-
 .../arm/midgard/mali_kbase_10969_workaround.c |   8 +-
 .../arm/midgard/mali_kbase_config_defaults.h  |   2 +-
 .../gpu/arm/midgard/mali_kbase_core_linux.c   |   7 +-
 drivers/gpu/arm/midgard/mali_kbase_defs.h     |  78 +++++++-
 drivers/gpu/arm/midgard/mali_kbase_jd.c       | 187 +++++++++++-------
 drivers/gpu/arm/midgard/mali_kbase_mem.c      |   6 +
 drivers/gpu/arm/midgard/mali_kbase_mem.h      |   7 +-
 .../gpu/arm/midgard/mali_kbase_mem_linux.c    |  17 +-
 drivers/gpu/arm/midgard/mali_kbase_mmu.c      |   2 +-
 drivers/gpu/arm/midgard/mali_kbase_pm.c       |  13 --
 .../gpu/arm/midgard/mali_kbase_pm_driver.c    |  15 +-
 .../gpu/arm/midgard/mali_kbase_pm_policy.c    |  22 ++-
 drivers/gpu/arm/midgard/mali_kbase_replay.c   |   8 +-
 drivers/gpu/arm/midgard/mali_kbase_softjobs.c |  19 +-
 drivers/gpu/arm/midgard/mali_kbase_uku.h      |  11 +-
 .../platform/rk/mali_kbase_config_rk.c        |  22 ++-
 .../arm/midgard/platform/rk/mali_kbase_dvfs.c |  94 ++++++++-
 .../midgard/platform/rk/mali_kbase_platform.h |   2 +
 21 files changed, 454 insertions(+), 139 deletions(-)

diff --git a/drivers/gpu/arm/midgard/Kbuild b/drivers/gpu/arm/midgard/Kbuild
index 33dc4570bae1..3cf2828da120 100755
--- a/drivers/gpu/arm/midgard/Kbuild
+++ b/drivers/gpu/arm/midgard/Kbuild
@@ -15,7 +15,7 @@
 
 
 # Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r4p1-02dev0"
+MALI_RELEASE_NAME ?= "r4p1-00rel0"
 
 # Paths required for build
 KBASE_PATH = $(src)
diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig.h b/drivers/gpu/arm/midgard/mali_base_hwconfig.h
index f7ee4a7257d1..5f5c945f3f87 100755
--- a/drivers/gpu/arm/midgard/mali_base_hwconfig.h
+++ b/drivers/gpu/arm/midgard/mali_base_hwconfig.h
@@ -276,6 +276,9 @@ typedef enum base_hw_issue {
 	/* Write buffer can cause tile list corruption */
 	BASE_HW_ISSUE_11024,
 
+	/* Pause buffer can cause a fragment job hang */
+	BASE_HW_ISSUE_11035,
+
 	/* T76X hw issues */
 
 	/* Partial 16xMSAA support */
@@ -367,6 +370,7 @@ static const base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
 	BASE_HW_ISSUE_10995,
 	BASE_HW_ISSUE_11012,
 	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11035,
 	/* List of hardware issues must end with BASE_HW_ISSUE_END */
 	BASE_HW_ISSUE_END
 };
@@ -403,6 +407,7 @@ static const base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
 	BASE_HW_ISSUE_10969,
 	BASE_HW_ISSUE_11012,
 	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11035,
 	/* List of hardware issues must end with BASE_HW_ISSUE_END */
 	BASE_HW_ISSUE_END
 };
@@ -436,6 +441,7 @@ static const base_hw_issue base_hw_issues_t60x_r0p1[] = {
 	BASE_HW_ISSUE_10946,
 	BASE_HW_ISSUE_11012,
 	BASE_HW_ISSUE_11020,
+	BASE_HW_ISSUE_11035,
 	/* List of hardware issues must end with BASE_HW_ISSUE_END */
 	BASE_HW_ISSUE_END
 };
@@ -466,6 +472,7 @@ static const base_hw_issue base_hw_issues_t62x_r0p1[] = {
 	BASE_HW_ISSUE_11012,
 	BASE_HW_ISSUE_11020,
 	BASE_HW_ISSUE_11024,
+	BASE_HW_ISSUE_11035,
 	/* List of hardware issues must end with BASE_HW_ISSUE_END */
 	BASE_HW_ISSUE_END
 };
diff --git a/drivers/gpu/arm/midgard/mali_base_kernel.h b/drivers/gpu/arm/midgard/mali_base_kernel.h
index 4aff2bdf270b..53e643116a27 100755
--- a/drivers/gpu/arm/midgard/mali_base_kernel.h
+++ b/drivers/gpu/arm/midgard/mali_base_kernel.h
@@ -315,6 +315,22 @@ struct base_mem_aliasing_info {
 	u64 length;
 };
 
+/**
+ * @brief Job dependency type.
+ *
+ * A flags field will be inserted into the atom structure to specify whether a dependency is a data or
+ * ordering dependency (by putting it before/after 'core_req' in the structure it should be possible to add it without
+ * changing the structure size).
+ * When the flag is set for a particular dependency to signal that it is an ordering-only dependency,
+ * errors will not be propagated.
+ */
+typedef u8 base_jd_dep_type;
+
+
+#define BASE_JD_DEP_TYPE_INVALID  (0) 	/**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA     (1U << 0) 	/**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER    (1U << 1) 	/**< Order dependency */
+
 /**
  * @brief Job chain hardware requirements.
  *
@@ -542,17 +558,23 @@ typedef struct base_jd_atom {
 
 typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
 
+struct base_dependency {
+	base_atom_id  atom_id;               /**< An atom number */
+	base_jd_dep_type dependency_type;    /**< Dependency type */
+}; 
+
 typedef struct base_jd_atom_v2 {
 	mali_addr64 jc;			    /**< job-chain GPU address */
 	base_jd_udata udata;		    /**< user data */
 	kbase_pointer extres_list;	    /**< list of external resources */
 	u16 nr_extres;			    /**< nr of external resources */
 	base_jd_core_req core_req;	    /**< core requirements */
-	base_atom_id pre_dep[2];	    /**< pre-dependencies */
+	const struct base_dependency pre_dep[2]; /**< pre-dependencies; one must use the SETTER function to assign this field,
+	this is done in order to reduce the possibility of improper assignment of a dependency field */
 	base_atom_id atom_number;	    /**< unique number to identify the atom */
 	s8 prio;			    /**< priority - smaller is higher priority */
 	u8 device_nr;			    /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
-	u8 padding[7];
+	u8 padding[5];
 } base_jd_atom_v2;
 
 #if BASE_LEGACY_JD_API
@@ -626,6 +648,44 @@ static INLINE base_syncset *base_jd_get_atom_syncset(base_jd_atom *atom, u16 n)
 }
 #endif				/* BASE_LEGACY_JD_API */
 
+
+/**
+ * @brief Setter for a dependency structure
+ *
+ * @param[in] dep          The kbase jd atom dependency to be initialized.
+ * @param     id           The atom_id to be assigned.
+ * @param     dep_type     The dep_type to be assigned.
+ *
+ */
+static INLINE void base_jd_atom_dep_set(const struct base_dependency* const_dep, base_atom_id id, base_jd_dep_type dep_type)
+{
+	struct base_dependency* dep;
+	
+	LOCAL_ASSERT(const_dep != NULL);
+	/* make sure we don't set disallowed combinations of atom_id/dependency_type */
+	LOCAL_ASSERT( ( id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) || 
+				(id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID) );
+
+	dep = REINTERPRET_CAST(struct base_dependency*)const_dep;
+
+	dep->atom_id = id;
+	dep->dependency_type = dep_type;
+}
+
+/**
+ * @brief Make a copy of a dependency structure
+ *
+ * @param[in,out] dep          The kbase jd atom dependency to be written.
+ * @param[in]     from         The dependency to make a copy from.
+ *
+ */
+static INLINE void base_jd_atom_dep_copy(const struct base_dependency* const_dep, const struct base_dependency* from)
+{
+	LOCAL_ASSERT(const_dep != NULL);
+
+	base_jd_atom_dep_set(const_dep, from->atom_id, from->dependency_type);
+}
+
 /**
  * @brief Soft-atom fence trigger setup.
  *
diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
index b08143020aa0..6d52dcfcf362 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
@@ -44,9 +44,12 @@ int kbasep_10969_workaround_clamp_coordinates(kbase_jd_atom *katom)
 {
 	struct device *dev = katom->kctx->kbdev->dev;
 	u32   clamped = 0;
-	dev_warn(dev,"Called TILE_RANGE_FAULT workaround clamping function. \n");
+	dev_warn(dev, "Called TILE_RANGE_FAULT workaround clamping function.\n");
 	if (katom->core_req & BASE_JD_REQ_FS){
-		kbase_va_region * region = kbase_region_tracker_find_region_enclosing_address(katom->kctx, katom->jc );
+		kbase_va_region *region;
+
+		kbase_gpu_vm_lock(katom->kctx);
+		region = kbase_region_tracker_find_region_enclosing_address(katom->kctx, katom->jc);
 
 		if (region){
 			phys_addr_t * page_array = kbase_get_phy_pages(region);
@@ -171,6 +174,7 @@ int kbasep_10969_workaround_clamp_coordinates(kbase_jd_atom *katom)
 				kunmap_atomic(page_1);
 			}
 		}
+		kbase_gpu_vm_unlock(katom->kctx);
 	}
 	return clamped;
 }
diff --git a/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
index 88dcff3f2803..30fdb584bacf 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
@@ -164,7 +164,7 @@
 /*** Begin Power Manager defaults */
 
 /* Milliseconds */
-#define DEFAULT_PM_DVFS_FREQ 50
+#define DEFAULT_PM_DVFS_FREQ 20
 
 /**
  * Default poweroff tick granuality, in nanoseconds
diff --git a/drivers/gpu/arm/midgard/mali_kbase_core_linux.c b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
index 926c2fb96bc4..aa23e5f38762 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
@@ -38,7 +38,6 @@
 #include <linux/syscalls.h>
 #endif /* CONFIG_KDS */
 
-#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/poll.h>
@@ -86,7 +85,7 @@ EXPORT_SYMBOL(shared_kernel_test_data);
 #endif /* MALI_UNIT_TEST */
 
 #define KBASE_DRV_NAME "mali"
-#define ROCKCHIP_VERSION 7
+#define ROCKCHIP_VERSION 9
 static const char kbase_drv_name[] = KBASE_DRV_NAME;
 
 static int kbase_dev_nr;
@@ -155,6 +154,7 @@ mali_error kbasep_kds_allocate_resource_list_data(kbase_context *kctx, base_exte
 		return MALI_ERROR_OUT_OF_MEMORY;
 	}
 
+	kbase_gpu_vm_lock(kctx);
 	for (res_id = 0; res_id < num_elems; res_id++, res++) {
 		int exclusive;
 		kbase_va_region *reg;
@@ -189,6 +189,7 @@ mali_error kbasep_kds_allocate_resource_list_data(kbase_context *kctx, base_exte
 		if (exclusive)
 			set_bit(res_id, resources_list->kds_access_bitmap);
 	}
+	kbase_gpu_vm_unlock(kctx);
 
 	/* did the loop run to completion? */
 	if (res_id == num_elems)
@@ -924,8 +925,6 @@ static int kbase_release(struct inode *inode, struct file *filp)
 	kbasep_kctx_list_element *element, *tmp;
 	mali_bool found_element = MALI_FALSE;
 	
-	msleep(500);
-
 	mutex_lock(&kbdev->kctx_list_lock);
 	list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
 		if (element->kctx == kctx) {
diff --git a/drivers/gpu/arm/midgard/mali_kbase_defs.h b/drivers/gpu/arm/midgard/mali_kbase_defs.h
index 52300d20d51d..ebf28dca7386 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_defs.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_defs.h
@@ -217,6 +217,82 @@ typedef enum {
 
 typedef struct kbase_jd_atom kbase_jd_atom;
 
+struct kbase_jd_atom_dependency
+{
+	struct kbase_jd_atom *atom;
+	u8 dep_type;
+};
+
+/**
+ * @brief The function retrieves a read-only reference to the atom field from
+ * the kbase_jd_atom_dependency structure
+ *
+ * @param[in] dep kbase jd atom dependency.
+ *
+ * @return readonly reference to dependent ATOM.
+ */
+static INLINE const struct kbase_jd_atom* const kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency* dep)
+{
+	LOCAL_ASSERT(dep != NULL);
+	
+	return (const struct kbase_jd_atom* const )(dep->atom);
+}
+ 
+/**
+ * @brief The function retrieves a read-only reference to the dependency type field from
+ * the kbase_jd_atom_dependency structure
+ *
+ * @param[in] dep kbase jd atom dependency.
+ *
+ * @return A dependency type value.
+ */
+static INLINE const u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency* dep)
+{
+	LOCAL_ASSERT(dep != NULL);
+
+	return dep->dep_type;
+}
+
+/**
+ * @brief Setter macro for dep_atom array entry in kbase_jd_atom
+ *
+ * @param[in] dep    The kbase jd atom dependency.
+ * @param[in] a      The ATOM to be set as a dependency.
+ * @param     type   The ATOM dependency type to be set.
+ *
+ */
+static INLINE void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency* const_dep, 
+	struct kbase_jd_atom * a,
+	u8 type)
+{
+	struct kbase_jd_atom_dependency* dep;
+	
+	LOCAL_ASSERT(const_dep != NULL);
+
+	dep = (REINTERPRET_CAST(struct kbase_jd_atom_dependency* )const_dep);
+
+	dep->atom = a;
+	dep->dep_type = type; 
+}
+
+/**
+ * @brief Setter macro for dep_atom array entry in kbase_jd_atom
+ *
+ * @param[in] dep    The kbase jd atom dependency to be cleared.
+ *
+ */
+static INLINE void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency* const_dep)
+{
+	struct kbase_jd_atom_dependency* dep;
+
+	LOCAL_ASSERT(const_dep != NULL);
+
+	dep = (REINTERPRET_CAST(struct kbase_jd_atom_dependency* )const_dep);
+
+	dep->atom = NULL;
+	dep->dep_type = BASE_JD_DEP_TYPE_INVALID; 
+}
+
 struct kbase_ext_res
 {
 	mali_addr64 gpu_address;
@@ -232,7 +308,7 @@ struct kbase_jd_atom {
 
 	struct list_head dep_head[2];
 	struct list_head dep_item[2];
-	struct kbase_jd_atom *dep_atom[2];
+	const struct kbase_jd_atom_dependency dep[2];
 
 	u16 nr_extres;
 	struct kbase_ext_res * extres;
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd.c b/drivers/gpu/arm/midgard/mali_kbase_jd.c
index 4a7d8ab4852e..f5ddcf4219a7 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_jd.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd.c
@@ -31,7 +31,7 @@
 #include <linux/ump.h>
 #endif				/* CONFIG_UMP */
 #include <linux/random.h>
-#include <linux/delay.h>
+
 #define beenthere(kctx,f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
@@ -151,7 +151,7 @@ static void kds_dep_clear(void *callback_parameter, void *callback_extra_paramet
 	katom->kds_dep_satisfied = MALI_TRUE;
 
 	/* Check whether the atom's other dependencies were already met */
-	if (!katom->dep_atom[0] && !katom->dep_atom[1]) {
+	if (!kbase_jd_katom_dep_atom(&katom->dep[0]) && !kbase_jd_katom_dep_atom(&katom->dep[1])) {
 		/* katom dep complete, attempt to run it */
 		mali_bool resched = MALI_FALSE;
 		resched = jd_run_atom(katom);
@@ -248,17 +248,16 @@ out:
 	return err;
 }
 
-static void kbase_jd_umm_unmap(kbase_context *kctx, struct kbase_va_region *reg, int mmu_update)
+static void kbase_jd_umm_unmap(kbase_context *kctx, struct kbase_mem_phy_alloc *alloc)
 {
 	KBASE_DEBUG_ASSERT(kctx);
-	KBASE_DEBUG_ASSERT(reg);
-	KBASE_DEBUG_ASSERT(reg->alloc->imported.umm.dma_attachment);
-	KBASE_DEBUG_ASSERT(reg->alloc->imported.umm.sgt);
-	if (mmu_update)
-		kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg));
-	dma_buf_unmap_attachment(reg->alloc->imported.umm.dma_attachment, reg->alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
-	reg->alloc->imported.umm.sgt = NULL;
-	reg->alloc->nents = 0;
+	KBASE_DEBUG_ASSERT(alloc);
+	KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
+	KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
+	dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+			alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+	alloc->imported.umm.sgt = NULL;
+	alloc->nents = 0;
 }
 #endif				/* CONFIG_DMA_SHARED_BUFFER */
 
@@ -294,24 +293,29 @@ static void kbase_jd_post_external_resources(kbase_jd_atom *katom)
 		katom->kds_dep_satisfied = MALI_TRUE;
 #endif				/* CONFIG_KDS */
 
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
-	/* Lock also used in debug mode just for lock order checking */
 	kbase_gpu_vm_lock(katom->kctx);
-#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
 	/* only roll back if extres is non-NULL */
 	if (katom->extres) {
 		u32 res_no;
 		res_no = katom->nr_extres;
 		while (res_no-- > 0) {
+			struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
 #ifdef CONFIG_DMA_SHARED_BUFFER
-			if (katom->extres[res_no].alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
-				kbase_va_region *reg;
-				int mmu_update = 0;
-				reg = kbase_region_tracker_find_region_base_address(katom->kctx, katom->extres[res_no].gpu_address);
-				if (reg && reg->alloc == katom->extres[res_no].alloc)
-					mmu_update = 1;
-				if (1 == katom->extres[res_no].alloc->imported.umm.current_mapping_usage_count--)
-					kbase_jd_umm_unmap(katom->kctx, reg, mmu_update);
+			if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+				alloc->imported.umm.current_mapping_usage_count--;
+
+				if (0 == alloc->imported.umm.current_mapping_usage_count) {
+					struct kbase_va_region *reg;
+					reg = kbase_region_tracker_find_region_base_address(
+					          katom->kctx, katom->extres[res_no].gpu_address);
+
+					if (reg && reg->alloc == alloc) {
+						kbase_mmu_teardown_pages(katom->kctx, reg->start_pfn,
+						    kbase_reg_current_backed_size(reg));
+					}
+
+					kbase_jd_umm_unmap(katom->kctx, alloc);
+				}
 			}
 #endif	/* CONFIG_DMA_SHARED_BUFFER */
 			kbase_mem_phy_alloc_put(katom->extres[res_no].alloc);
@@ -319,10 +323,7 @@ static void kbase_jd_post_external_resources(kbase_jd_atom *katom)
 		kfree(katom->extres);
 		katom->extres = NULL;
 	}
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
-	/* Lock also used in debug mode just for lock order checking */
 	kbase_gpu_vm_unlock(katom->kctx);
-#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
 }
 
 #if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS)
@@ -402,12 +403,8 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
 	}
 #endif				/* CONFIG_KDS */
 
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
 	/* need to keep the GPU VM locked while we set up UMM buffers */
-	/* Lock also used in debug mode just for lock order checking */
 	kbase_gpu_vm_lock(katom->kctx);
-#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
-
 	for (res_no = 0; res_no < katom->nr_extres; res_no++) {
 		base_external_resource *res;
 		kbase_va_region *reg;
@@ -472,11 +469,8 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
 		katom->extres[res_no].alloc = kbase_mem_phy_alloc_get(reg->alloc);
 	}
 	/* successfully parsed the extres array */
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
 	/* drop the vm lock before we call into kds */
-	/* Lock also used in debug mode just for lock order checking */
 	kbase_gpu_vm_unlock(katom->kctx);
-#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
 
 #ifdef CONFIG_KDS
 	if (kds_res_count) {
@@ -512,34 +506,35 @@ static mali_error kbase_jd_pre_external_resources(kbase_jd_atom *katom, const ba
 #ifdef CONFIG_KDS
  failed_kds_setup:
 
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
 	/* lock before we unmap */
-	/* Lock also used in debug mode just for lock order checking */
 	kbase_gpu_vm_lock(katom->kctx);
-#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
 #endif				/* CONFIG_KDS */
 
  failed_loop:
 	/* undo the loop work */
 	while (res_no-- > 0) {
+		struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
 #ifdef CONFIG_DMA_SHARED_BUFFER
-		if (katom->extres[res_no].alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
-			struct kbase_va_region * reg;
-			int mmu_update = 0;
-			reg = kbase_region_tracker_find_region_base_address(katom->kctx, katom->extres[res_no].gpu_address);
-			if (reg && reg->alloc == katom->extres[res_no].alloc && reg->alloc->type)
-				mmu_update = 1;
-			katom->extres[res_no].alloc->imported.umm.current_mapping_usage_count--;
-			if (0 == reg->alloc->imported.umm.current_mapping_usage_count)
-				kbase_jd_umm_unmap(katom->kctx, reg, mmu_update);
+		if (alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+			alloc->imported.umm.current_mapping_usage_count--;
+
+			if (0 == alloc->imported.umm.current_mapping_usage_count) {
+				struct kbase_va_region *reg;
+				reg = kbase_region_tracker_find_region_base_address(
+				          katom->kctx, katom->extres[res_no].gpu_address);
+
+				if (reg && reg->alloc == alloc) {
+					kbase_mmu_teardown_pages(katom->kctx, reg->start_pfn,
+					    kbase_reg_current_backed_size(reg));
+				}
+
+				kbase_jd_umm_unmap(katom->kctx, alloc);
+			}
 		}
 #endif				/* CONFIG_DMA_SHARED_BUFFER */
-		kbase_mem_phy_alloc_put(katom->extres[res_no].alloc);
+		kbase_mem_phy_alloc_put(alloc);
 	}
-#if defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG)
-	/* Lock also used in debug mode just for lock order checking */
 	kbase_gpu_vm_unlock(katom->kctx);
-#endif				/* defined(CONFIG_DMA_SHARED_BUFFER) || defined(CONFIG_MALI_DEBUG) */
 
  early_err_out:
 	kfree(katom->extres);
@@ -559,12 +554,13 @@ STATIC INLINE void jd_resolve_dep(struct list_head *out_list, kbase_jd_atom *kat
 		kbase_jd_atom *dep_atom = list_entry(katom->dep_head[d].next, kbase_jd_atom, dep_item[d]);
 		list_del(katom->dep_head[d].next);
 
-		dep_atom->dep_atom[d] = NULL;
+		kbase_jd_katom_dep_clear(&dep_atom->dep[d]);
+		
 		if (katom->event_code != BASE_JD_EVENT_DONE) {
 			/* Atom failed, so remove the other dependencies and immediately fail the atom */
-			if (dep_atom->dep_atom[other_d]) {
+			if (kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
 				list_del(&dep_atom->dep_item[other_d]);
-				dep_atom->dep_atom[other_d] = NULL;
+				kbase_jd_katom_dep_clear(&dep_atom->dep[other_d]);
 			}
 #ifdef CONFIG_KDS
 			if (!dep_atom->kds_dep_satisfied) {
@@ -575,12 +571,17 @@ STATIC INLINE void jd_resolve_dep(struct list_head *out_list, kbase_jd_atom *kat
 			}
 #endif
 
-			dep_atom->event_code = katom->event_code;
-			KBASE_DEBUG_ASSERT(dep_atom->status != KBASE_JD_ATOM_STATE_UNUSED);
-			dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+			/* at this point a dependency to the failed job is already removed */
+			if ( !( kbase_jd_katom_dep_type(&dep_atom->dep[d]) == BASE_JD_DEP_TYPE_ORDER &&
+					katom->event_code > BASE_JD_EVENT_ACTIVE) )
+			{
+				dep_atom->event_code = katom->event_code;
+				KBASE_DEBUG_ASSERT(dep_atom->status != KBASE_JD_ATOM_STATE_UNUSED);
+				dep_atom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+			}
 
 			list_add_tail(&dep_atom->dep_item[0], out_list);
-		} else if (!dep_atom->dep_atom[other_d]) {
+		} else if (!kbase_jd_katom_dep_atom(&dep_atom->dep[other_d])) {
 #ifdef CONFIG_KDS
 			if (dep_atom->kds_dep_satisfied)
 #endif
@@ -621,8 +622,8 @@ static void jd_check_force_failure(kbase_jd_atom *katom)
 	    (katom->core_req & BASEP_JD_REQ_EVENT_NEVER))
 		return;
 	for (i = 1; i < BASE_JD_ATOM_COUNT; i++) {
-		if (kctx->jctx.atoms[i].dep_atom[0] == katom ||
-		    kctx->jctx.atoms[i].dep_atom[1] == katom) {
+		if (kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[0]) == katom ||
+		    kbase_jd_katom_dep_atom(&kctx->jctx.atoms[i].dep[1]) == katom) {
 			kbase_jd_atom *dep_atom = &kctx->jctx.atoms[i];
 
 			if ((dep_atom->core_req & BASEP_JD_REQ_ATOM_TYPE) ==
@@ -680,9 +681,9 @@ mali_bool jd_done_nolock(kbase_jd_atom *katom)
 	/* This is needed in case an atom is failed due to being invalid, this
 	 * can happen *before* the jobs that the atom depends on have completed */
 	for (i = 0; i < 2; i++) {
-		if (katom->dep_atom[i]) {
+		if ( kbase_jd_katom_dep_atom(&katom->dep[i])) {
 			list_del(&katom->dep_item[i]);
-			katom->dep_atom[i] = NULL;
+			kbase_jd_katom_dep_clear(&katom->dep[i]);
 		}
 	}
 
@@ -842,6 +843,8 @@ mali_bool jd_submit_atom(kbase_context *kctx,
 	katom->nice_prio = user_atom->prio;
 	katom->atom_flags = 0;
 	katom->retry_count = 0;
+
+	
 #ifdef CONFIG_KDS
 	/* Start by assuming that the KDS dependencies are satisfied,
 	 * kbase_jd_pre_external_resources will correct this if there are dependencies */
@@ -849,20 +852,52 @@ mali_bool jd_submit_atom(kbase_context *kctx,
 	katom->kds_rset = NULL;
 #endif				/* CONFIG_KDS */
 
+
+	/* Don't do anything if there is a mess-up with the dependencies.
+	   This is done in a separate loop to check both dependencies at once; otherwise
+	   it would add extra complexity to deal with the 1st dependency (just added to the list)
+	   if only the 2nd one has an invalid config.
+	 */
+	for (i = 0; i < 2; i++) {
+		int dep_atom_number = user_atom->pre_dep[i].atom_id;
+		base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
+
+		if (dep_atom_number) {
+			if ( dep_atom_type != BASE_JD_DEP_TYPE_ORDER && dep_atom_type != BASE_JD_DEP_TYPE_DATA )
+			{
+				katom->event_code = BASE_JD_EVENT_JOB_CONFIG_FAULT;
+				katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
+				ret = jd_done_nolock(katom);
+				goto out;
+			}
+		}
+	}
+	
 	/* Add dependencies */
 	for (i = 0; i < 2; i++) {
-		int dep_atom_number = user_atom->pre_dep[i];
-		katom->dep_atom[i] = NULL;
+		int dep_atom_number = user_atom->pre_dep[i].atom_id;
+		base_jd_dep_type dep_atom_type = user_atom->pre_dep[i].dependency_type;
+
+		kbase_jd_katom_dep_clear(&katom->dep[i]);
+
 		if (dep_atom_number) {
 			kbase_jd_atom *dep_atom = &jctx->atoms[dep_atom_number];
 
 			if (dep_atom->status == KBASE_JD_ATOM_STATE_UNUSED || dep_atom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
 				if (dep_atom->event_code != BASE_JD_EVENT_DONE) {
-					if (i == 1 && katom->dep_atom[0]) {
+					/* don't stop this atom if it has an order dependency only to the failed one,
+					 try to submit it through the normal path */
+					if ( dep_atom_type == BASE_JD_DEP_TYPE_ORDER &&
+							dep_atom->event_code > BASE_JD_EVENT_ACTIVE) {
+						continue;
+					}
+
+					if (i == 1 && kbase_jd_katom_dep_atom(&katom->dep[0])) {
 						/* Remove the previous dependency */
 						list_del(&katom->dep_item[0]);
-						katom->dep_atom[0] = NULL;
+						kbase_jd_katom_dep_clear(&katom->dep[0]);
 					}
+					
 					/* Atom has completed, propagate the error code if any */
 					katom->event_code = dep_atom->event_code;
 					katom->status = KBASE_JD_ATOM_STATE_QUEUED;
@@ -881,12 +916,13 @@ mali_bool jd_submit_atom(kbase_context *kctx,
 						}
 					}					
 					ret = jd_done_nolock(katom);
+					
 					goto out;
 				}
 			} else {
 				/* Atom is in progress, add this atom to the list */
 				list_add_tail(&katom->dep_item[i], &dep_atom->dep_head[i]);
-				katom->dep_atom[i] = dep_atom;
+				kbase_jd_katom_dep_set(&katom->dep[i], dep_atom, dep_atom_type);
 				queued = 1;
 			}
 		}
@@ -1449,7 +1485,9 @@ static enum hrtimer_restart zap_timeout_callback(struct hrtimer *timer)
 void kbase_jd_zap_context(kbase_context *kctx)
 {
 	kbase_jd_atom *katom;
+	#if 0
 	struct list_head *entry,*entry1;
+	#endif
 	kbase_device *kbdev;
 	zap_reset_data reset_data;
 	unsigned long flags;
@@ -1457,18 +1495,30 @@ void kbase_jd_zap_context(kbase_context *kctx)
 	KBASE_DEBUG_ASSERT(kctx);
 
 	kbdev = kctx->kbdev;
-	
+
 	KBASE_TRACE_ADD(kbdev, JD_ZAP_CONTEXT, kctx, NULL, 0u, 0u);
 	kbase_job_zap_context(kctx);
 
 	mutex_lock(&kctx->jctx.lock);
+
 	/*
 	 * While holding the kbase_jd_context lock clean up jobs which are known to kbase but are
 	 * queued outside the job scheduler.
 	 */
-	/*
-	pr_info("%p,%p,%p\n",&kctx->waiting_soft_jobs,kctx->waiting_soft_jobs.next,kctx->waiting_soft_jobs.prev);
-	*/
+	
+	pr_info("%p,%p,%p\n",
+			&kctx->waiting_soft_jobs,
+			kctx->waiting_soft_jobs.next,
+			kctx->waiting_soft_jobs.prev);
+	
+	while (!list_empty(&kctx->waiting_soft_jobs)) {
+		katom = list_first_entry(&kctx->waiting_soft_jobs,
+								 struct kbase_jd_atom,
+								 dep_item[0]);
+		list_del(&katom->dep_item[0]);
+		kbase_cancel_soft_job(katom);
+	}
+	#if 0
 	list_for_each_safe(entry, entry1, &kctx->waiting_soft_jobs) {
 		if(entry == (struct list_head *)LIST_POISON1)
 			pr_err("@get to the end of a list, error happened in list somewhere@\n");
@@ -1476,6 +1526,7 @@ void kbase_jd_zap_context(kbase_context *kctx)
 			pr_info("katom = %p,&katom->dep_item[0] = %p\n",katom,&katom->dep_item[0]);
 		kbase_cancel_soft_job(katom);
 	}
+	#endif
 	/* kctx->waiting_soft_jobs is not valid after this point */
 
 #ifdef CONFIG_KDS
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.c b/drivers/gpu/arm/midgard/mali_kbase_mem.c
index c913c14708d1..f1f44769c9b9 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_mem.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.c
@@ -137,6 +137,8 @@ kbase_va_region *kbase_region_tracker_find_region_enclosing_address(kbase_contex
 
 	KBASE_DEBUG_ASSERT(NULL != kctx);
 
+	lockdep_assert_held(&kctx->reg_lock);
+
 	rbnode = kctx->reg_rbtree.rb_node;
 	while (rbnode) {
 		u64 tmp_start_pfn, tmp_end_pfn;
@@ -168,6 +170,8 @@ kbase_va_region *kbase_region_tracker_find_region_base_address(kbase_context *kc
 
 	KBASE_DEBUG_ASSERT(NULL != kctx);
 
+	lockdep_assert_held(&kctx->reg_lock);
+
 	rbnode = kctx->reg_rbtree.rb_node;
 	while (rbnode) {
 		reg = rb_entry(rbnode, struct kbase_va_region, rblink);
@@ -348,6 +352,8 @@ mali_error kbase_add_va_region(kbase_context *kctx, struct kbase_va_region *reg,
 	KBASE_DEBUG_ASSERT(NULL != kctx);
 	KBASE_DEBUG_ASSERT(NULL != reg);
 
+	lockdep_assert_held(&kctx->reg_lock);
+
 	if (!align)
 		align = 1;
 
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.h b/drivers/gpu/arm/midgard/mali_kbase_mem.h
index 8cbde3210a45..7baa6f87af7f 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_mem.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.h
@@ -278,15 +278,14 @@ static INLINE size_t kbase_reg_current_backed_size(struct kbase_va_region * reg)
 
 static INLINE struct kbase_mem_phy_alloc * kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
 {
-	struct kbase_mem_phy_alloc * alloc;
-	const size_t extra_pages = (sizeof(*alloc) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	struct kbase_mem_phy_alloc *alloc;
 
 	/* Prevent nr_pages*sizeof + sizeof(*alloc) from wrapping around. */
-	if (nr_pages > (((size_t) -1 / sizeof(*alloc->pages))) - extra_pages)
+	if (nr_pages > ((((size_t) -1) - sizeof(*alloc)) / sizeof(*alloc->pages)))
 		return ERR_PTR(-ENOMEM);
 
 	alloc = vzalloc(sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages);
-	if (!alloc) 
+	if (!alloc)
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&alloc->kref);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
index 59bafcabf724..088fd559fcb5 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
@@ -884,7 +884,7 @@ int kbase_mem_commit(kbase_context * kctx, mali_addr64 gpu_addr, u64 new_pages,
 						(first_bad << PAGE_SHIFT),
 						mapping->vm_end);
 				WARN(zap_res,
-				     "Failed to zap VA range (0x%lx -0x%lx);\n",
+				     "Failed to zap VA range (0x%lx - 0x%lx);\n",
 				     mapping->vm_start +
 				     (first_bad << PAGE_SHIFT),
 				     mapping->vm_end
@@ -1002,7 +1002,7 @@ static const struct vm_operations_struct kbase_vm_ops = {
 	.fault = kbase_cpu_vm_fault
 };
 
-static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, int free_on_close)
+static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close)
 {
 	struct kbase_cpu_mapping *map;
 	u64 start_off = vma->vm_pgoff - reg->start_pfn;
@@ -1077,8 +1077,13 @@ static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vm
 	map->page_off = start_off;
 	map->region = free_on_close ? reg : NULL;
 	map->kctx = reg->kctx;
-	map->vm_start = vma->vm_start;
-	map->vm_end = vma->vm_end;
+	map->vm_start = vma->vm_start + aligned_offset;
+	if (aligned_offset) {
+		KBASE_DEBUG_ASSERT(!start_off);
+		map->vm_end = map->vm_start + (reg->nr_pages << PAGE_SHIFT);
+	} else {
+		map->vm_end = vma->vm_end;
+	}
 	map->alloc = kbase_mem_phy_alloc_get(reg->alloc);
 	map->count = 1; /* start with one ref */
 
@@ -1250,6 +1255,7 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
 	int err = 0;
 	int free_on_close = 0;
 	struct device *dev = kctx->kbdev->dev;
+	size_t aligned_offset = 0;
 
 	dev_dbg(dev, "kbase_mmap\n");
 	nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -1316,7 +1322,6 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
 		gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
 		reg = kctx->pending_regions[cookie];
 		if (NULL != reg) {
-			size_t aligned_offset = 0;
 
 			if (reg->flags & KBASE_REG_ALIGNED) {
 				/* nr_pages must be able to hold alignment pages
@@ -1431,7 +1436,7 @@ overflow:
 	} /* default */
 	} /* switch */
 map:
-	err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, free_on_close);
+	err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, aligned_offset, free_on_close);
 
 	if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
 		/* MMU dump - userspace should now have a reference on
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu.c b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
index 27fd4f29b315..17e4e08b68ff 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_mmu.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
@@ -213,8 +213,8 @@ static void page_fault_worker(struct work_struct *data)
 		if (MALI_ERROR_NONE != err) {
 			/* failed to insert pages, handle as a normal PF */
 			mutex_unlock(&faulting_as->transaction_mutex);
-			kbase_gpu_vm_unlock(kctx);
 			kbase_free_phy_pages_helper(region->alloc, new_pages);
+			kbase_gpu_vm_unlock(kctx);
 			/* The locked VA region will be unlocked and the cache invalidated in here */
 			kbase_mmu_report_fault_and_kill(kctx, faulting_as);
 			goto fault_done;
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.c b/drivers/gpu/arm/midgard/mali_kbase_pm.c
index a0cd00b251b4..3fc671017b0e 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_pm.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm.c
@@ -401,13 +401,6 @@ void kbase_pm_suspend(struct kbase_device *kbdev)
 	 * reaches zero. */
 	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);
 
-	/* Suspend PM Metric timer on system suspend.
-	 * It is ok if kbase_pm_context_idle() is still running, it is safe
-	 * to still complete the last active time period - the pm stats will
-	 * get reset on resume anyway.
-	 */
-	kbasep_pm_metrics_term(kbdev);
-
 	/* NOTE: We synchronize with anything that was just finishing a
 	 * kbase_pm_context_idle() call by locking the pm.lock below */
 
@@ -427,14 +420,8 @@ void kbase_pm_resume(struct kbase_device *kbdev)
 	/* MUST happen before any pm_context_active calls occur */
 	mutex_lock(&kbdev->pm.lock);
 	kbdev->pm.suspending = MALI_FALSE;
-
 	kbase_pm_do_poweron(kbdev, MALI_TRUE);
-
 	mutex_unlock(&kbdev->pm.lock);
-	
-	/* Restart PM Metric timer on resume */
-	kbasep_pm_metrics_init(kbdev);
-	kbasep_pm_record_gpu_idle(kbdev);
 
 	/* Initial active call, to power on the GPU/cores if needed */
 	kbase_pm_context_active(kbdev);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm_driver.c b/drivers/gpu/arm/midgard/mali_kbase_pm_driver.c
index eb752e0323c3..d8c9bd8fe285 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_pm_driver.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm_driver.c
@@ -651,8 +651,12 @@ void kbase_pm_clock_on(kbase_device *kbdev, mali_bool is_resume)
 	if (is_resume && kbdev->pm.callback_power_resume) {
 		kbdev->pm.callback_power_resume(kbdev);
 	} else if (kbdev->pm.callback_power_on) {
-		if (kbdev->pm.callback_power_on(kbdev))
-			reset_required = MALI_TRUE;
+		kbdev->pm.callback_power_on(kbdev);
+		/* If your platform properly keeps the GPU state you may use the return
+		 * value of the callback_power_on function to conditionally reset the
+		 * GPU on power up. Currently we are conservative and always reset the
+		 * GPU. */
+		reset_required = MALI_TRUE;
 	}
 
 	spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
@@ -750,8 +754,11 @@ static void kbase_pm_hw_issues(kbase_device *kbdev)
 	u32 value = 0;
 	u32 config_value;
 
-	/* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443. */
-	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443))
+	/* Needed due to MIDBASE-1494: LS_PAUSEBUFFER_DISABLE. See PRLAM-8443.
+	 * and
+	 * needed due to MIDGLES-3539. See PRLAM-11035 */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8443) ||
+			kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11035))
 		value |= SC_LS_PAUSEBUFFER_DISABLE;
 
 	/* Needed due to MIDBASE-2054: SDC_DISABLE_OQ_DISCARD. See PRLAM-10327. */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm_policy.c b/drivers/gpu/arm/midgard/mali_kbase_pm_policy.c
index 7ac2d86e8009..7e43512181bd 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_pm_policy.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm_policy.c
@@ -375,17 +375,18 @@ void kbase_pm_update_cores_state_nolock(kbase_device *kbdev)
 	/* Are any cores being powered on? */
 	if (~kbdev->pm.desired_shader_state & desired_bitmap ||
 	    kbdev->pm.ca_in_transition != MALI_FALSE) {
+
+		/* Check if we are powering off any cores before updating shader state */
+		if (kbdev->pm.desired_shader_state & ~desired_bitmap) {
+			/* Start timer to power off cores */
+			kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap);
+			kbdev->pm.shader_poweroff_pending_time = kbdev->pm.poweroff_shader_ticks;
+		}
+
 		kbdev->pm.desired_shader_state = desired_bitmap;
 
 		/* If any cores are being powered on, transition immediately */
 		cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
-
-		/* Ensure timer does not power off wanted cores */
-		if (kbdev->pm.shader_poweroff_pending != 0) {
-			kbdev->pm.shader_poweroff_pending &= ~kbdev->pm.desired_shader_state;
-			if (kbdev->pm.shader_poweroff_pending == 0)
-				kbdev->pm.shader_poweroff_pending_time = 0;
-		}
 	} else if (kbdev->pm.desired_shader_state & ~desired_bitmap) {
 		/* Start timer to power off cores */
 		kbdev->pm.shader_poweroff_pending |= (kbdev->pm.desired_shader_state & ~desired_bitmap);
@@ -397,6 +398,13 @@ void kbase_pm_update_cores_state_nolock(kbase_device *kbdev)
 		hrtimer_cancel(&kbdev->pm.gpu_poweroff_timer);
 	}
 
+	/* Ensure timer does not power off wanted cores and make sure to power off unwanted cores */
+	if (kbdev->pm.shader_poweroff_pending != 0) {
+		kbdev->pm.shader_poweroff_pending &= ~(kbdev->pm.desired_shader_state & desired_bitmap);
+		if (kbdev->pm.shader_poweroff_pending == 0)
+			kbdev->pm.shader_poweroff_pending_time = 0;
+	}
+
 	/* Don't need 'cores_are_available', because we don't return anything */
 	CSTD_UNUSED(cores_are_available);
 }
diff --git a/drivers/gpu/arm/midgard/mali_kbase_replay.c b/drivers/gpu/arm/midgard/mali_kbase_replay.c
index 0e9ed621f59d..1f4ac3cf2395 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_replay.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_replay.c
@@ -681,7 +681,7 @@ static void kbasep_replay_reset_softjob(kbase_jd_atom *katom,
 						       kbase_jd_atom *dep_atom)
 {
 	katom->status = KBASE_JD_ATOM_STATE_QUEUED;
-	katom->dep_atom[0] = dep_atom;
+	kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
 	list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
 }
 
@@ -752,8 +752,8 @@ static void kbasep_replay_create_atom(kbase_context *kctx,
 	atom->prio = ((prio << 16) / ((20 << 16) / 128)) - 128;
 	atom->atom_number = atom_nr;
 
-	atom->pre_dep[0] = 0;
-	atom->pre_dep[1] = 0;
+	base_jd_atom_dep_set(&atom->pre_dep[0], 0, BASE_JD_DEP_TYPE_INVALID);
+	base_jd_atom_dep_set(&atom->pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
 
 	atom->udata.blob[0] = 0;
 	atom->udata.blob[1] = 0;
@@ -801,7 +801,7 @@ static mali_error kbasep_replay_create_atoms(kbase_context *kctx,
 	kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
 	kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
 
-	f_atom->pre_dep[0] = t_atom_nr;
+	base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr, BASE_JD_DEP_TYPE_DATA);
 
 	return MALI_ERROR_NONE;
 }
diff --git a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
index 82ee7c4139ee..0324f9d502a9 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
@@ -39,7 +39,7 @@
 static int kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
 {
 	kbase_va_region *reg;
-	phys_addr_t addr;
+	phys_addr_t addr = 0;
 	u64 pfn;
 	u32 offset;
 	char *page;
@@ -111,19 +111,14 @@ static int kbase_dump_cpu_gpu_time(kbase_jd_atom *katom)
 		return 0;
 	}
 
+	kbase_gpu_vm_lock(kctx);
 	reg = kbase_region_tracker_find_region_enclosing_address(kctx, jc);
-	if (!reg)
-		return 0;
-
-	if (!(reg->flags & KBASE_REG_GPU_WR)) {
-		/* Region is not writable by GPU so we won't write to it either */
-		return 0;
-	}
-
-	if (!reg->alloc->pages)
-		return 0;
+	if (reg &&
+	    (reg->flags & KBASE_REG_GPU_WR) &&
+	    reg->alloc && reg->alloc->pages)
+		addr = reg->alloc->pages[pfn - reg->start_pfn];
 
-	addr = reg->alloc->pages[pfn - reg->start_pfn];
+	kbase_gpu_vm_unlock(kctx);
 	if (!addr)
 		return 0;
 
diff --git a/drivers/gpu/arm/midgard/mali_kbase_uku.h b/drivers/gpu/arm/midgard/mali_kbase_uku.h
index 84ab305edf35..f1dd105d8be0 100755
--- a/drivers/gpu/arm/midgard/mali_kbase_uku.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_uku.h
@@ -46,7 +46,7 @@
 
 #include "mali_kbase_gpuprops_types.h"
 
-#define BASE_UK_VERSION_MAJOR 6
+#define BASE_UK_VERSION_MAJOR 7
 #define BASE_UK_VERSION_MINOR 0
 
 typedef struct kbase_uk_mem_alloc {
@@ -300,9 +300,7 @@ typedef enum kbase_uk_function_id {
 	KBASE_FUNC_MEM_FLAGS_CHANGE,
 	KBASE_FUNC_MEM_ALIAS,
 
-	KBASE_FUNC_JOB_SUBMIT,
-
-	KBASE_FUNC_SYNC,
+	KBASE_FUNC_SYNC  = (UK_FUNC_ID + 8),
 
 	KBASE_FUNC_POST_TERM,
 
@@ -328,9 +326,12 @@ typedef enum kbase_uk_function_id {
 	KBASE_FUNC_FENCE_VALIDATE,
 	KBASE_FUNC_STREAM_CREATE,
 	KBASE_FUNC_GET_PROFILING_CONTROLS,
-	KBASE_FUNC_SET_PROFILING_CONTROLS /* to be used only for testing
+	KBASE_FUNC_SET_PROFILING_CONTROLS, /* to be used only for testing
 					   * purposes, otherwise these controls
 					   * are set through gator API */
+	KBASE_FUNC_JOB_SUBMIT = (UK_FUNC_ID + 27)
+
 } kbase_uk_function_id;
 
+
 #endif				/* _KBASE_UKU_H_ */
diff --git a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
index f6766967e779..3cd50fe1c013 100755
--- a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
+++ b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
@@ -215,16 +215,28 @@ static int pm_callback_runtime_on(kbase_device *kbdev)
 {
 #ifdef CONFIG_MALI_MIDGARD_DVFS	
 	struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
+	unsigned long flags;
+	unsigned int clock;
 #endif
 	kbase_platform_power_on(kbdev);
 
 	kbase_platform_clock_on(kbdev);
 #ifdef CONFIG_MALI_MIDGARD_DVFS
 	if (platform->dvfs_enabled) {
-		/*if (kbase_platform_dvfs_enable(true, MALI_DVFS_START_FREQ)!= MALI_TRUE)*/
-		/*printk("%s\n",__func__);*/
-		if (kbase_platform_dvfs_enable(true, MALI_DVFS_CURRENT_FREQ)!= MALI_TRUE)
+		if (platform->gpu_in_touch) {
+			clock = p_mali_dvfs_infotbl[MALI_DVFS_STEP-1].clock;
+			spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+			platform->gpu_in_touch = false;
+			spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
+		} else {
+			clock = MALI_DVFS_CURRENT_FREQ;
+		}
+		/*
+		pr_info("%s,clock = %d\n",__func__,clock);
+		*/
+		if (kbase_platform_dvfs_enable(true, clock) != MALI_TRUE)
 			return -EPERM;
+
 	} else {
 		if (kbase_platform_dvfs_enable(false, MALI_DVFS_CURRENT_FREQ)!= MALI_TRUE)
 			return -EPERM;
@@ -237,6 +249,7 @@ static void pm_callback_runtime_off(kbase_device *kbdev)
 {
 #ifdef CONFIG_MALI_MIDGARD_DVFS	
 	struct rk_context *platform = (struct rk_context *)kbdev->platform_context;
+	unsigned long flags;
 #endif
 
 	kbase_platform_clock_off(kbdev);
@@ -247,6 +260,9 @@ static void pm_callback_runtime_off(kbase_device *kbdev)
 		/*printk("%s\n",__func__);*/
 		if (kbase_platform_dvfs_enable(false, p_mali_dvfs_infotbl[0].clock)!= MALI_TRUE)
 			printk("[err] disabling dvfs is faled\n");
+		spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+		platform->gpu_in_touch = false;
+		spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
 	}
 #endif
 }
diff --git a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_dvfs.c b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_dvfs.c
index 4a3d5b03b4c0..e1e21b06b286 100755
--- a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_dvfs.c
+++ b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_dvfs.c
@@ -40,7 +40,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/driver.h>
 #include <linux/rk_fb.h>
-
+#include <linux/input.h>
 #include <linux/rockchip/common.h>
 
 #include <platform/rk/mali_kbase_platform.h>
@@ -98,6 +98,92 @@ static mali_dvfs_status mali_dvfs_status_current;
 
 #define LIMIT_FPS 60
 #define LIMIT_FPS_POWER_SAVE 50
+
+#ifdef CONFIG_MALI_MIDGARD_DVFS
+static void gpufreq_input_event(struct input_handle *handle, unsigned int type,
+		unsigned int code, int value)
+{
+	mali_dvfs_status *dvfs_status;
+	struct rk_context *platform;
+	unsigned long flags;
+
+	if (type != EV_ABS)
+		return;
+
+	dvfs_status = &mali_dvfs_status_current;
+	platform = (struct rk_context *)dvfs_status->kbdev->platform_context;
+
+	spin_lock_irqsave(&platform->gpu_in_touch_lock, flags);
+	platform->gpu_in_touch = true;
+	spin_unlock_irqrestore(&platform->gpu_in_touch_lock, flags);
+}
+
+static int gpufreq_input_connect(struct input_handler *handler,
+		struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int error;
+
+	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "gpufreq";
+
+	error = input_register_handle(handle);
+	if (error)
+		goto err2;
+
+	error = input_open_device(handle);
+	if (error)
+		goto err1;
+	pr_info("%s\n",__func__);
+	return 0;
+err1:
+	input_unregister_handle(handle);
+err2:
+	kfree(handle);
+	return error;
+}
+
+static void gpufreq_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+	pr_info("%s\n",__func__);
+}
+
+static const struct input_device_id gpufreq_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		.absbit = { [BIT_WORD(ABS_X)] =
+			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+	},
+	{ },
+};
+
+static struct input_handler gpufreq_input_handler = {
+	.event		= gpufreq_input_event,
+	.connect	= gpufreq_input_connect,
+	.disconnect	= gpufreq_input_disconnect,
+	.name		= "gpufreq",
+	.id_table	= gpufreq_ids,
+};
+#endif
+
 static void mali_dvfs_event_proc(struct work_struct *w)
 {
 	unsigned long flags;
@@ -379,6 +465,7 @@ int kbase_platform_dvfs_init(struct kbase_device *kbdev)
 	 */
 	struct rk_context *platform;
 	int i;
+	int rc;
 	
 	platform = (struct rk_context *)kbdev->platform_context;
 	if (NULL == platform)
@@ -418,6 +505,9 @@ not_assigned :
 	mutex_init(&mali_set_clock_lock);
 	mutex_init(&mali_enable_clock_lock);
 
+	spin_lock_init(&platform->gpu_in_touch_lock);
+	rc = input_register_handler(&gpufreq_input_handler);
+
 	/*add a error handling here */
 	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
 	mali_dvfs_status_current.kbdev = kbdev;
@@ -439,6 +529,8 @@ void kbase_platform_dvfs_term(void)
 		destroy_workqueue(mali_dvfs_wq);
 
 	mali_dvfs_wq = NULL;
+
+	input_unregister_handler(&gpufreq_input_handler);
 }
 #endif /*CONFIG_MALI_MIDGARD_DVFS*/
 
diff --git a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_platform.h b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_platform.h
index 8b105b8cf530..49ef7b59a46a 100755
--- a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_platform.h
+++ b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_platform.h
@@ -28,6 +28,8 @@ struct rk_context {
 	u32 time_busy;
 	u32 time_idle;
 	bool dvfs_enabled;
+	bool gpu_in_touch;
+	spinlock_t gpu_in_touch_lock;
 #endif
 };
 int mali_dvfs_clk_set(struct dvfs_node * node,unsigned long rate);
-- 
2.34.1