rockchip:midgard:update to r4p1_01dev0
author: xxm <xxm@rock-chips.com>
Sat, 31 May 2014 02:45:08 +0000 (10:45 +0800)
committer: xxm <xxm@rock-chips.com>
Sat, 31 May 2014 02:47:01 +0000 (10:47 +0800)
20 files changed:
drivers/gpu/arm/midgard/Kbuild
drivers/gpu/arm/midgard/mali_base_hwconfig.h
drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
drivers/gpu/arm/midgard/mali_kbase_context.c
drivers/gpu/arm/midgard/mali_kbase_core_linux.c
drivers/gpu/arm/midgard/mali_kbase_device.c
drivers/gpu/arm/midgard/mali_kbase_event.c
drivers/gpu/arm/midgard/mali_kbase_hw.c
drivers/gpu/arm/midgard/mali_kbase_instr.c
drivers/gpu/arm/midgard/mali_kbase_jd.c
drivers/gpu/arm/midgard/mali_kbase_jm.c
drivers/gpu/arm/midgard/mali_kbase_jm.h
drivers/gpu/arm/midgard/mali_kbase_js.c
drivers/gpu/arm/midgard/mali_kbase_js.h
drivers/gpu/arm/midgard/mali_kbase_js_policy_cfs.c
drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
drivers/gpu/arm/midgard/mali_kbase_mmu.c
drivers/gpu/arm/midgard/mali_kbase_pm_driver.c
drivers/gpu/arm/midgard/mali_kbase_replay.c
drivers/gpu/arm/midgard/mali_kbase_uku.h

index ea8c86489b069ed7ed52a267dcd2e142043bf602..c516686153bdcf3ace131f7c2b8bc974d73ee2d6 100755 (executable)
@@ -15,7 +15,7 @@
 
 
 # Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r4p0-02rel0"
+MALI_RELEASE_NAME ?= "r4p1-01dev0"
 
 # Paths required for build
 KBASE_PATH = $(src)
index d770cfdd91a8de8f77db00a9d296ba525ae14763..8609fa0a61168705dff4410b9ac4b97c785431bc 100755 (executable)
@@ -140,6 +140,9 @@ typedef enum base_hw_issue {
        /* (Note that PRLAM-9049 also uses this work-around) */
        BASE_HW_ISSUE_8564,
 
+       /* Fragments are clamped instead of discarded when fragment depth bound op is discard and depth datum source is shader. */
+       BASE_HW_ISSUE_8634,
+
        /* Livelock issue using atomic instructions (particularly when using atomic_cmpxchg as a spinlock) */
        BASE_HW_ISSUE_8791,
 
@@ -296,11 +299,14 @@ typedef enum base_hw_issue {
        /* AFBC is not supported for T76X beta. */
        BASE_HW_ISSUE_T76X_2906,
 
+       /* RTD doesn't specify the row stride for AFBC surfaces. */
+       BASE_HW_ISSUE_T76X_3086,
+
        /* Prevent MMU deadlock for T76X beta. */
        BASE_HW_ISSUE_T76X_3285,
 
        /* Clear encoder state for a hard stopped fragment job which is AFBC
-        * encoded by soft resetting the GPU. Only for T76X r0p0 and r0p1
+        * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and r0p1_50rel0
         */
        BASE_HW_ISSUE_T76X_3542,
 
@@ -351,6 +357,7 @@ static const base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
        BASE_HW_ISSUE_8443,
        BASE_HW_ISSUE_8456,
        BASE_HW_ISSUE_8564,
+       BASE_HW_ISSUE_8634,
        BASE_HW_ISSUE_8791,
        BASE_HW_ISSUE_8803,
        BASE_HW_ISSUE_8833,
@@ -567,6 +574,7 @@ static const base_hw_issue base_hw_issues_t76x_r0p0[] = {
        BASE_HW_ISSUE_11020,
        BASE_HW_ISSUE_11024,
        BASE_HW_ISSUE_T76X_26,
+       BASE_HW_ISSUE_T76X_3086,
        BASE_HW_ISSUE_T76X_3542,
        BASE_HW_ISSUE_T76X_3556,
        BASE_HW_ISSUE_T76X_3700,
@@ -586,6 +594,24 @@ static const base_hw_issue base_hw_issues_t76x_r0p1[] = {
        BASE_HW_ISSUE_11020,
        BASE_HW_ISSUE_11024,
        BASE_HW_ISSUE_T76X_26,
+       BASE_HW_ISSUE_T76X_3086,
+       BASE_HW_ISSUE_T76X_3542,
+       BASE_HW_ISSUE_T76X_3556,
+       BASE_HW_ISSUE_T76X_3700,
+       BASE_HW_ISSUE_T76X_3793,
+       /* List of hardware issues must end with BASE_HW_ISSUE_END */
+       BASE_HW_ISSUE_END
+};
+
+/* Mali T76x r0p1_50rel0 */
+static const base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
+       BASE_HW_ISSUE_8803,
+       BASE_HW_ISSUE_9435,
+       BASE_HW_ISSUE_10649,
+       BASE_HW_ISSUE_10821,
+       BASE_HW_ISSUE_10883,
+       BASE_HW_ISSUE_10946,
+       BASE_HW_ISSUE_T76X_26,
        BASE_HW_ISSUE_T76X_3542,
        BASE_HW_ISSUE_T76X_3556,
        BASE_HW_ISSUE_T76X_3700,
@@ -605,6 +631,24 @@ static const base_hw_issue base_hw_issues_t76x_r0p2[] = {
        BASE_HW_ISSUE_11020,
        BASE_HW_ISSUE_11024,
        BASE_HW_ISSUE_T76X_26,
+       BASE_HW_ISSUE_T76X_3086,
+       BASE_HW_ISSUE_T76X_3542,
+       BASE_HW_ISSUE_T76X_3556,
+       BASE_HW_ISSUE_T76X_3700,
+       BASE_HW_ISSUE_T76X_3793,
+       /* List of hardware issues must end with BASE_HW_ISSUE_END */
+       BASE_HW_ISSUE_END
+};
+
+/* Mali T76x r0p3 */
+static const base_hw_issue base_hw_issues_t76x_r0p3[] = {
+       BASE_HW_ISSUE_8803,
+       BASE_HW_ISSUE_9435,
+       BASE_HW_ISSUE_10649,
+       BASE_HW_ISSUE_10821,
+       BASE_HW_ISSUE_10883,
+       BASE_HW_ISSUE_10946,
+       BASE_HW_ISSUE_T76X_26,
        BASE_HW_ISSUE_T76X_3542,
        BASE_HW_ISSUE_T76X_3556,
        BASE_HW_ISSUE_T76X_3700,
@@ -621,6 +665,7 @@ static const base_hw_issue base_hw_issues_t76x_r1p0[] = {
        BASE_HW_ISSUE_10821,
        BASE_HW_ISSUE_10883,
        BASE_HW_ISSUE_10946,
+       BASE_HW_ISSUE_T76X_3086,
        BASE_HW_ISSUE_T76X_3700,
        BASE_HW_ISSUE_T76X_3793,
        /* List of hardware issues must end with BASE_HW_ISSUE_END */
@@ -682,9 +727,9 @@ static const base_hw_issue base_hw_issues_model_t7xx[] =
        BASE_HW_ISSUE_5736,
        BASE_HW_ISSUE_9275,
        BASE_HW_ISSUE_9435,
-       BASE_HW_ISSUE_10931,
        BASE_HW_ISSUE_11020,
        BASE_HW_ISSUE_11024,
+       BASE_HW_ISSUE_T76X_3086,
        BASE_HW_ISSUE_T76X_3700,
        BASE_HW_ISSUE_T76X_3793,
        /* List of hardware issues must end with BASE_HW_ISSUE_END */
index df3b129ee845882b761ae4306334745aa475822d..88dcff3f2803e01171493cee1e3ccae212490bf4 100755 (executable)
 /*** Begin Power Manager defaults */
 
 /* Milliseconds */
-#define DEFAULT_PM_DVFS_FREQ 100
+#define DEFAULT_PM_DVFS_FREQ 50
 
 /**
  * Default poweroff tick granuality, in nanoseconds
index 43697f5f744a111029067e63fe2c935ef431c923..5d004599812ca11fd1378c800c0ee6ce73ee225d 100755 (executable)
@@ -134,7 +134,7 @@ KBASE_EXPORT_SYMBOL(kbase_create_context)
 
 static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
 {
-       KBASE_LOG(2, reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
+       dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
        kbase_mem_phy_alloc_put(reg->alloc);
        kfree(reg);
 }
index 1798c8a475df98aa7c737cd2cff2d04e2217865f..607c485790767dce0e14b44910d8d7f466f6d735 100755 (executable)
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(shared_kernel_test_data);
 #endif /* MALI_UNIT_TEST */
 
 #define KBASE_DRV_NAME "mali"
-
+#define ROCKCHIP_VERSION 1
 static const char kbase_drv_name[] = KBASE_DRV_NAME;
 
 static int kbase_dev_nr;
@@ -668,6 +668,7 @@ copy_failed:
                        /* version buffer size check is made in compile time assert */
                        memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
                        get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+                       get_version->rk_version = ROCKCHIP_VERSION;
                        break;
                }
 
@@ -892,7 +893,7 @@ static int kbase_open(struct inode *inode, struct file *filp)
        init_waitqueue_head(&kctx->event_queue);
        filp->private_data = kctx;
 
-       KBASE_LOG(1, kbdev->dev, "created base context\n");
+       dev_dbg(kbdev->dev, "created base context\n");
 
        {
                kbasep_kctx_list_element *element;
@@ -937,7 +938,7 @@ static int kbase_release(struct inode *inode, struct file *filp)
        filp->private_data = NULL;
        kbase_destroy_context(kctx);
 
-       KBASE_LOG(1, kbdev->dev, "deleted base context\n");
+       dev_dbg(kbdev->dev, "deleted base context\n");
        kbase_release_device(kbdev);
        return 0;
 }
@@ -1096,7 +1097,7 @@ static irqreturn_t kbase_job_irq_handler(int irq, void *data)
        if (!val)
                return IRQ_NONE;
 
-       KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+       dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
 
        kbase_job_done(kbdev, val);
 
@@ -1131,7 +1132,7 @@ static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
        if (!val)
                return IRQ_NONE;
 
-       KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+       dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
 
        kbase_mmu_interrupt(kbdev, val);
 
@@ -1156,7 +1157,7 @@ static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
 
 #ifdef CONFIG_MALI_DEBUG
        if (!kbdev->pm.driver_ready_for_irqs)
-               KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+               dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
                                __func__, irq, val );
 #endif /* CONFIG_MALI_DEBUG */
        spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
@@ -1164,7 +1165,7 @@ static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
        if (!val)
                return IRQ_NONE;
 
-       KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+       dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
 
        kbase_gpu_interrupt(kbdev, val);
 
@@ -1249,7 +1250,7 @@ static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
        if (!val)
                return IRQ_NONE;
 
-       KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+       dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
 
        kbasep_irq_test_data.triggered = 1;
        wake_up(&kbasep_irq_test_data.wait);
@@ -1280,7 +1281,7 @@ static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
        if (!val)
                return IRQ_NONE;
 
-       KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+       dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
 
        kbasep_irq_test_data.triggered = 1;
        wake_up(&kbasep_irq_test_data.wait);
@@ -1352,7 +1353,7 @@ static mali_error kbasep_common_test_interrupt(kbase_device * const kbdev, u32 t
                                dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n", kbdev->irqs[tag].irq, tag);
                                err = MALI_ERROR_FUNCTION_FAILED;
                        } else {
-                               KBASE_LOG(2, kbdev->dev, "Interrupt %d (index %d) reached CPU.\n", kbdev->irqs[tag].irq, tag);
+                               dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n", kbdev->irqs[tag].irq, tag);
                        }
 
                        hrtimer_cancel(&kbasep_irq_test_data.timer);
@@ -1839,7 +1840,7 @@ static ssize_t set_split(struct device *dev, struct device_attribute *attr, cons
                        mali_js0_affinity_mask  = config->js0_mask;
                        mali_js1_affinity_mask  = config->js1_mask;
                        mali_js2_affinity_mask  = config->js2_mask;
-                       KBASE_LOG(2, dev, "Setting sc_split: '%s'\n", config->tag);
+                       dev_dbg(dev, "Setting sc_split: '%s'\n", config->tag);
                        return count;
                }
                config++;
@@ -1935,14 +1936,14 @@ static ssize_t set_js_timeouts(struct device *dev, struct device_attribute *attr
                do_div(ticks, kbdev->js_data.scheduling_tick_ns);
                kbdev->js_reset_ticks_nss = ticks;
 
-               KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks, js_soft_stop_ms);
-               KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks_cl, js_soft_stop_ms_cl);
-               KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_ss, js_hard_stop_ms_ss);
-               KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_cl, js_hard_stop_ms_cl);
-               KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_nss, js_hard_stop_ms_nss);
-               KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_ss, js_reset_ms_ss);
-               KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_cl, js_reset_ms_cl);
-               KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_nss, js_reset_ms_nss);
+               dev_dbg( kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks, js_soft_stop_ms);
+               dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks_cl, js_soft_stop_ms_cl);
+               dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_ss, js_hard_stop_ms_ss);
+               dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_cl, js_hard_stop_ms_cl);
+               dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_nss, js_hard_stop_ms_nss);
+               dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_ss, js_reset_ms_ss);
+               dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_cl, js_reset_ms_cl);
+               dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_nss, js_reset_ms_nss);
 
                return count;
        } else {
@@ -2153,8 +2154,7 @@ static ssize_t set_js_softstop_always(struct device *dev, struct device_attribut
        items = sscanf(buf, "%d", &softstop_always);
        if ((items == 1) && ((softstop_always == 0) || (softstop_always == 1))) {
                kbdev->js_data.softstop_always = (mali_bool) softstop_always;
-
-               KBASE_LOG(2, kbdev->dev, "Support for softstop on a single context: %s\n", (kbdev->js_data.softstop_always == MALI_FALSE) ? "Disabled" : "Enabled");
+               dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n", (kbdev->js_data.softstop_always == MALI_FALSE) ? "Disabled" : "Enabled");
                return count;
        } else {
                dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\nUse format " "<soft_stop_always>\n");
@@ -2364,6 +2364,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
 #ifdef CONFIG_MALI_TRACE_TIMELINE
                inited_timeline = (1u << 12),
 #endif /* CONFIG_MALI_TRACE_LINE */
+               inited_pm_powerup = (1u << 14),
        };
 
        int inited = 0;
@@ -2498,6 +2499,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
 
        mali_err = kbase_pm_powerup(kbdev);
        if (MALI_ERROR_NONE == mali_err) {
+               inited |= inited_pm_powerup;
 #ifdef CONFIG_MALI_DEBUG
 #ifndef CONFIG_MALI_NO_MALI
                if (MALI_ERROR_NONE != kbasep_common_test_interrupt_handlers(kbdev)) {
@@ -2512,6 +2514,10 @@ static int kbase_common_device_init(kbase_device *kbdev)
                mutex_init(&kbdev->kctx_list_lock);
                INIT_LIST_HEAD(&kbdev->kctx_list);
                return 0;
+       } else {
+               /* Failed to power up the GPU. */
+               dev_err(kbdev->dev, "GPU power up failed.\n");
+               err = -ENODEV;
        }
 
  out_partial:
@@ -2555,7 +2561,7 @@ static int kbase_common_device_init(kbase_device *kbdev)
        if (inited & inited_mem)
                kbase_mem_halt(kbdev);
 
-       if (inited & inited_pm)
+       if (inited & inited_pm_powerup)
                kbase_pm_halt(kbdev);
 
        if (inited & inited_irqs)
@@ -2765,6 +2771,7 @@ static int kbase_common_device_remove(struct kbase_device *kbdev)
 #endif /* CONFIG_MALI_DEBUG */
 #if MALI_CUSTOMER_RELEASE == 0
        device_remove_file(kbdev->dev, &dev_attr_js_timeouts);
+       device_remove_file(kbdev->dev, &dev_attr_force_replay);
 #endif /* MALI_CUSTOMER_RELEASE */
 #ifdef CONFIG_DEBUG_FS
        kbasep_gpu_memory_debugfs_term(kbdev);
@@ -2872,7 +2879,7 @@ static int kbase_device_runtime_suspend(struct device *dev)
 
        if (kbdev->pm.callback_power_runtime_off) {
                kbdev->pm.callback_power_runtime_off(kbdev);
-               KBASE_LOG(1, dev, "runtime suspend\n");
+               dev_dbg(dev, "runtime suspend\n");
        }
        return 0;
 }
@@ -2898,7 +2905,7 @@ int kbase_device_runtime_resume(struct device *dev)
 
        if (kbdev->pm.callback_power_runtime_on) {
                ret = kbdev->pm.callback_power_runtime_on(kbdev);
-               KBASE_LOG(1, dev, "runtime resume\n");
+               dev_dbg(dev, "runtime resume\n");
        }
        return ret;
 }
index c321ebf8c01a7999cdb04362cd24ccd6ce12e68e..6dfff10f0dbbec851dd6354406240fb69d6fef8b 100755 (executable)
@@ -326,7 +326,7 @@ void kbase_reg_write(kbase_device *kbdev, u16 offset, u32 value, kbase_context *
        KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
        KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
        KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
-       KBASE_LOG(4, kbdev->dev, "w: reg %04x val %08x", offset, value);
+       dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value);
        kbase_os_reg_write(kbdev, offset, value);
        if (kctx && kctx->jctx.tb)
                kbase_device_trace_register_access(kctx, REG_WRITE, offset, value);
@@ -341,7 +341,7 @@ u32 kbase_reg_read(kbase_device *kbdev, u16 offset, kbase_context *kctx)
        KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
        KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
        val = kbase_os_reg_read(kbdev, offset);
-       KBASE_LOG(4, kbdev->dev, "r: reg %04x val %08x", offset, val);
+       dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val);
        if (kctx && kctx->jctx.tb)
                kbase_device_trace_register_access(kctx, REG_READ, offset, val);
        return val;
@@ -490,7 +490,7 @@ void kbasep_trace_dump_msg(kbase_device *kbdev, kbase_trace *trace_msg)
        char buffer[DEBUG_MESSAGE_SIZE];
 
        kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
-       KBASE_LOG(1, kbdev->dev, "%s", buffer);
+       dev_dbg(kbdev->dev, "%s", buffer);
 }
 
 void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
@@ -550,7 +550,7 @@ void kbasep_trace_dump(kbase_device *kbdev)
        u32 start;
        u32 end;
 
-       KBASE_LOG(1, kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
+       dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
        spin_lock_irqsave(&kbdev->trace_lock, flags);
        start = kbdev->trace_first_out;
        end = kbdev->trace_next_in;
@@ -561,7 +561,7 @@ void kbasep_trace_dump(kbase_device *kbdev)
 
                start = (start + 1) & KBASE_TRACE_MASK;
        }
-       KBASE_LOG(1, kbdev->dev, "TRACE_END");
+       dev_dbg(kbdev->dev, "TRACE_END");
 
        spin_unlock_irqrestore(&kbdev->trace_lock, flags);
 
index acbccaabf6ea6ca71f6485ac086a9370117fe4d1..1e048696446a9b0ecc939b4c8e0383412e7fcf2e 100755 (executable)
@@ -70,7 +70,7 @@ int kbase_event_dequeue(kbase_context *ctx, base_jd_event_v2 *uevent)
                        mutex_unlock(&ctx->event_mutex);
                        uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
                        memset(&uevent->udata, 0, sizeof(uevent->udata));
-                       KBASE_LOG(2, ctx->kbdev->dev,
+                       dev_dbg(ctx->kbdev->dev,
                                "event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
                                BASE_JD_EVENT_DRV_TERMINATED);
                        return 0;
@@ -86,7 +86,7 @@ int kbase_event_dequeue(kbase_context *ctx, base_jd_event_v2 *uevent)
 
        mutex_unlock(&ctx->event_mutex);
 
-       KBASE_LOG(2, ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
+       dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
        uevent->event_code = atom->event_code;
        uevent->atom_number = (atom - ctx->jctx.atoms);
        uevent->udata = kbase_event_process(ctx, atom);
index 1fe4100a4844428ad34204af396c3c8d6bb611e7..abf1757631c64bd8c65b848b262f2477bb268fa9 100755 (executable)
@@ -94,15 +94,13 @@ mali_error kbase_hw_set_issues_mask(kbase_device *kbdev)
                        issues = base_hw_issues_t76x_r0p1;
                        break;
                case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9):
-                       /* TODO: MIDBASE-3084 - confirm hw issue list */
-                       issues = base_hw_issues_t76x_r0p1;
+                       issues = base_hw_issues_t76x_r0p1_50rel0;
                        break;
                case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1):
                        issues = base_hw_issues_t76x_r0p2;
                        break;
                case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1):
-                       /* TODO: MIDBASE-3086 - confirm hw issue list */
-                       issues = base_hw_issues_t76x_r0p2;
+                       issues = base_hw_issues_t76x_r0p3;
                        break;
                case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0):
                        issues = base_hw_issues_t76x_r1p0;
index 1e87476b3f3e49d7156404b6a543954ec537d9f6..0b9f355202501c91228cbb710734db51ec1a9caa 100755 (executable)
@@ -177,7 +177,7 @@ STATIC mali_error kbase_instr_hwcnt_enable_internal(kbase_device *kbdev, kbase_c
 
        err = MALI_ERROR_NONE;
 
-       KBASE_LOG(1, kbdev->dev, "HW counters dumping set-up for context %p", kctx);
+       dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
        return err;
  out_unrequest_cores:
        kbase_pm_unrequest_cores(kbdev, MALI_TRUE, shader_cores_needed);
@@ -275,7 +275,7 @@ mali_error kbase_instr_hwcnt_disable(kbase_context *kctx)
        /* Also release our Power Manager Active reference */
        kbase_pm_context_idle(kbdev);
 
-       KBASE_LOG(1, kbdev->dev, "HW counters dumping disabled for context %p", kctx);
+       dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx);
 
        err = MALI_ERROR_NONE;
 
@@ -355,7 +355,7 @@ mali_error kbase_instr_hwcnt_dump_irq(kbase_context *kctx)
        KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL, kbdev->hwcnt.addr, 0);
        kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_PRFCNT_SAMPLE, kctx);
 
-       KBASE_LOG(1, kbdev->dev, "HW counters dumping done for context %p", kctx);
+       dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx);
 
        err = MALI_ERROR_NONE;
 
index 0f877d152efd851afbb2a4f7661bb8c9650631eb..02a8471b3b48e7c7956374953c4bc2a1ceade9cd 100755 (executable)
@@ -32,7 +32,7 @@
 #endif                         /* CONFIG_UMP */
 #include <linux/random.h>
 
-#define beenthere(kctx,f, a...)  KBASE_LOG(1, kctx->kbdev->dev, "%s:" f, __func__, ##a)
+#define beenthere(kctx,f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
 /* random32 was renamed to prandom_u32 in 3.8 */
@@ -1167,7 +1167,7 @@ static void jd_done_worker(struct work_struct *data)
                if (kbdev->gpu_props.num_core_groups > 1 && 
                    !(katom->affinity & kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
                    (katom->affinity & kbdev->gpu_props.props.coherency_info.group[1].core_mask)) {
-                       KBASE_LOG(2, kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
+                       dev_dbg(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
                        kbasep_jd_cacheclean(kbdev);
                }
        }
@@ -1177,7 +1177,7 @@ static void jd_done_worker(struct work_struct *data)
            katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT       &&
            (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
            !(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)){
-               KBASE_LOG(2, kbdev->dev,
+               dev_dbg(kbdev->dev,
                                       "Soft-stopped fragment shader job got a TILE_RANGE_FAULT." \
                                       "Possible HW issue, trying SW workaround\n" );
                if (kbasep_10969_workaround_clamp_coordinates(katom)){
@@ -1185,7 +1185,7 @@ static void jd_done_worker(struct work_struct *data)
                         * Due to an HW issue we try to execute the job
                         * again.
                         */
-                       KBASE_LOG(2, kbdev->dev, "Clamping has been executed, try to rerun the job\n" );
+                       dev_dbg(kbdev->dev, "Clamping has been executed, try to rerun the job\n" );
                        katom->event_code = BASE_JD_EVENT_STOPPED;
                        katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
 
@@ -1221,7 +1221,7 @@ static void jd_done_worker(struct work_struct *data)
        if (!kbasep_js_has_atom_finished(&katom_retained_state)) {
                unsigned long flags;
                /* Requeue the atom on soft-stop / removed from NEXT registers */
-               KBASE_LOG(2, kbdev->dev, "JS: Soft Stopped/Removed from next on Ctx %p; Requeuing", kctx);
+               dev_dbg(kbdev->dev, "JS: Soft Stopped/Removed from next on Ctx %p; Requeuing", kctx);
 
                mutex_lock(&js_devdata->runpool_mutex);
                kbasep_js_clear_job_retry_submit(katom);
@@ -1518,7 +1518,7 @@ void kbase_jd_zap_context(kbase_context *kctx)
        }
        destroy_hrtimer_on_stack(&reset_data.timer);
 
-       KBASE_LOG(1, kbdev->dev, "Zap: Finished Context %p", kctx);
+       dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
 
        /* Ensure that the signallers of the waitqs have finished */
        mutex_lock(&kctx->jctx.lock);
index c85f63ed05af9563173d83eb5542c238e1e72331..ca7935f64c133789f671bd8e8bf05e8e7d70c7bb 100755 (executable)
@@ -30,7 +30,7 @@
 
 #include "mali_kbase_jm.h"
 
-#define beenthere(kctx, f, a...)  KBASE_LOG(1, kctx->kbdev->dev, "%s:" f, __func__, ##a)
+#define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
 
 #ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
 u64 mali_js0_affinity_mask = 0xFFFFFFFFFFFFFFFFULL;
@@ -122,7 +122,7 @@ static void kbase_job_hw_submit(kbase_device *kbdev, kbase_jd_atom *katom, int j
        katom->start_timestamp = ktime_get();
 
        /* GO ! */
-       KBASE_LOG(2, kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx", katom, kctx, js, jc_head, katom->affinity);
+       dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx", katom, kctx, js, jc_head, katom->affinity);
 
        KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js, (u32) katom->affinity);
 
@@ -375,7 +375,7 @@ void kbase_job_done(kbase_device *kbdev, u32 done)
                                }
                        }
 
-                       KBASE_LOG(2, kbdev->dev, "Job ended with status 0x%08X\n", completion_code);
+                       dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n", completion_code);
 
                        nr_done = kbasep_jm_nr_jobs_submitted(slot);
                        nr_done -= (active >> i) & 1;
@@ -471,7 +471,7 @@ static void kbasep_job_slot_soft_or_hard_stop_do_action(kbase_device *kbdev, int
                mali_bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev, core_reqs);
                if (!soft_stop_allowed) {
 #ifdef CONFIG_MALI_DEBUG
-                       KBASE_LOG(2, kbdev->dev, "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X", (unsigned int)core_reqs);
+                       dev_dbg(kbdev->dev, "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X", (unsigned int)core_reqs);
 #endif                         /* CONFIG_MALI_DEBUG */
                        return;
                }
@@ -844,7 +844,7 @@ void kbase_job_zap_context(kbase_context *kctx)
        mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
        js_kctx_info->ctx.is_dying = MALI_TRUE;
 
-       KBASE_LOG(1, kbdev->dev, "Zap: Try Evict Ctx %p", kctx);
+       dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx);
        mutex_lock(&js_devdata->queue_mutex);
        evict_success = kbasep_js_policy_try_evict_ctx(&js_devdata->policy, kctx);
        mutex_unlock(&js_devdata->queue_mutex);
@@ -897,7 +897,7 @@ void kbase_job_zap_context(kbase_context *kctx)
 
                KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u, js_kctx_info->ctx.is_scheduled);
 
-               KBASE_LOG(2, kbdev->dev, "Zap: Ctx %p evict_success=%d, scheduled=%d", kctx, evict_success, js_kctx_info->ctx.is_scheduled);
+               dev_dbg(kbdev->dev, "Zap: Ctx %p evict_success=%d, scheduled=%d", kctx, evict_success, js_kctx_info->ctx.is_scheduled);
 
                if (evict_success != MALI_FALSE) {
                        /* Only cancel jobs when we evicted from the policy queue. No Power
@@ -912,7 +912,7 @@ void kbase_job_zap_context(kbase_context *kctx)
                mali_bool was_retained;
                /* Case c: didn't evict, but it is scheduled - it's in the Run Pool */
                KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u, js_kctx_info->ctx.is_scheduled);
-               KBASE_LOG(2, kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);
+               dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);
 
                /* Disable the ctx from submitting any more jobs */
                spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
@@ -925,7 +925,7 @@ void kbase_job_zap_context(kbase_context *kctx)
                /* Since it's scheduled and we have the jsctx_mutex, it must be retained successfully */
                KBASE_DEBUG_ASSERT(was_retained != MALI_FALSE);
 
-               KBASE_LOG(2, kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);
+               dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);
                /* Cancel any remaining running jobs for this kctx - if any. Submit is disallowed
                 * which takes effect immediately, so no more new jobs will appear after we do this.  */
                for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
@@ -934,7 +934,7 @@ void kbase_job_zap_context(kbase_context *kctx)
                spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
                mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
-               KBASE_LOG(2, kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)", kctx);
+               dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)", kctx);
                kbasep_js_runpool_release_ctx(kbdev, kctx);
        }
        KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);
index 9d004d91a491fbe324c554b575194cae07e855fb..bd2b70df009b1a467528842405f0d8d8e6076894 100755 (executable)
@@ -115,7 +115,7 @@ static INLINE kbase_jd_atom *kbasep_jm_dequeue_submit_slot(kbase_jm_slot *slot)
        slot->submitted_head = (slot->submitted_head + 1) & BASE_JM_SUBMIT_SLOTS_MASK;
        slot->submitted_nr--;
 
-       KBASE_LOG(2, katom->kctx->kbdev->dev, "katom %p new head %u", (void *)katom, (unsigned int)slot->submitted_head);
+       dev_dbg(katom->kctx->kbdev->dev, "katom %p new head %u", (void *)katom, (unsigned int)slot->submitted_head);
 
        return katom;
 }
index 85b82d86c82012a274445f710803abb858176f0f..aeeea6893afecee0bf9b6a290263227a3c71acfb 100755 (executable)
@@ -485,7 +485,7 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
                mali_bool use_workaround_for_security;
                use_workaround_for_security = (mali_bool) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE);
                if (use_workaround_for_security != MALI_FALSE) {
-                       KBASE_LOG(2, kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
+                       dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
                        kbdev->nr_user_address_spaces = 1;
                }
        }
@@ -517,28 +517,28 @@ mali_error kbasep_js_devdata_init(kbase_device * const kbdev)
        js_devdata->cfs_ctx_runtime_init_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES);
        js_devdata->cfs_ctx_runtime_min_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES);
 
-       KBASE_LOG(2, kbdev->dev, "JS Config Attribs: ");
-       KBASE_LOG(2, kbdev->dev, "\tscheduling_tick_ns:%u", js_devdata->scheduling_tick_ns);
-       KBASE_LOG(2, kbdev->dev, "\tsoft_stop_ticks:%u", js_devdata->soft_stop_ticks);
-       KBASE_LOG(2, kbdev->dev, "\tsoft_stop_ticks_cl:%u", js_devdata->soft_stop_ticks_cl);
-       KBASE_LOG(2, kbdev->dev, "\thard_stop_ticks_ss:%u", js_devdata->hard_stop_ticks_ss);
-       KBASE_LOG(2, kbdev->dev, "\thard_stop_ticks_cl:%u", js_devdata->hard_stop_ticks_cl);
-       KBASE_LOG(2, kbdev->dev, "\thard_stop_ticks_nss:%u", js_devdata->hard_stop_ticks_nss);
-       KBASE_LOG(2, kbdev->dev, "\tgpu_reset_ticks_ss:%u", js_devdata->gpu_reset_ticks_ss);
-       KBASE_LOG(2, kbdev->dev, "\tgpu_reset_ticks_cl:%u", js_devdata->gpu_reset_ticks_cl);
-       KBASE_LOG(2, kbdev->dev, "\tgpu_reset_ticks_nss:%u", js_devdata->gpu_reset_ticks_nss);
-       KBASE_LOG(2, kbdev->dev, "\tctx_timeslice_ns:%u", js_devdata->ctx_timeslice_ns);
-       KBASE_LOG(2, kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u", js_devdata->cfs_ctx_runtime_init_slices);
-       KBASE_LOG(2, kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u", js_devdata->cfs_ctx_runtime_min_slices);
+       dev_dbg(kbdev->dev, "JS Config Attribs: ");
+       dev_dbg(kbdev->dev, "\tscheduling_tick_ns:%u", js_devdata->scheduling_tick_ns);
+       dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u", js_devdata->soft_stop_ticks);
+       dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u", js_devdata->soft_stop_ticks_cl);
+       dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u", js_devdata->hard_stop_ticks_ss);
+       dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u", js_devdata->hard_stop_ticks_cl);
+       dev_dbg(kbdev->dev, "\thard_stop_ticks_nss:%u", js_devdata->hard_stop_ticks_nss);
+       dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u", js_devdata->gpu_reset_ticks_ss);
+       dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u", js_devdata->gpu_reset_ticks_cl);
+       dev_dbg(kbdev->dev, "\tgpu_reset_ticks_nss:%u", js_devdata->gpu_reset_ticks_nss);
+       dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u", js_devdata->ctx_timeslice_ns);
+       dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u", js_devdata->cfs_ctx_runtime_init_slices);
+       dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u", js_devdata->cfs_ctx_runtime_min_slices);
 
 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0
-       KBASE_LOG(2, kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", js_devdata->soft_stop_ticks, js_devdata->scheduling_tick_ns);
+       dev_dbg(kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", js_devdata->soft_stop_ticks, js_devdata->scheduling_tick_ns);
 #endif
 #if KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
-       KBASE_LOG(2, kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%u at %uns per tick. Other hard-stops may still occur.", js_devdata->hard_stop_ticks_ss, js_devdata->hard_stop_ticks_nss, js_devdata->scheduling_tick_ns);
+       dev_dbg(kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%u at %uns per tick. Other hard-stops may still occur.", js_devdata->hard_stop_ticks_ss, js_devdata->hard_stop_ticks_nss, js_devdata->scheduling_tick_ns);
 #endif
 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0 && KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
-       KBASE_LOG(2, kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
+       dev_dbg(kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
 #endif
 
        /* setup the number of irq throttle cycles base on given time */
@@ -914,7 +914,7 @@ mali_bool kbasep_js_add_job(kbase_context *kctx, kbase_jd_atom *atom)
                } else if (js_kctx_info->ctx.nr_jobs == 1) {
                        /* Handle Refcount going from 0 to 1: schedule the context on the Policy Queue */
                        KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
-                       KBASE_LOG(1, kbdev->dev, "JS: Enqueue Context %p", kctx);
+                       dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);
 
                        mutex_lock(&js_devdata->queue_mutex);
                        kbasep_js_policy_enqueue_ctx(js_policy, kctx);
@@ -1182,7 +1182,7 @@ STATIC kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(kbase_dev
        /* Make a set of checks to see if the context should be scheduled out */
        if (new_ref_count == 0 && (kctx->jctx.sched_info.ctx.nr_jobs == 0 || kbasep_js_is_submit_allowed(js_devdata, kctx) == MALI_FALSE)) {
                /* Last reference, and we've been told to remove this context from the Run Pool */
-               KBASE_LOG(2, kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d", kctx, new_ref_count, js_kctx_info->ctx.nr_jobs, kbasep_js_is_submit_allowed(js_devdata, kctx));
+               dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d", kctx, new_ref_count, js_kctx_info->ctx.nr_jobs, kbasep_js_is_submit_allowed(js_devdata, kctx));
 
                kbasep_js_policy_runpool_remove_ctx(js_policy, kctx);
 
@@ -1273,20 +1273,20 @@ void kbasep_js_runpool_requeue_or_kill_ctx(kbase_device *kbdev, kbase_context *k
        if (js_kctx_info->ctx.is_dying != MALI_FALSE) {
                /* Dying: don't requeue, but kill all jobs on the context. This happens
                 * asynchronously */
-               KBASE_LOG(2, kbdev->dev, "JS: ** Killing Context %p on RunPool Remove **", kctx);
+               dev_dbg(kbdev->dev, "JS: ** Killing Context %p on RunPool Remove **", kctx);
                kbasep_js_policy_foreach_ctx_job(js_policy, kctx, &kbase_jd_cancel, MALI_TRUE);
        } else if (js_kctx_info->ctx.nr_jobs > 0) {
                /* Not dying, has jobs: de-ref core counts from each job before addding
                 * back to the queue */
                kbasep_js_policy_foreach_ctx_job(js_policy, kctx, &kbasep_js_job_check_deref_cores, MALI_FALSE);
 
-               KBASE_LOG(1, kbdev->dev, "JS: Requeue Context %p", kctx);
+               dev_dbg(kbdev->dev, "JS: Requeue Context %p", kctx);
                mutex_lock(&js_devdata->queue_mutex);
                kbasep_js_policy_enqueue_ctx(js_policy, kctx);
                mutex_unlock(&js_devdata->queue_mutex);
        } else {
                /* Not dying, no jobs: don't add back to the queue */
-               KBASE_LOG(1, kbdev->dev, "JS: Idling Context %p (not requeued)", kctx);
+               dev_dbg(kbdev->dev, "JS: Idling Context %p (not requeued)", kctx);
        }
 
        if (has_pm_ref) {
@@ -1788,7 +1788,7 @@ void kbasep_js_try_schedule_head_ctx(kbase_device *kbdev)
        }
        js_kctx_info = &head_kctx->jctx.sched_info;
 
-       KBASE_LOG(1, kbdev->dev, "JS: Dequeue Context %p", head_kctx);
+       dev_dbg(kbdev->dev, "JS: Dequeue Context %p", head_kctx);
 
        pm_active_err = kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE);
 
index ec20c9d45c740e1bc77a63cda7260b6d0a96594a..8c99634dce8faca935773916dae41e505c932a9a 100755 (executable)
@@ -671,7 +671,7 @@ static INLINE void kbasep_js_set_submit_allowed(kbasep_js_device_data *js_devdat
 
        set_bit = (u16) (1u << kctx->as_nr);
 
-       KBASE_LOG(3, kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+       dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
 
        js_devdata->runpool_irq.submit_allowed |= set_bit;
 }
@@ -696,7 +696,7 @@ static INLINE void kbasep_js_clear_submit_allowed(kbasep_js_device_data *js_devd
        clear_bit = (u16) (1u << kctx->as_nr);
        clear_mask = ~clear_bit;
 
-       KBASE_LOG(3, kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+       dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
 
        js_devdata->runpool_irq.submit_allowed &= clear_mask;
 }
index 78c0fd290b561cd1458d16e82e44d00eaebd4145..80919b730d5b3ebc24909b27fc33e55d9c3d3a6e 100755 (executable)
@@ -75,7 +75,8 @@
 static const kbasep_atom_req core_req_variants[] = {
        {
         /* 0: Fragment variant */
-        (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_COHERENT_GROUP),
+        (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_FS_AFBC |
+                                               BASE_JD_REQ_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS),
         0},
        {
@@ -732,7 +733,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
                                        /* Job has been scheduled for at least js_devdata->soft_stop_ticks ticks.
                                         * Soft stop the slot so we can run other jobs.
                                         */
-                                       KBASE_LOG(1, kbdev->dev, "Soft-stop");
+                                       dev_dbg(kbdev->dev, "Soft-stop");
 
 #if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
                                        kbase_job_slot_softstop(kbdev, s, atom);
@@ -759,7 +760,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
                                        /* Job has been scheduled for at least js_devdata->soft_stop_ticks.
                                         * We do not soft-stop during CINSTR_DUMPING_ENABLED, however.
                                         */
-                                       KBASE_LOG(1, kbdev->dev, "Soft-stop");
+                                       dev_dbg(kbdev->dev, "Soft-stop");
                                } else if (ticks == js_devdata->hard_stop_ticks_nss) {
                                        /* Job has been scheduled for at least js_devdata->hard_stop_ticks_nss ticks.
                                         * Hard stop the slot.
index 39c909904dc93e460e67ab8feeb961f49de200ff..a88fbabb2395588e7fa4ad9efb78a3b7bbf1cfa7 100755 (executable)
@@ -1100,7 +1100,7 @@ static int kbase_trace_buffer_mmap(kbase_context *kctx, struct vm_area_struct *v
        u32 *tb;
        int owns_tb = 1;
 
-       KBASE_LOG(1, kctx->kbdev->dev, "in %s\n", __func__);
+       dev_dbg(kctx->kbdev->dev, "in %s\n", __func__);
        size = (vma->vm_end - vma->vm_start);
        nr_pages = size >> PAGE_SHIFT;
 
@@ -1155,7 +1155,7 @@ static int kbase_trace_buffer_mmap(kbase_context *kctx, struct vm_area_struct *v
        vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
        /* the rest of the flags is added by the cpu_mmap handler */
 
-       KBASE_LOG(1, kctx->kbdev->dev, "%s done\n", __func__);
+       dev_dbg(kctx->kbdev->dev, "%s done\n", __func__);
        return 0;
 
 out_no_va_region:
@@ -1179,7 +1179,7 @@ static int kbase_mmu_dump_mmap(kbase_context *kctx, struct vm_area_struct *vma,
        size_t size;
        int err = 0;
 
-       KBASE_LOG(1, kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
+       dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
        size = (vma->vm_end - vma->vm_start);
        nr_pages = size >> PAGE_SHIFT;
 
@@ -1216,7 +1216,7 @@ static int kbase_mmu_dump_mmap(kbase_context *kctx, struct vm_area_struct *vma,
        *kmap_addr = kaddr;
        *reg = new_reg;
 
-       KBASE_LOG(1, kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
+       dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
        return 0;
 
 out_no_alloc:
@@ -1251,7 +1251,7 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
        int free_on_close = 0;
        struct device *dev = kctx->kbdev->dev;
 
-       KBASE_LOG(1, dev, "kbase_mmap\n");
+       dev_dbg(dev, "kbase_mmap\n");
        nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
        /* strip away corresponding VM_MAY% flags to the VM_% flags requested */
@@ -1296,7 +1296,7 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
                err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
                if (0 != err)
                        goto out_unlock;
-               KBASE_LOG(1, dev, "kbase_trace_buffer_mmap ok\n");
+               dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
                /* free the region on munmap */
                free_on_close = 1;
                goto map;
index 4aedadbaf45d6e8b835a401acfff6f49526b91b4..ff361792d6c6bb2d073dc91c078547a955738d98 100755 (executable)
@@ -28,7 +28,7 @@
 #include <mali_kbase_gator.h>
 #include <mali_kbase_debug.h>
 
-#define beenthere(kctx, f, a...)  KBASE_LOG(1, kctx->kbdev->dev, "%s:" f, __func__, ##a)
+#define beenthere(kctx, f, a...)  dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
 
 #include <mali_kbase_defs.h>
 #include <mali_kbase_hw.h>
index 1a8cdacd50eda36a67718185649702955fa3932a..9fe788b630eb54b4a1631fe6ad1693717b75b798 100755 (executable)
@@ -655,16 +655,16 @@ void kbase_pm_clock_on(kbase_device *kbdev, mali_bool is_resume)
                        reset_required = MALI_TRUE;
        }
 
+       spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
+       kbdev->pm.gpu_powered = MALI_TRUE;
+       spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
+
        if (reset_required) {
                /* GPU state was lost, reset GPU to ensure it is in a
                 * consistent state */
                kbase_pm_init_hw(kbdev, MALI_TRUE);
        }
 
-       spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
-       kbdev->pm.gpu_powered = MALI_TRUE;
-       spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
-
        /* Lastly, enable the interrupts */
        kbase_pm_enable_interrupts(kbdev);
 }
index 91381329881cf39bbed333dbf1dc7acc8ba5428b..0e9ed621f59dc864532a6e4b6f1380248ac9c6d8 100755 (executable)
@@ -81,8 +81,8 @@ static void dump_job_head(kbase_context *kctx, char *head_str, job_head *job)
 #ifdef CONFIG_MALI_DEBUG
        struct device *dev = kctx->kbdev->dev;
 
-       KBASE_LOG(2, dev, "%s\n", head_str);
-       KBASE_LOG(2, dev, "addr               = %p\n"
+       dev_dbg(dev, "%s\n", head_str);
+       dev_dbg(dev, "addr               = %p\n"
                                        "status             = %x\n"
                                        "not_complete_index = %x\n"
                                        "fault_addr         = %llx\n"
@@ -99,9 +99,9 @@ static void dump_job_head(kbase_context *kctx, char *head_str, job_head *job)
                                                         job->dependencies[1]);
 
        if (job->flags & JOB_FLAG_DESC_SIZE)
-               KBASE_LOG(2, dev, "next               = %llx\n", job->next._64);
+               dev_dbg(dev, "next               = %llx\n", job->next._64);
        else
-               KBASE_LOG(2, dev, "next               = %x\n", job->next._32);
+               dev_dbg(dev, "next               = %x\n", job->next._32);
 #endif
 }
 
@@ -184,7 +184,7 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
        } *fbd_tiler;
        struct device *dev = kctx->kbdev->dev;
 
-       KBASE_LOG(2, dev, "fbd_address: %llx\n", fbd_address);
+       dev_dbg(dev, "fbd_address: %llx\n", fbd_address);
 
        fbd_tiler = kbasep_map_page_sync(kctx, fbd_address + SFBD_TILER_OFFSET,
                                                                   &phys_addr);
@@ -193,7 +193,7 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
                return MALI_ERROR_FUNCTION_FAILED;
        }
 #ifdef CONFIG_MALI_DEBUG
-       KBASE_LOG(2, dev, "FBD tiler:\n"
+       dev_dbg(dev, "FBD tiler:\n"
                                "flags = %x\n"
                                "heap_free_address = %llx\n",
                                                              fbd_tiler->flags,
@@ -215,11 +215,11 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
                }
 
 
-               KBASE_LOG(2, dev,
+               dev_dbg(dev,
                              "Old hierarchy mask=%x  New hierarchy mask=%x\n",
                                           old_hierarchy_mask, hierarchy_mask);
                for (i = 0; i < HIERARCHY_WEIGHTS; i++)
-                       KBASE_LOG(2, dev, " Hierarchy weight %02d: %08x\n",
+                       dev_dbg(dev, " Hierarchy weight %02d: %08x\n",
                                                                i, weights[i]);
 
                j = 0;
@@ -228,7 +228,7 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
                        if (hierarchy_mask & (1 << i)) {
                                KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
 
-                               KBASE_LOG(2, dev,
+                               dev_dbg(dev,
                                " Writing hierarchy level %02d (%08x) to %d\n",
                                                             i, weights[i], j);
 
@@ -244,7 +244,7 @@ static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
 
        fbd_tiler->heap_free_address = tiler_heap_free;
 
-       KBASE_LOG(2, dev, "heap_free_address=%llx flags=%x\n",
+       dev_dbg(dev, "heap_free_address=%llx flags=%x\n",
                               fbd_tiler->heap_free_address, fbd_tiler->flags);
 
        kbasep_unmap_page_sync(fbd_tiler, phys_addr);
@@ -270,7 +270,7 @@ static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
        mali_bool flags_different_page;
        struct device *dev = kctx->kbdev->dev;
 
-       KBASE_LOG(2, dev, "fbd_address: %llx\n", fbd_address);
+       dev_dbg(dev, "fbd_address: %llx\n", fbd_address);
 
        fbd_tiler = kbasep_map_page_sync(kctx, fbd_address + MFBD_TILER_OFFSET,
                                                                   &phys_addr);
@@ -298,7 +298,7 @@ static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
                return MALI_ERROR_FUNCTION_FAILED;
        }
 #ifdef CONFIG_MALI_DEBUG
-       KBASE_LOG(2, dev, "FBD tiler:\n"
+       dev_dbg(dev, "FBD tiler:\n"
                                "heap_free_address = %llx\n",
                                 fbd_tiler->heap_free_address);
 #endif
@@ -318,11 +318,11 @@ static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
                }
 
 
-               KBASE_LOG(2, dev,
+               dev_dbg(dev,
                              "Old hierarchy mask=%x  New hierarchy mask=%x\n",
                                           old_hierarchy_mask, hierarchy_mask);
                for (i = 0; i < HIERARCHY_WEIGHTS; i++)
-                       KBASE_LOG(2, dev, " Hierarchy weight %02d: %08x\n",
+                       dev_dbg(dev, " Hierarchy weight %02d: %08x\n",
                                                                i, weights[i]);
 
                j = 0;
@@ -331,7 +331,7 @@ static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
                        if (hierarchy_mask & (1 << i)) {
                                KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
 
-                               KBASE_LOG(2, dev,
+                               dev_dbg(dev,
                                " Writing hierarchy level %02d (%08x) to %d\n",
                                                             i, weights[i], j);
 
@@ -580,7 +580,7 @@ static mali_error kbasep_replay_find_hw_job_id(kbase_context *kctx,
                job_head *job;
                u64 phys_addr;
 
-               KBASE_LOG(2, kctx->kbdev->dev,
+               dev_dbg(kctx->kbdev->dev,
                        "kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
 
                job = kbasep_map_page_sync(kctx, jc, &phys_addr);
@@ -638,12 +638,12 @@ static mali_error kbasep_replay_parse_jc(kbase_context *kctx,
        mali_bool first_in_chain = MALI_TRUE;
        int nr_jobs = 0;
 
-       KBASE_LOG(2, kctx->kbdev->dev,
+       dev_dbg(kctx->kbdev->dev,
                              "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
                                                         jc, hw_job_id_offset);
 
        while (jc) {
-               KBASE_LOG(2, kctx->kbdev->dev,
+               dev_dbg(kctx->kbdev->dev,
                                   "kbasep_replay_parse_jc: parsing jc=%llx\n",
                                                                           jc);
 
@@ -704,7 +704,7 @@ static int kbasep_allocate_katom(kbase_context *kctx)
        for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
                if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
                        jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
-                       KBASE_LOG(2, kctx->kbdev->dev,
+                       dev_dbg(kctx->kbdev->dev,
                                  "kbasep_allocate_katom: Allocated atom %d\n",
                                                                            i);
                        return i;
@@ -728,7 +728,7 @@ static void kbasep_release_katom(kbase_context *kctx, int atom_id)
 {
        kbase_jd_context *jctx = &kctx->jctx;
 
-       KBASE_LOG(2, kctx->kbdev->dev,
+       dev_dbg(kctx->kbdev->dev,
                                    "kbasep_release_katom: Released atom %d\n",
                                                                      atom_id);
 
@@ -811,7 +811,7 @@ static void payload_dump(kbase_context *kctx, base_jd_replay_payload *payload)
 {
        mali_addr64 next;
 
-       KBASE_LOG(2, kctx->kbdev->dev, "Tiler jc list :\n");
+       dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n");
        next = payload->tiler_jc_list;
 
        while (next) {
@@ -820,7 +820,7 @@ static void payload_dump(kbase_context *kctx, base_jd_replay_payload *payload)
                if (!jc_struct)
                        return;
 
-               KBASE_LOG(2, kctx->kbdev->dev,
+               dev_dbg(kctx->kbdev->dev,
                                          "* jc_struct=%p jc=%llx next=%llx\n",
                                                                     jc_struct,
                                                                 jc_struct->jc,
@@ -856,7 +856,7 @@ static mali_error kbasep_replay_parse_payload(kbase_context *kctx,
        u64 phys_addr;
        struct device *dev = kctx->kbdev->dev;
 
-       KBASE_LOG(2, dev,
+       dev_dbg(dev,
                        "kbasep_replay_parse_payload: replay_atom->jc = %llx  "
                        "sizeof(payload) = %d\n",
                                             replay_atom->jc, sizeof(payload));
@@ -872,8 +872,8 @@ static mali_error kbasep_replay_parse_payload(kbase_context *kctx,
        }
 
 #ifdef CONFIG_MALI_DEBUG
-       KBASE_LOG(2, dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
-       KBASE_LOG(2, dev, "Payload structure:\n"
+       dev_dbg(dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
+       dev_dbg(dev, "Payload structure:\n"
                                        "tiler_jc_list            = %llx\n"
                                        "fragment_jc              = %llx\n"
                                        "tiler_heap_free          = %llx\n"
@@ -968,7 +968,7 @@ static mali_error kbasep_replay_parse_payload(kbase_context *kctx,
                goto out;
        }
 
-       KBASE_LOG(2, dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
+       dev_dbg(dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
                                                       t_atom->jc, f_atom->jc);
        ret = MALI_ERROR_NONE;
 
@@ -1005,12 +1005,12 @@ int kbase_replay_process(kbase_jd_atom *katom)
        struct device *dev = kctx->kbdev->dev;
 
        if (katom->event_code == BASE_JD_EVENT_DONE) {
-               KBASE_LOG(2, dev, "Previous job succeeded - not replaying\n");
+               dev_dbg(dev, "Previous job succeeded - not replaying\n");
                return MALI_REPLAY_STATUS_COMPLETE;
        }
 
        if (jctx->sched_info.ctx.is_dying) {
-               KBASE_LOG(2, dev, "Not replaying; context is dying\n");
+               dev_dbg(dev, "Not replaying; context is dying\n");
                return MALI_REPLAY_STATUS_COMPLETE;
        }
 
index a6c277176c2854896acc17841739239a9a1048a2..84ab305edf35e541f17827120f578772ef8657bd 100755 (executable)
@@ -224,6 +224,7 @@ typedef struct kbase_uk_get_ddk_version {
        char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE];
        u32 version_string_size;
        u32 padding;
+       u32 rk_version;
 } kbase_uk_get_ddk_version;
 
 typedef struct kbase_uk_set_flags {