# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r4p0-02rel0"
+MALI_RELEASE_NAME ?= "r4p1-01dev0"
# Paths required for build
KBASE_PATH = $(src)
/* (Note that PRLAM-9049 also uses this work-around) */
BASE_HW_ISSUE_8564,
+ /* Fragments are clamped instead of discarded when fragment depth bound op is discard and depth datum source is shader. */
+ BASE_HW_ISSUE_8634,
+
/* Livelock issue using atomic instructions (particularly when using atomic_cmpxchg as a spinlock) */
BASE_HW_ISSUE_8791,
/* AFBC is not supported for T76X beta. */
BASE_HW_ISSUE_T76X_2906,
+ /* RTD doesn't specify the row stride for AFBC surfaces. */
+ BASE_HW_ISSUE_T76X_3086,
+
/* Prevent MMU deadlock for T76X beta. */
BASE_HW_ISSUE_T76X_3285,
/* Clear encoder state for a hard stopped fragment job which is AFBC
- * encoded by soft resetting the GPU. Only for T76X r0p0 and r0p1
+ * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and r0p1_50rel0
*/
BASE_HW_ISSUE_T76X_3542,
BASE_HW_ISSUE_8443,
BASE_HW_ISSUE_8456,
BASE_HW_ISSUE_8564,
+ BASE_HW_ISSUE_8634,
BASE_HW_ISSUE_8791,
BASE_HW_ISSUE_8803,
BASE_HW_ISSUE_8833,
BASE_HW_ISSUE_11020,
BASE_HW_ISSUE_11024,
BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_3086,
BASE_HW_ISSUE_T76X_3542,
BASE_HW_ISSUE_T76X_3556,
BASE_HW_ISSUE_T76X_3700,
BASE_HW_ISSUE_11020,
BASE_HW_ISSUE_11024,
BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ /* List of hardware issues must end with BASE_HW_ISSUE_END */
+ BASE_HW_ISSUE_END
+};
+
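+/* Illustrative sketch only: every issue table above is terminated by
+ * BASE_HW_ISSUE_END and is scanned once at probe time to build the
+ * per-GPU workaround mask (the driver's real accessor is
+ * kbase_hw_has_issue()). The lookup pattern the tables assume:
+ *
+ * static mali_bool issue_list_contains(const base_hw_issue *issues,
+ *				base_hw_issue issue)
+ * {
+ *	while (*issues != BASE_HW_ISSUE_END) {
+ *		if (*issues++ == issue)
+ *			return MALI_TRUE;
+ *	}
+ *	return MALI_FALSE;
+ * }
+ */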
+/* Mali T76x r0p1_50rel0 */
+static const base_hw_issue base_hw_issues_t76x_r0p1_50rel0[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_T76X_26,
BASE_HW_ISSUE_T76X_3542,
BASE_HW_ISSUE_T76X_3556,
BASE_HW_ISSUE_T76X_3700,
BASE_HW_ISSUE_11020,
BASE_HW_ISSUE_11024,
BASE_HW_ISSUE_T76X_26,
+ BASE_HW_ISSUE_T76X_3086,
+ BASE_HW_ISSUE_T76X_3542,
+ BASE_HW_ISSUE_T76X_3556,
+ BASE_HW_ISSUE_T76X_3700,
+ BASE_HW_ISSUE_T76X_3793,
+ /* List of hardware issues must end with BASE_HW_ISSUE_END */
+ BASE_HW_ISSUE_END
+};
+
+/* Mali T76x r0p3 */
+static const base_hw_issue base_hw_issues_t76x_r0p3[] = {
+ BASE_HW_ISSUE_8803,
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_10649,
+ BASE_HW_ISSUE_10821,
+ BASE_HW_ISSUE_10883,
+ BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_T76X_26,
BASE_HW_ISSUE_T76X_3542,
BASE_HW_ISSUE_T76X_3556,
BASE_HW_ISSUE_T76X_3700,
BASE_HW_ISSUE_10821,
BASE_HW_ISSUE_10883,
BASE_HW_ISSUE_10946,
+ BASE_HW_ISSUE_T76X_3086,
BASE_HW_ISSUE_T76X_3700,
BASE_HW_ISSUE_T76X_3793,
/* List of hardware issues must end with BASE_HW_ISSUE_END */
BASE_HW_ISSUE_5736,
BASE_HW_ISSUE_9275,
BASE_HW_ISSUE_9435,
- BASE_HW_ISSUE_10931,
BASE_HW_ISSUE_11020,
BASE_HW_ISSUE_11024,
+ BASE_HW_ISSUE_T76X_3086,
BASE_HW_ISSUE_T76X_3700,
BASE_HW_ISSUE_T76X_3793,
/* List of hardware issues must end with BASE_HW_ISSUE_END */
/*** Begin Power Manager defaults */
/* Milliseconds */
-#define DEFAULT_PM_DVFS_FREQ 100
+#define DEFAULT_PM_DVFS_FREQ 50
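/* With a 50 ms period the power-management metrics are sampled twice as
 * often as under the old 100 ms default, so utilization-driven DVFS
 * decisions react faster to load changes (at slightly higher CPU cost). */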
/**
 * Default poweroff tick granularity, in nanoseconds
static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
{
- KBASE_LOG(2, reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
+ dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
kbase_mem_phy_alloc_put(reg->alloc);
kfree(reg);
}
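/* dev_dbg() replaces the driver-private KBASE_LOG(level, dev, ...)
 * wrapper throughout this patch. The numeric verbosity levels are
 * dropped: dev_dbg() output is compiled in only when DEBUG is defined,
 * or enabled at runtime through the kernel's dynamic debug facility. */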
#endif /* MALI_UNIT_TEST */
#define KBASE_DRV_NAME "mali"
-
+#define ROCKCHIP_VERSION 1
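/* Reported back to userspace in the new rk_version field of
 * kbase_uk_get_ddk_version (see the get_version handling below). */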
static const char kbase_drv_name[] = KBASE_DRV_NAME;
static int kbase_dev_nr;
/* version buffer size check is made in compile time assert */
memcpy(get_version->version_buffer, KERNEL_SIDE_DDK_VERSION_STRING, sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
get_version->version_string_size = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
+ get_version->rk_version = ROCKCHIP_VERSION;
break;
}
init_waitqueue_head(&kctx->event_queue);
filp->private_data = kctx;
- KBASE_LOG(1, kbdev->dev, "created base context\n");
+ dev_dbg(kbdev->dev, "created base context\n");
{
kbasep_kctx_list_element *element;
filp->private_data = NULL;
kbase_destroy_context(kctx);
- KBASE_LOG(1, kbdev->dev, "deleted base context\n");
+ dev_dbg(kbdev->dev, "deleted base context\n");
kbase_release_device(kbdev);
return 0;
}
if (!val)
return IRQ_NONE;
- KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
kbase_job_done(kbdev, val);
if (!val)
return IRQ_NONE;
- KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
kbase_mmu_interrupt(kbdev, val);
#ifdef CONFIG_MALI_DEBUG
if (!kbdev->pm.driver_ready_for_irqs)
- KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
__func__, irq, val);
#endif /* CONFIG_MALI_DEBUG */
spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
if (!val)
return IRQ_NONE;
- KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
kbase_gpu_interrupt(kbdev, val);
if (!val)
return IRQ_NONE;
- KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
kbasep_irq_test_data.triggered = 1;
wake_up(&kbasep_irq_test_data.wait);
if (!val)
return IRQ_NONE;
- KBASE_LOG(3, kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+ dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
kbasep_irq_test_data.triggered = 1;
wake_up(&kbasep_irq_test_data.wait);
dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n", kbdev->irqs[tag].irq, tag);
err = MALI_ERROR_FUNCTION_FAILED;
} else {
- KBASE_LOG(2, kbdev->dev, "Interrupt %d (index %d) reached CPU.\n", kbdev->irqs[tag].irq, tag);
+ dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n", kbdev->irqs[tag].irq, tag);
}
hrtimer_cancel(&kbasep_irq_test_data.timer);
mali_js0_affinity_mask = config->js0_mask;
mali_js1_affinity_mask = config->js1_mask;
mali_js2_affinity_mask = config->js2_mask;
- KBASE_LOG(2, dev, "Setting sc_split: '%s'\n", config->tag);
+ dev_dbg(dev, "Setting sc_split: '%s'\n", config->tag);
return count;
}
config++;
do_div(ticks, kbdev->js_data.scheduling_tick_ns);
kbdev->js_reset_ticks_nss = ticks;
- KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks, js_soft_stop_ms);
- KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks_cl, js_soft_stop_ms_cl);
- KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_ss, js_hard_stop_ms_ss);
- KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_cl, js_hard_stop_ms_cl);
- KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_nss, js_hard_stop_ms_nss);
- KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_ss, js_reset_ms_ss);
- KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_cl, js_reset_ms_cl);
- KBASE_LOG(2, kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_nss, js_reset_ms_nss);
+ dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks, js_soft_stop_ms);
+ dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_soft_stop_ticks_cl, js_soft_stop_ms_cl);
+ dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_ss, js_hard_stop_ms_ss);
+ dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_cl, js_hard_stop_ms_cl);
+ dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_hard_stop_ticks_nss, js_hard_stop_ms_nss);
+ dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_ss, js_reset_ms_ss);
+ dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_cl, js_reset_ms_cl);
+ dev_dbg(kbdev->dev, "Overriding KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS with %lu ticks (%lu ms)\n", (unsigned long)kbdev->js_reset_ticks_nss, js_reset_ms_nss);
return count;
} else {
items = sscanf(buf, "%d", &softstop_always);
if ((items == 1) && ((softstop_always == 0) || (softstop_always == 1))) {
kbdev->js_data.softstop_always = (mali_bool) softstop_always;
-
- KBASE_LOG(2, kbdev->dev, "Support for softstop on a single context: %s\n", (kbdev->js_data.softstop_always == MALI_FALSE) ? "Disabled" : "Enabled");
+ dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n", (kbdev->js_data.softstop_always == MALI_FALSE) ? "Disabled" : "Enabled");
return count;
} else {
dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\nUse format " "<soft_stop_always>\n");
#ifdef CONFIG_MALI_TRACE_TIMELINE
inited_timeline = (1u << 12),
#endif /* CONFIG_MALI_TRACE_TIMELINE */
+ inited_pm_powerup = (1u << 14),
};
int inited = 0;
mali_err = kbase_pm_powerup(kbdev);
if (MALI_ERROR_NONE == mali_err) {
+ inited |= inited_pm_powerup;
#ifdef CONFIG_MALI_DEBUG
#ifndef CONFIG_MALI_NO_MALI
if (MALI_ERROR_NONE != kbasep_common_test_interrupt_handlers(kbdev)) {
mutex_init(&kbdev->kctx_list_lock);
INIT_LIST_HEAD(&kbdev->kctx_list);
return 0;
+ } else {
+ /* Failed to power up the GPU. */
+ dev_err(kbdev->dev, "GPU power up failed.\n");
+ err = -ENODEV;
}
out_partial:
if (inited & inited_mem)
kbase_mem_halt(kbdev);
- if (inited & inited_pm)
+ if (inited & inited_pm_powerup)
kbase_pm_halt(kbdev);
if (inited & inited_irqs)
#endif /* CONFIG_MALI_DEBUG */
#if MALI_CUSTOMER_RELEASE == 0
device_remove_file(kbdev->dev, &dev_attr_js_timeouts);
+ device_remove_file(kbdev->dev, &dev_attr_force_replay);
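	/* Balances the matching device_create_file(..., &dev_attr_force_replay)
	 * call made in the probe path (not shown here). */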
#endif /* MALI_CUSTOMER_RELEASE */
#ifdef CONFIG_DEBUG_FS
kbasep_gpu_memory_debugfs_term(kbdev);
if (kbdev->pm.callback_power_runtime_off) {
kbdev->pm.callback_power_runtime_off(kbdev);
- KBASE_LOG(1, dev, "runtime suspend\n");
+ dev_dbg(dev, "runtime suspend\n");
}
return 0;
}
if (kbdev->pm.callback_power_runtime_on) {
ret = kbdev->pm.callback_power_runtime_on(kbdev);
- KBASE_LOG(1, dev, "runtime resume\n");
+ dev_dbg(dev, "runtime resume\n");
}
return ret;
}
KBASE_DEBUG_ASSERT(kbdev->pm.gpu_powered);
KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
- KBASE_LOG(4, kbdev->dev, "w: reg %04x val %08x", offset, value);
+ dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value);
kbase_os_reg_write(kbdev, offset, value);
if (kctx && kctx->jctx.tb)
kbase_device_trace_register_access(kctx, REG_WRITE, offset, value);
KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
val = kbase_os_reg_read(kbdev, offset);
- KBASE_LOG(4, kbdev->dev, "r: reg %04x val %08x", offset, val);
+ dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val);
if (kctx && kctx->jctx.tb)
kbase_device_trace_register_access(kctx, REG_READ, offset, val);
return val;
char buffer[DEBUG_MESSAGE_SIZE];
kbasep_trace_format_msg(trace_msg, buffer, DEBUG_MESSAGE_SIZE);
- KBASE_LOG(1, kbdev->dev, "%s", buffer);
+ dev_dbg(kbdev->dev, "%s", buffer);
}
void kbasep_trace_add(kbase_device *kbdev, kbase_trace_code code, void *ctx, kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val)
u32 start;
u32 end;
- KBASE_LOG(1, kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
+ dev_dbg(kbdev->dev, "Dumping trace:\nsecs,nthread,cpu,code,ctx,katom,gpu_addr,jobslot,refcount,info_val");
spin_lock_irqsave(&kbdev->trace_lock, flags);
start = kbdev->trace_first_out;
end = kbdev->trace_next_in;
start = (start + 1) & KBASE_TRACE_MASK;
}
- KBASE_LOG(1, kbdev->dev, "TRACE_END");
+ dev_dbg(kbdev->dev, "TRACE_END");
spin_unlock_irqrestore(&kbdev->trace_lock, flags);
mutex_unlock(&ctx->event_mutex);
uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
memset(&uevent->udata, 0, sizeof(uevent->udata));
- KBASE_LOG(2, ctx->kbdev->dev,
+ dev_dbg(ctx->kbdev->dev,
"event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
BASE_JD_EVENT_DRV_TERMINATED);
return 0;
mutex_unlock(&ctx->event_mutex);
- KBASE_LOG(2, ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
+ dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
uevent->event_code = atom->event_code;
uevent->atom_number = (atom - ctx->jctx.atoms);
uevent->udata = kbase_event_process(ctx, atom);
issues = base_hw_issues_t76x_r0p1;
break;
case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 1, 9):
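	/* Version status 9 identifies the r0p1_50rel0 build, which now gets
	 * its own issue list rather than reusing the plain r0p1 one. */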
- /* TODO: MIDBASE-3084 - confirm hw issue list */
- issues = base_hw_issues_t76x_r0p1;
+ issues = base_hw_issues_t76x_r0p1_50rel0;
break;
case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 2, 1):
issues = base_hw_issues_t76x_r0p2;
break;
case GPU_ID_MAKE(GPU_ID_PI_T76X, 0, 3, 1):
- /* TODO: MIDBASE-3086 - confirm hw issue list */
- issues = base_hw_issues_t76x_r0p2;
+ issues = base_hw_issues_t76x_r0p3;
break;
case GPU_ID_MAKE(GPU_ID_PI_T76X, 1, 0, 0):
issues = base_hw_issues_t76x_r1p0;
err = MALI_ERROR_NONE;
- KBASE_LOG(1, kbdev->dev, "HW counters dumping set-up for context %p", kctx);
+ dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
return err;
out_unrequest_cores:
kbase_pm_unrequest_cores(kbdev, MALI_TRUE, shader_cores_needed);
/* Also release our Power Manager Active reference */
kbase_pm_context_idle(kbdev);
- KBASE_LOG(1, kbdev->dev, "HW counters dumping disabled for context %p", kctx);
+ dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p", kctx);
err = MALI_ERROR_NONE;
KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL, kbdev->hwcnt.addr, 0);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_PRFCNT_SAMPLE, kctx);
- KBASE_LOG(1, kbdev->dev, "HW counters dumping done for context %p", kctx);
+ dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx);
err = MALI_ERROR_NONE;
#endif /* CONFIG_UMP */
#include <linux/random.h>
-#define beenthere(kctx,f, a...) KBASE_LOG(1, kctx->kbdev->dev, "%s:" f, __func__, ##a)
+#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
/* random32 was renamed to prandom_u32 in 3.8 */
if (kbdev->gpu_props.num_core_groups > 1 &&
!(katom->affinity & kbdev->gpu_props.props.coherency_info.group[0].core_mask) &&
(katom->affinity & kbdev->gpu_props.props.coherency_info.group[1].core_mask)) {
- KBASE_LOG(2, kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
+ dev_dbg(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
kbasep_jd_cacheclean(kbdev);
}
}
katom->event_code == BASE_JD_EVENT_TILE_RANGE_FAULT &&
(katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED) &&
!(katom->atom_flags & KBASE_KATOM_FLAGS_RERUN)){
- KBASE_LOG(2, kbdev->dev,
+ dev_dbg(kbdev->dev,
"Soft-stopped fragment shader job got a TILE_RANGE_FAULT." \
"Possible HW issue, trying SW workaround\n" );
if (kbasep_10969_workaround_clamp_coordinates(katom)) {
* Due to an HW issue we try to execute the job
* again.
*/
- KBASE_LOG(2, kbdev->dev, "Clamping has been executed, try to rerun the job\n" );
+ dev_dbg(kbdev->dev, "Clamping has been executed, try to rerun the job\n" );
katom->event_code = BASE_JD_EVENT_STOPPED;
katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
if (!kbasep_js_has_atom_finished(&katom_retained_state)) {
unsigned long flags;
/* Requeue the atom on soft-stop / removed from NEXT registers */
- KBASE_LOG(2, kbdev->dev, "JS: Soft Stopped/Removed from next on Ctx %p; Requeuing", kctx);
+ dev_dbg(kbdev->dev, "JS: Soft Stopped/Removed from next on Ctx %p; Requeuing", kctx);
mutex_lock(&js_devdata->runpool_mutex);
kbasep_js_clear_job_retry_submit(katom);
}
destroy_hrtimer_on_stack(&reset_data.timer);
- KBASE_LOG(1, kbdev->dev, "Zap: Finished Context %p", kctx);
+ dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
/* Ensure that the signallers of the waitqs have finished */
mutex_lock(&kctx->jctx.lock);
#include "mali_kbase_jm.h"
-#define beenthere(kctx, f, a...) KBASE_LOG(1, kctx->kbdev->dev, "%s:" f, __func__, ##a)
+#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
u64 mali_js0_affinity_mask = 0xFFFFFFFFFFFFFFFFULL;
katom->start_timestamp = ktime_get();
/* GO ! */
- KBASE_LOG(2, kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx", katom, kctx, js, jc_head, katom->affinity);
+ dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx", katom, kctx, js, jc_head, katom->affinity);
KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js, (u32) katom->affinity);
}
}
- KBASE_LOG(2, kbdev->dev, "Job ended with status 0x%08X\n", completion_code);
+ dev_dbg(kbdev->dev, "Job ended with status 0x%08X\n", completion_code);
nr_done = kbasep_jm_nr_jobs_submitted(slot);
nr_done -= (active >> i) & 1;
mali_bool soft_stop_allowed = kbasep_soft_stop_allowed(kbdev, core_reqs);
if (!soft_stop_allowed) {
#ifdef CONFIG_MALI_DEBUG
- KBASE_LOG(2, kbdev->dev, "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X", (unsigned int)core_reqs);
+ dev_dbg(kbdev->dev, "Attempt made to soft-stop a job that cannot be soft-stopped. core_reqs = 0x%X", (unsigned int)core_reqs);
#endif /* CONFIG_MALI_DEBUG */
return;
}
mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
js_kctx_info->ctx.is_dying = MALI_TRUE;
- KBASE_LOG(1, kbdev->dev, "Zap: Try Evict Ctx %p", kctx);
+ dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %p", kctx);
mutex_lock(&js_devdata->queue_mutex);
evict_success = kbasep_js_policy_try_evict_ctx(&js_devdata->policy, kctx);
mutex_unlock(&js_devdata->queue_mutex);
KBASE_TRACE_ADD(kbdev, JM_ZAP_NON_SCHEDULED, kctx, NULL, 0u, js_kctx_info->ctx.is_scheduled);
- KBASE_LOG(2, kbdev->dev, "Zap: Ctx %p evict_success=%d, scheduled=%d", kctx, evict_success, js_kctx_info->ctx.is_scheduled);
+ dev_dbg(kbdev->dev, "Zap: Ctx %p evict_success=%d, scheduled=%d", kctx, evict_success, js_kctx_info->ctx.is_scheduled);
if (evict_success != MALI_FALSE) {
/* Only cancel jobs when we evicted from the policy queue. No Power
mali_bool was_retained;
/* Case c: didn't evict, but it is scheduled - it's in the Run Pool */
KBASE_TRACE_ADD(kbdev, JM_ZAP_SCHEDULED, kctx, NULL, 0u, js_kctx_info->ctx.is_scheduled);
- KBASE_LOG(2, kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);
+ dev_dbg(kbdev->dev, "Zap: Ctx %p is in RunPool", kctx);
/* Disable the ctx from submitting any more jobs */
spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
/* Since it's scheduled and we have the jsctx_mutex, it must be retained successfully */
KBASE_DEBUG_ASSERT(was_retained != MALI_FALSE);
- KBASE_LOG(2, kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);
+ dev_dbg(kbdev->dev, "Zap: Ctx %p Kill Any Running jobs", kctx);
/* Cancel any remaining running jobs for this kctx - if any. Submit is disallowed
* which takes effect immediately, so no more new jobs will appear after we do this. */
for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
- KBASE_LOG(2, kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)", kctx);
+ dev_dbg(kbdev->dev, "Zap: Ctx %p Release (may or may not schedule out immediately)", kctx);
kbasep_js_runpool_release_ctx(kbdev, kctx);
}
KBASE_TRACE_ADD(kbdev, JM_ZAP_DONE, kctx, NULL, 0u, 0u);
slot->submitted_head = (slot->submitted_head + 1) & BASE_JM_SUBMIT_SLOTS_MASK;
slot->submitted_nr--;
- KBASE_LOG(2, katom->kctx->kbdev->dev, "katom %p new head %u", (void *)katom, (unsigned int)slot->submitted_head);
+ dev_dbg(katom->kctx->kbdev->dev, "katom %p new head %u", (void *)katom, (unsigned int)slot->submitted_head);
return katom;
}
mali_bool use_workaround_for_security;
use_workaround_for_security = (mali_bool) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_SECURE_BUT_LOSS_OF_PERFORMANCE);
if (use_workaround_for_security != MALI_FALSE) {
- KBASE_LOG(2, kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
+ dev_dbg(kbdev->dev, "GPU has HW ISSUE 8987, and driver configured for security workaround: 1 address space only");
kbdev->nr_user_address_spaces = 1;
}
}
js_devdata->cfs_ctx_runtime_init_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_INIT_SLICES);
js_devdata->cfs_ctx_runtime_min_slices = (u32) kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_JS_CFS_CTX_RUNTIME_MIN_SLICES);
- KBASE_LOG(2, kbdev->dev, "JS Config Attribs: ");
- KBASE_LOG(2, kbdev->dev, "\tscheduling_tick_ns:%u", js_devdata->scheduling_tick_ns);
- KBASE_LOG(2, kbdev->dev, "\tsoft_stop_ticks:%u", js_devdata->soft_stop_ticks);
- KBASE_LOG(2, kbdev->dev, "\tsoft_stop_ticks_cl:%u", js_devdata->soft_stop_ticks_cl);
- KBASE_LOG(2, kbdev->dev, "\thard_stop_ticks_ss:%u", js_devdata->hard_stop_ticks_ss);
- KBASE_LOG(2, kbdev->dev, "\thard_stop_ticks_cl:%u", js_devdata->hard_stop_ticks_cl);
- KBASE_LOG(2, kbdev->dev, "\thard_stop_ticks_nss:%u", js_devdata->hard_stop_ticks_nss);
- KBASE_LOG(2, kbdev->dev, "\tgpu_reset_ticks_ss:%u", js_devdata->gpu_reset_ticks_ss);
- KBASE_LOG(2, kbdev->dev, "\tgpu_reset_ticks_cl:%u", js_devdata->gpu_reset_ticks_cl);
- KBASE_LOG(2, kbdev->dev, "\tgpu_reset_ticks_nss:%u", js_devdata->gpu_reset_ticks_nss);
- KBASE_LOG(2, kbdev->dev, "\tctx_timeslice_ns:%u", js_devdata->ctx_timeslice_ns);
- KBASE_LOG(2, kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u", js_devdata->cfs_ctx_runtime_init_slices);
- KBASE_LOG(2, kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u", js_devdata->cfs_ctx_runtime_min_slices);
+ dev_dbg(kbdev->dev, "JS Config Attribs: ");
+ dev_dbg(kbdev->dev, "\tscheduling_tick_ns:%u", js_devdata->scheduling_tick_ns);
+ dev_dbg(kbdev->dev, "\tsoft_stop_ticks:%u", js_devdata->soft_stop_ticks);
+ dev_dbg(kbdev->dev, "\tsoft_stop_ticks_cl:%u", js_devdata->soft_stop_ticks_cl);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_ss:%u", js_devdata->hard_stop_ticks_ss);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_cl:%u", js_devdata->hard_stop_ticks_cl);
+ dev_dbg(kbdev->dev, "\thard_stop_ticks_nss:%u", js_devdata->hard_stop_ticks_nss);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_ss:%u", js_devdata->gpu_reset_ticks_ss);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_cl:%u", js_devdata->gpu_reset_ticks_cl);
+ dev_dbg(kbdev->dev, "\tgpu_reset_ticks_nss:%u", js_devdata->gpu_reset_ticks_nss);
+ dev_dbg(kbdev->dev, "\tctx_timeslice_ns:%u", js_devdata->ctx_timeslice_ns);
+ dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_init_slices:%u", js_devdata->cfs_ctx_runtime_init_slices);
+ dev_dbg(kbdev->dev, "\tcfs_ctx_runtime_min_slices:%u", js_devdata->cfs_ctx_runtime_min_slices);
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0
- KBASE_LOG(2, kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", js_devdata->soft_stop_ticks, js_devdata->scheduling_tick_ns);
+ dev_dbg(kbdev->dev, "Job Scheduling Policy Soft-stops disabled, ignoring value for soft_stop_ticks==%u at %uns per tick. Other soft-stops may still occur.", js_devdata->soft_stop_ticks, js_devdata->scheduling_tick_ns);
#endif
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
- KBASE_LOG(2, kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%u at %uns per tick. Other hard-stops may still occur.", js_devdata->hard_stop_ticks_ss, js_devdata->hard_stop_ticks_nss, js_devdata->scheduling_tick_ns);
+ dev_dbg(kbdev->dev, "Job Scheduling Policy Hard-stops disabled, ignoring values for hard_stop_ticks_ss==%d and hard_stop_ticks_nss==%u at %uns per tick. Other hard-stops may still occur.", js_devdata->hard_stop_ticks_ss, js_devdata->hard_stop_ticks_nss, js_devdata->scheduling_tick_ns);
#endif
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS != 0 && KBASE_DISABLE_SCHEDULING_HARD_STOPS != 0
- KBASE_LOG(2, kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
+ dev_dbg(kbdev->dev, "Note: The JS policy's tick timer (if coded) will still be run, but do nothing.");
#endif
/* set up the number of IRQ throttle cycles based on the given time */
} else if (js_kctx_info->ctx.nr_jobs == 1) {
/* Handle Refcount going from 0 to 1: schedule the context on the Policy Queue */
KBASE_DEBUG_ASSERT(js_kctx_info->ctx.is_scheduled == MALI_FALSE);
- KBASE_LOG(1, kbdev->dev, "JS: Enqueue Context %p", kctx);
+ dev_dbg(kbdev->dev, "JS: Enqueue Context %p", kctx);
mutex_lock(&js_devdata->queue_mutex);
kbasep_js_policy_enqueue_ctx(js_policy, kctx);
/* Make a set of checks to see if the context should be scheduled out */
if (new_ref_count == 0 && (kctx->jctx.sched_info.ctx.nr_jobs == 0 || kbasep_js_is_submit_allowed(js_devdata, kctx) == MALI_FALSE)) {
/* Last reference, and we've been told to remove this context from the Run Pool */
- KBASE_LOG(2, kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d", kctx, new_ref_count, js_kctx_info->ctx.nr_jobs, kbasep_js_is_submit_allowed(js_devdata, kctx));
+ dev_dbg(kbdev->dev, "JS: RunPool Remove Context %p because as_busy_refcount=%d, jobs=%d, allowed=%d", kctx, new_ref_count, js_kctx_info->ctx.nr_jobs, kbasep_js_is_submit_allowed(js_devdata, kctx));
kbasep_js_policy_runpool_remove_ctx(js_policy, kctx);
if (js_kctx_info->ctx.is_dying != MALI_FALSE) {
/* Dying: don't requeue, but kill all jobs on the context. This happens
* asynchronously */
- KBASE_LOG(2, kbdev->dev, "JS: ** Killing Context %p on RunPool Remove **", kctx);
+ dev_dbg(kbdev->dev, "JS: ** Killing Context %p on RunPool Remove **", kctx);
kbasep_js_policy_foreach_ctx_job(js_policy, kctx, &kbase_jd_cancel, MALI_TRUE);
} else if (js_kctx_info->ctx.nr_jobs > 0) {
/* Not dying, has jobs: de-ref core counts from each job before adding
* back to the queue */
kbasep_js_policy_foreach_ctx_job(js_policy, kctx, &kbasep_js_job_check_deref_cores, MALI_FALSE);
- KBASE_LOG(1, kbdev->dev, "JS: Requeue Context %p", kctx);
+ dev_dbg(kbdev->dev, "JS: Requeue Context %p", kctx);
mutex_lock(&js_devdata->queue_mutex);
kbasep_js_policy_enqueue_ctx(js_policy, kctx);
mutex_unlock(&js_devdata->queue_mutex);
} else {
/* Not dying, no jobs: don't add back to the queue */
- KBASE_LOG(1, kbdev->dev, "JS: Idling Context %p (not requeued)", kctx);
+ dev_dbg(kbdev->dev, "JS: Idling Context %p (not requeued)", kctx);
}
if (has_pm_ref) {
}
js_kctx_info = &head_kctx->jctx.sched_info;
- KBASE_LOG(1, kbdev->dev, "JS: Dequeue Context %p", head_kctx);
+ dev_dbg(kbdev->dev, "JS: Dequeue Context %p", head_kctx);
pm_active_err = kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE);
set_bit = (u16) (1u << kctx->as_nr);
- KBASE_LOG(3, kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+ dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
js_devdata->runpool_irq.submit_allowed |= set_bit;
}
clear_bit = (u16) (1u << kctx->as_nr);
clear_mask = ~clear_bit;
- KBASE_LOG(3, kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+ dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
js_devdata->runpool_irq.submit_allowed &= clear_mask;
}
static const kbasep_atom_req core_req_variants[] = {
{
/* 0: Fragment variant */
- (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_COHERENT_GROUP),
+ (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_FS_AFBC |
+ BASE_JD_REQ_COHERENT_GROUP),
(JS_CTX_REQ_ALL_OTHERS),
0},
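	/* BASE_JD_REQ_FS_AFBC is added so that AFBC fragment jobs still match
	 * the fragment variant: an atom's core requirements must be covered by
	 * a variant to be schedulable, so a new requirement bit has to appear
	 * in at least one variant. */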
{
/* Job has been scheduled for at least js_devdata->soft_stop_ticks ticks.
* Soft stop the slot so we can run other jobs.
*/
- KBASE_LOG(1, kbdev->dev, "Soft-stop");
+ dev_dbg(kbdev->dev, "Soft-stop");
#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
kbase_job_slot_softstop(kbdev, s, atom);
/* Job has been scheduled for at least js_devdata->soft_stop_ticks.
* We do not soft-stop during CINSTR_DUMPING_ENABLED, however.
*/
- KBASE_LOG(1, kbdev->dev, "Soft-stop");
+ dev_dbg(kbdev->dev, "Soft-stop");
} else if (ticks == js_devdata->hard_stop_ticks_nss) {
/* Job has been scheduled for at least js_devdata->hard_stop_ticks_nss ticks.
* Hard stop the slot.
u32 *tb;
int owns_tb = 1;
- KBASE_LOG(1, kctx->kbdev->dev, "in %s\n", __func__);
+ dev_dbg(kctx->kbdev->dev, "in %s\n", __func__);
size = (vma->vm_end - vma->vm_start);
nr_pages = size >> PAGE_SHIFT;
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
/* the rest of the flags is added by the cpu_mmap handler */
- KBASE_LOG(1, kctx->kbdev->dev, "%s done\n", __func__);
+ dev_dbg(kctx->kbdev->dev, "%s done\n", __func__);
return 0;
out_no_va_region:
size_t size;
int err = 0;
- KBASE_LOG(1, kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
+ dev_dbg(kctx->kbdev->dev, "in kbase_mmu_dump_mmap\n");
size = (vma->vm_end - vma->vm_start);
nr_pages = size >> PAGE_SHIFT;
*kmap_addr = kaddr;
*reg = new_reg;
- KBASE_LOG(1, kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
+ dev_dbg(kctx->kbdev->dev, "kbase_mmu_dump_mmap done\n");
return 0;
out_no_alloc:
int free_on_close = 0;
struct device *dev = kctx->kbdev->dev;
- KBASE_LOG(1, dev, "kbase_mmap\n");
+ dev_dbg(dev, "kbase_mmap\n");
nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
/* strip away the VM_MAY* flags corresponding to VM_* flags that were not requested */
err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
if (0 != err)
goto out_unlock;
- KBASE_LOG(1, dev, "kbase_trace_buffer_mmap ok\n");
+ dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
/* free the region on munmap */
free_on_close = 1;
goto map;
#include <mali_kbase_gator.h>
#include <mali_kbase_debug.h>
-#define beenthere(kctx, f, a...) KBASE_LOG(1, kctx->kbdev->dev, "%s:" f, __func__, ##a)
+#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
#include <mali_kbase_defs.h>
#include <mali_kbase_hw.h>
reset_required = MALI_TRUE;
}
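	/* gpu_powered is now raised before the conditional reset below, since
	 * the register writes performed while re-initialising the GPU (e.g.
	 * the soft reset in kbase_pm_init_hw()) expect the flag to be set. */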
+ spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
+ kbdev->pm.gpu_powered = MALI_TRUE;
+ spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
+
if (reset_required) {
/* GPU state was lost, reset GPU to ensure it is in a
* consistent state */
kbase_pm_init_hw(kbdev, MALI_TRUE);
}
- spin_lock_irqsave(&kbdev->pm.gpu_powered_lock, flags);
- kbdev->pm.gpu_powered = MALI_TRUE;
- spin_unlock_irqrestore(&kbdev->pm.gpu_powered_lock, flags);
-
/* Lastly, enable the interrupts */
kbase_pm_enable_interrupts(kbdev);
}
#ifdef CONFIG_MALI_DEBUG
struct device *dev = kctx->kbdev->dev;
- KBASE_LOG(2, dev, "%s\n", head_str);
- KBASE_LOG(2, dev, "addr = %p\n"
+ dev_dbg(dev, "%s\n", head_str);
+ dev_dbg(dev, "addr = %p\n"
"status = %x\n"
"not_complete_index = %x\n"
"fault_addr = %llx\n"
job->dependencies[1]);
if (job->flags & JOB_FLAG_DESC_SIZE)
- KBASE_LOG(2, dev, "next = %llx\n", job->next._64);
+ dev_dbg(dev, "next = %llx\n", job->next._64);
else
- KBASE_LOG(2, dev, "next = %x\n", job->next._32);
+ dev_dbg(dev, "next = %x\n", job->next._32);
#endif
}
} *fbd_tiler;
struct device *dev = kctx->kbdev->dev;
- KBASE_LOG(2, dev, "fbd_address: %llx\n", fbd_address);
+ dev_dbg(dev, "fbd_address: %llx\n", fbd_address);
fbd_tiler = kbasep_map_page_sync(kctx, fbd_address + SFBD_TILER_OFFSET,
&phys_addr);
return MALI_ERROR_FUNCTION_FAILED;
}
#ifdef CONFIG_MALI_DEBUG
- KBASE_LOG(2, dev, "FBD tiler:\n"
+ dev_dbg(dev, "FBD tiler:\n"
"flags = %x\n"
"heap_free_address = %llx\n",
fbd_tiler->flags,
}
- KBASE_LOG(2, dev,
+ dev_dbg(dev,
"Old hierarchy mask=%x New hierarchy mask=%x\n",
old_hierarchy_mask, hierarchy_mask);
for (i = 0; i < HIERARCHY_WEIGHTS; i++)
- KBASE_LOG(2, dev, " Hierarchy weight %02d: %08x\n",
+ dev_dbg(dev, " Hierarchy weight %02d: %08x\n",
i, weights[i]);
j = 0;
if (hierarchy_mask & (1 << i)) {
KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
- KBASE_LOG(2, dev,
+ dev_dbg(dev,
" Writing hierarchy level %02d (%08x) to %d\n",
i, weights[i], j);
fbd_tiler->heap_free_address = tiler_heap_free;
- KBASE_LOG(2, dev, "heap_free_address=%llx flags=%x\n",
+ dev_dbg(dev, "heap_free_address=%llx flags=%x\n",
fbd_tiler->heap_free_address, fbd_tiler->flags);
kbasep_unmap_page_sync(fbd_tiler, phys_addr);
mali_bool flags_different_page;
struct device *dev = kctx->kbdev->dev;
- KBASE_LOG(2, dev, "fbd_address: %llx\n", fbd_address);
+ dev_dbg(dev, "fbd_address: %llx\n", fbd_address);
fbd_tiler = kbasep_map_page_sync(kctx, fbd_address + MFBD_TILER_OFFSET,
&phys_addr);
return MALI_ERROR_FUNCTION_FAILED;
}
#ifdef CONFIG_MALI_DEBUG
- KBASE_LOG(2, dev, "FBD tiler:\n"
+ dev_dbg(dev, "FBD tiler:\n"
"heap_free_address = %llx\n",
fbd_tiler->heap_free_address);
#endif
}
- KBASE_LOG(2, dev,
+ dev_dbg(dev,
"Old hierarchy mask=%x New hierarchy mask=%x\n",
old_hierarchy_mask, hierarchy_mask);
for (i = 0; i < HIERARCHY_WEIGHTS; i++)
- KBASE_LOG(2, dev, " Hierarchy weight %02d: %08x\n",
+ dev_dbg(dev, " Hierarchy weight %02d: %08x\n",
i, weights[i]);
j = 0;
if (hierarchy_mask & (1 << i)) {
KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
- KBASE_LOG(2, dev,
+ dev_dbg(dev,
" Writing hierarchy level %02d (%08x) to %d\n",
i, weights[i], j);
job_head *job;
u64 phys_addr;
- KBASE_LOG(2, kctx->kbdev->dev,
+ dev_dbg(kctx->kbdev->dev,
"kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
job = kbasep_map_page_sync(kctx, jc, &phys_addr);
mali_bool first_in_chain = MALI_TRUE;
int nr_jobs = 0;
- KBASE_LOG(2, kctx->kbdev->dev,
+ dev_dbg(kctx->kbdev->dev,
"kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
jc, hw_job_id_offset);
while (jc) {
- KBASE_LOG(2, kctx->kbdev->dev,
+ dev_dbg(kctx->kbdev->dev,
"kbasep_replay_parse_jc: parsing jc=%llx\n",
jc);
for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
- KBASE_LOG(2, kctx->kbdev->dev,
+ dev_dbg(kctx->kbdev->dev,
"kbasep_allocate_katom: Allocated atom %d\n",
i);
return i;
{
kbase_jd_context *jctx = &kctx->jctx;
- KBASE_LOG(2, kctx->kbdev->dev,
+ dev_dbg(kctx->kbdev->dev,
"kbasep_release_katom: Released atom %d\n",
atom_id);
{
mali_addr64 next;
- KBASE_LOG(2, kctx->kbdev->dev, "Tiler jc list :\n");
+ dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n");
next = payload->tiler_jc_list;
while (next) {
if (!jc_struct)
return;
- KBASE_LOG(2, kctx->kbdev->dev,
+ dev_dbg(kctx->kbdev->dev,
"* jc_struct=%p jc=%llx next=%llx\n",
jc_struct,
jc_struct->jc,
u64 phys_addr;
struct device *dev = kctx->kbdev->dev;
- KBASE_LOG(2, dev,
+ dev_dbg(dev,
"kbasep_replay_parse_payload: replay_atom->jc = %llx "
"sizeof(payload) = %d\n",
replay_atom->jc, sizeof(payload));
}
#ifdef CONFIG_MALI_DEBUG
- KBASE_LOG(2, dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
- KBASE_LOG(2, dev, "Payload structure:\n"
+ dev_dbg(dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
+ dev_dbg(dev, "Payload structure:\n"
"tiler_jc_list = %llx\n"
"fragment_jc = %llx\n"
"tiler_heap_free = %llx\n"
goto out;
}
- KBASE_LOG(2, dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
+ dev_dbg(dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
t_atom->jc, f_atom->jc);
ret = MALI_ERROR_NONE;
struct device *dev = kctx->kbdev->dev;
if (katom->event_code == BASE_JD_EVENT_DONE) {
- KBASE_LOG(2, dev, "Previous job succeeded - not replaying\n");
+ dev_dbg(dev, "Previous job succeeded - not replaying\n");
return MALI_REPLAY_STATUS_COMPLETE;
}
if (jctx->sched_info.ctx.is_dying) {
- KBASE_LOG(2, dev, "Not replaying; context is dying\n");
+ dev_dbg(dev, "Not replaying; context is dying\n");
return MALI_REPLAY_STATUS_COMPLETE;
}
char version_buffer[KBASE_GET_VERSION_BUFFER_SIZE];
u32 version_string_size;
u32 padding;
+ u32 rk_version;
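	/* Appended after the existing members, so the field offsets userspace
	 * already compiled against are unchanged; only the structure grows. */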
} kbase_uk_get_ddk_version;
typedef struct kbase_uk_set_flags {