/* This Bridge driver's device context: */
struct bridge_dev_context {
struct dev_object *hdev_obj; /* Handle to Bridge device object. */
- u32 dw_dsp_base_addr; /* Arm's API to DSP virt base addr */
+ u32 dsp_base_addr; /* Arm's API to DSP virt base addr */
/*
* DSP External memory prog address as seen virtually by the OS on
* the host side.
*/
- u32 dw_dsp_ext_base_addr; /* See the comment above */
- u32 dw_api_reg_base; /* API mem map'd registers */
+ u32 dsp_ext_base_addr; /* See the comment above */
+ u32 api_reg_base; /* API mem map'd registers */
void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */
- u32 dw_api_clk_base; /* CLK Registers */
- u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
+ u32 api_clk_base; /* CLK Registers */
+ u32 dsp_clk_m2_base; /* DSP Clock Module m2 */
u32 dw_public_rhea; /* Pub Rhea */
u32 dw_int_addr; /* MB INTR reg */
u32 dw_tc_endianism; /* TC Endianism register */
u32 dw_test_base; /* DSP MMU Mapped registers */
u32 dw_self_loop; /* Pointer to the selfloop */
- u32 dw_dsp_start_add; /* API Boot vector */
+ u32 dsp_start_add; /* API Boot vector */
u32 dw_internal_size; /* Internal memory size */
struct omap_mbox *mbox; /* Mail box handle */
/* DMMU TLB entries */
struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB];
- u32 dw_brd_state; /* Last known board state. */
+ u32 brd_state; /* Last known board state. */
/* TC Settings */
bool tc_word_swap_on; /* Traffic Controller Word Swap */
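The brd_state member renamed above is what the power-management paths consult before touching DSP-side registers; a minimal sketch of the recurring idiom (mirroring flush_all() and the hibernation checks later in this patch):

	/* Wake the DSP first if the last known board state says it is hibernating. */
	if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->brd_state == BRD_HIBERNATION)
		wake_dsp(dev_context, NULL);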
chnl_packet_obj->byte_size = byte_size;
chnl_packet_obj->buf_size = buf_size;
/* Only valid for output channel */
- chnl_packet_obj->dw_arg = dw_arg;
+ chnl_packet_obj->arg = dw_arg;
chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
CHNL_IOCSTATCOMPLETE);
list_add_tail(&chnl_packet_obj->link, &pchnl->pio_requests);
ioc.pbuf = chnl_packet_obj->host_user_buf;
ioc.byte_size = chnl_packet_obj->byte_size;
ioc.buf_size = chnl_packet_obj->buf_size;
- ioc.dw_arg = chnl_packet_obj->dw_arg;
+ ioc.arg = chnl_packet_obj->arg;
ioc.status |= chnl_packet_obj->status;
/* Place the used chirp on the free list: */
list_add_tail(&chnl_packet_obj->link,
} else {
ioc.pbuf = NULL;
ioc.byte_size = 0;
- ioc.dw_arg = 0;
+ ioc.arg = 0;
ioc.buf_size = 0;
}
/* Ensure invariant: If any IOC's are queued for this channel... */
pio_mgr->input, bytes);
pchnl->bytes_moved += bytes;
chnl_packet_obj->byte_size = bytes;
- chnl_packet_obj->dw_arg = dw_arg;
+ chnl_packet_obj->arg = dw_arg;
chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
if (bytes == 0) {
msg_input = pio_mgr->msg_input;
for (i = 0; i < num_msgs; i++) {
/* Read the next message */
- addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd);
- msg.msg.dw_cmd =
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
+ msg.msg.cmd =
read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
- addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1);
- msg.msg.dw_arg1 =
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
+ msg.msg.arg1 =
read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
- addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2);
- msg.msg.dw_arg2 =
+ addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
+ msg.msg.arg2 =
read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
msg.msgq_id =
msg_input += sizeof(struct msg_dspmsg);
/* Determine which queue to put the message in */
- dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
- "dw_arg2=0x%x msgq_id=0x%x\n", msg.msg.dw_cmd,
- msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
+ dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x "
+ "arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
+ msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
/*
* Interrupt may occur before shared memory and message
* input locations have been set up. If all nodes were
if (msg.msgq_id != msg_queue_obj->msgq_id)
continue;
/* Found it */
- if (msg.msg.dw_cmd == RMS_EXITACK) {
+ if (msg.msg.cmd == RMS_EXITACK) {
/*
* Call the node exit notification.
* The exit message does not get
* queued.
*/
(*hmsg_mgr->on_exit)(msg_queue_obj->arg,
- msg.msg.dw_arg1);
+ msg.msg.arg1);
break;
}
/*
chnl_packet_obj->byte_size);
pchnl->bytes_moved += chnl_packet_obj->byte_size;
/* Write all 32 bits of arg */
- sm->arg = chnl_packet_obj->dw_arg;
+ sm->arg = chnl_packet_obj->arg;
#if _CHNL_WORDSIZE == 2
/* Access may use a different SM word size (e.g. 16/32-bit words) */
sm->output_id = (u16) chnl_id;
addr = (u32) &msg_output->msgq_id;
write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val);
- val = (pmsg->msg_data).msg.dw_cmd;
- addr = (u32) &msg_output->msg.dw_cmd;
+ val = (pmsg->msg_data).msg.cmd;
+ addr = (u32) &msg_output->msg.cmd;
write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val);
- val = (pmsg->msg_data).msg.dw_arg1;
- addr = (u32) &msg_output->msg.dw_arg1;
+ val = (pmsg->msg_data).msg.arg1;
+ addr = (u32) &msg_output->msg.arg1;
write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val);
- val = (pmsg->msg_data).msg.dw_arg2;
- addr = (u32) &msg_output->msg.dw_arg2;
+ val = (pmsg->msg_data).msg.arg2;
+ addr = (u32) &msg_output->msg.arg2;
write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val);
msg_output++;
static inline void flush_all(struct bridge_dev_context *dev_context)
{
- if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
- dev_context->dw_brd_state == BRD_HIBERNATION)
+ if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
+ dev_context->brd_state == BRD_HIBERNATION)
wake_dsp(dev_context, NULL);
hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
dsp_clk_enable(DSP_CLK_IVA2);
/* set the device state to IDLE */
- dev_context->dw_brd_state = BRD_IDLE;
+ dev_context->brd_state = BRD_IDLE;
return 0;
}
int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
u32 offset;
- u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;
+ u32 dsp_base_addr = dev_ctxt->dsp_base_addr;
- if (dsp_addr < dev_context->dw_dsp_start_add) {
+ if (dsp_addr < dev_context->dsp_start_add) {
status = -EPERM;
return status;
}
/* Changed here to account for the 3 bands of the DSP internal memory */
- if ((dsp_addr - dev_context->dw_dsp_start_add) <
+ if ((dsp_addr - dev_context->dsp_start_add) <
dev_context->dw_internal_size) {
- offset = dsp_addr - dev_context->dw_dsp_start_add;
+ offset = dsp_addr - dev_context->dsp_start_add;
} else {
status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
ul_num_bytes, mem_type);
int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
- dev_context->dw_brd_state = brd_state;
+ dev_context->brd_state = brd_state;
return status;
}
__raw_writel(0XCAFECAFE, dw_sync_addr);
/* update board state */
- dev_context->dw_brd_state = BRD_RUNNING;
+ dev_context->brd_state = BRD_RUNNING;
/* (void)chnlsm_enable_interrupt(dev_context); */
} else {
- dev_context->dw_brd_state = BRD_UNKNOWN;
+ dev_context->brd_state = BRD_UNKNOWN;
}
}
return status;
struct omap_dsp_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
- if (dev_context->dw_brd_state == BRD_STOPPED)
+ if (dev_context->brd_state == BRD_STOPPED)
return status;
/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
udelay(10);
/* Release the Ext Base virtual Address as the next DSP Program
* may have a different load address */
- if (dev_context->dw_dsp_ext_base_addr)
- dev_context->dw_dsp_ext_base_addr = 0;
+ if (dev_context->dsp_ext_base_addr)
+ dev_context->dsp_ext_base_addr = 0;
- dev_context->dw_brd_state = BRD_STOPPED; /* update board state */
+ dev_context->brd_state = BRD_STOPPED; /* update board state */
dsp_wdt_enable(false);
int *board_state)
{
struct bridge_dev_context *dev_context = dev_ctxt;
- *board_state = dev_context->dw_brd_state;
+ *board_state = dev_context->brd_state;
return 0;
}
int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
- if (dsp_addr < dev_context->dw_dsp_start_add) {
+ if (dsp_addr < dev_context->dsp_start_add) {
status = -EPERM;
return status;
}
- if ((dsp_addr - dev_context->dw_dsp_start_add) <
+ if ((dsp_addr - dev_context->dsp_start_add) <
dev_context->dw_internal_size) {
status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
ul_num_bytes, mem_type);
goto func_end;
}
- dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
+ dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
dev_context->dw_self_loop = (u32) NULL;
dev_context->dsp_per_clks = 0;
dev_context->dw_internal_size = OMAP_DSP_SIZE;
dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
}
- dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
+ dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
(config_param->
dw_mem_base
[3]),
config_param->
dw_mem_length
[3]);
- if (!dev_context->dw_dsp_base_addr)
+ if (!dev_context->dsp_base_addr)
status = -EPERM;
pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
if (!status) {
dev_context->hdev_obj = hdev_obj;
/* Store current board state. */
- dev_context->dw_brd_state = BRD_UNKNOWN;
+ dev_context->brd_state = BRD_UNKNOWN;
dev_context->resources = resources;
dsp_clk_enable(DSP_CLK_IVA2);
bridge_brd_stop(dev_context);
iounmap(host_res->dw_per_base);
if (host_res->dw_per_pm_base)
iounmap((void *)host_res->dw_per_pm_base);
- if (host_res->dw_core_pm_base)
- iounmap((void *)host_res->dw_core_pm_base);
+ if (host_res->core_pm_base)
+ iounmap((void *)host_res->core_pm_base);
host_res->dw_mem_base[0] = (u32) NULL;
host_res->dw_mem_base[2] = (u32) NULL;
status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
copy_bytes, mem_type);
if (!status) {
- if (dest_addr < (dev_context->dw_dsp_start_add +
+ if (dest_addr < (dev_context->dsp_start_add +
dev_context->dw_internal_size)) {
/* Write to Internal memory */
status = write_dsp_data(dev_ctxt, host_buf,
while (ul_remain_bytes > 0 && !status) {
ul_bytes =
ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
- if (dsp_addr < (dev_context->dw_dsp_start_add +
+ if (dsp_addr < (dev_context->dsp_start_add +
dev_context->dw_internal_size)) {
status =
write_dsp_data(dev_ctxt, host_buff, dsp_addr,
if (!status) {
/* Update the Bridge driver state */
- dev_context->dw_brd_state = BRD_DSP_HIBERNATION;
+ dev_context->brd_state = BRD_DSP_HIBERNATION;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
status =
dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
return -EINVAL;
- switch (dev_context->dw_brd_state) {
+ switch (dev_context->brd_state) {
case BRD_RUNNING:
omap_mbox_save_ctx(dev_context->mbox);
if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
} else {
/* Update the Bridge driver state */
if (dsp_test_sleepstate == PWRDM_POWER_OFF)
- dev_context->dw_brd_state = BRD_HIBERNATION;
+ dev_context->brd_state = BRD_HIBERNATION;
else
- dev_context->dw_brd_state = BRD_RETENTION;
+ dev_context->brd_state = BRD_RETENTION;
/* Disable wdt on hibernation. */
dsp_wdt_enable(false);
#ifdef CONFIG_PM
/* Check the board state; if it is not 'SLEEP', return */
- if (dev_context->dw_brd_state == BRD_RUNNING ||
- dev_context->dw_brd_state == BRD_STOPPED) {
+ if (dev_context->brd_state == BRD_RUNNING ||
+ dev_context->brd_state == BRD_STOPPED) {
/* The device is in 'RET' or 'OFF' state and the Bridge state is not
 * 'SLEEP'; this is a state inconsistency, so return */
return 0;
sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);
/* Set the device state to RUNNING */
- dev_context->dw_brd_state = BRD_RUNNING;
+ dev_context->brd_state = BRD_RUNNING;
#endif /* CONFIG_PM */
return status;
}
dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
__func__, voltage_domain, level);
- if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
- (dev_context->dw_brd_state == BRD_RETENTION) ||
- (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
+ if ((dev_context->brd_state == BRD_HIBERNATION) ||
+ (dev_context->brd_state == BRD_RETENTION) ||
+ (dev_context->brd_state == BRD_DSP_HIBERNATION)) {
dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n");
return 0;
- } else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
+ } else if ((dev_context->brd_state == BRD_RUNNING)) {
/* Send a pre-notification to the DSP */
dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
level = *((u32 *) pargs + 1);
dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
__func__, voltage_domain, level);
- if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
- (dev_context->dw_brd_state == BRD_RETENTION) ||
- (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
+ if ((dev_context->brd_state == BRD_HIBERNATION) ||
+ (dev_context->brd_state == BRD_RETENTION) ||
+ (dev_context->brd_state == BRD_DSP_HIBERNATION)) {
/* Update the OPP value in shared memory */
io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
__func__);
- } else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
+ } else if ((dev_context->brd_state == BRD_RUNNING)) {
/* Update the OPP value in shared memory */
io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
/* Send a post notification to DSP */
writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4);
break;
case BPWR_MCBSP1:
- iva2_grpsel = readl(resources->dw_core_pm_base + 0xA8);
- mpu_grpsel = readl(resources->dw_core_pm_base + 0xA4);
+ iva2_grpsel = readl(resources->core_pm_base + 0xA8);
+ mpu_grpsel = readl(resources->core_pm_base + 0xA4);
if (enable) {
iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
}
- writel(iva2_grpsel, resources->dw_core_pm_base + 0xA8);
- writel(mpu_grpsel, resources->dw_core_pm_base + 0xA4);
+ writel(iva2_grpsel, resources->core_pm_base + 0xA8);
+ writel(mpu_grpsel, resources->core_pm_base + 0xA4);
break;
case BPWR_MCBSP2:
iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8);
u32 ul_tlb_base_virt = 0;
u32 ul_shm_offset_virt = 0;
u32 dw_ext_prog_virt_mem;
- u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
+ u32 dw_base_addr = dev_context->dsp_ext_base_addr;
bool trace_read = false;
if (!ul_shm_base_virt) {
/* If reading from TRACE, force remap/unmap */
if (trace_read && dw_base_addr) {
dw_base_addr = 0;
- dev_context->dw_dsp_ext_base_addr = 0;
+ dev_context->dsp_ext_base_addr = 0;
}
if (!dw_base_addr) {
dw_ext_prog_virt_mem -= ul_shm_offset_virt;
dw_ext_prog_virt_mem +=
(ul_ext_base - ul_dyn_ext_base);
- dev_context->dw_dsp_ext_base_addr =
+ dev_context->dsp_ext_base_addr =
dw_ext_prog_virt_mem;
/*
- * This dw_dsp_ext_base_addr will get cleared
+ * This dsp_ext_base_addr will get cleared
* only when the board is stopped.
*/
- if (!dev_context->dw_dsp_ext_base_addr)
+ if (!dev_context->dsp_ext_base_addr)
status = -EPERM;
}
u32 mem_type)
{
u32 offset;
- u32 dw_base_addr = dev_context->dw_dsp_base_addr;
+ u32 dw_base_addr = dev_context->dsp_base_addr;
struct cfg_hostres *resources = dev_context->resources;
int status = 0;
u32 base1, base2, base3;
if (!resources)
return -EPERM;
- offset = dsp_addr - dev_context->dw_dsp_start_add;
+ offset = dsp_addr - dev_context->dsp_start_add;
if (offset < base1) {
dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
resources->dw_mem_length[2]);
u32 ul_num_bytes, u32 mem_type,
bool dynamic_load)
{
- u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
+ u32 dw_base_addr = dev_context->dsp_ext_base_addr;
u32 dw_offset = 0;
u8 temp_byte1, temp_byte2;
u8 remain_byte[4];
if ((dynamic_load || trace_load) && dw_base_addr) {
dw_base_addr = 0;
MEM_UNMAP_LINEAR_ADDRESS((void *)
- dev_context->dw_dsp_ext_base_addr);
- dev_context->dw_dsp_ext_base_addr = 0x0;
+ dev_context->dsp_ext_base_addr);
+ dev_context->dsp_ext_base_addr = 0x0;
}
if (!dw_base_addr) {
if (symbols_reloaded)
(ul_ext_base - ul_dyn_ext_base);
}
- dev_context->dw_dsp_ext_base_addr =
+ dev_context->dsp_ext_base_addr =
(u32) MEM_LINEAR_ADDRESS((void *)
dw_ext_prog_virt_mem,
ul_ext_end - ul_ext_base);
- dw_base_addr += dev_context->dw_dsp_ext_base_addr;
- /* This dw_dsp_ext_base_addr will get cleared only when
+ dw_base_addr += dev_context->dsp_ext_base_addr;
+ /* This dsp_ext_base_addr will get cleared only when
* the board is stopped. */
- if (!dev_context->dw_dsp_ext_base_addr)
+ if (!dev_context->dsp_ext_base_addr)
ret = -EPERM;
}
}
*((u32 *) host_buff) = dw_base_addr + dw_offset;
}
/* Unmap here to force remap for other Ext loads */
- if ((dynamic_load || trace_load) && dev_context->dw_dsp_ext_base_addr) {
+ if ((dynamic_load || trace_load) && dev_context->dsp_ext_base_addr) {
MEM_UNMAP_LINEAR_ADDRESS((void *)
- dev_context->dw_dsp_ext_base_addr);
- dev_context->dw_dsp_ext_base_addr = 0x0;
+ dev_context->dsp_ext_base_addr);
+ dev_context->dsp_ext_base_addr = 0x0;
}
symbols_reloaded = false;
return ret;
if (!resources)
return -EPERM;
- if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
- dev_context->dw_brd_state == BRD_HIBERNATION) {
+ if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
+ dev_context->brd_state == BRD_HIBERNATION) {
#ifdef CONFIG_TIDSPBRIDGE_DVFS
if (pdata->dsp_get_opp)
opplevel = (*pdata->dsp_get_opp) ();
/* Access MMU SYS CONFIG register to generate a short wakeup */
temp = readl(resources->dw_dmmu_base + 0x10);
- dev_context->dw_brd_state = BRD_RUNNING;
- } else if (dev_context->dw_brd_state == BRD_RETENTION) {
+ dev_context->brd_state = BRD_RUNNING;
+ } else if (dev_context->brd_state == BRD_RETENTION) {
/* Restart the peripheral clocks */
dsp_clock_enable_all(dev_context->dsp_per_clks);
}
}
/* Filter subsequent notifications when an error occurs */
- if (dev_context->dw_brd_state != BRD_ERROR) {
+ if (dev_context->brd_state != BRD_ERROR) {
ntfy_notify(deh->ntfy_obj, event);
#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
bridge_recover_schedule();
}
/* Set the board state to ERROR */
- dev_context->dw_brd_state = BRD_ERROR;
+ dev_context->brd_state = BRD_ERROR;
/* Disable all the clocks that were enabled by DSP */
dsp_clock_disable_all(dev_context->dsp_per_clks);
/*
u8 *host_user_buf;
/* Buffer to be filled/emptied. (System) */
u8 *host_sys_buf;
- u32 dw_arg; /* Issue/Reclaim argument. */
+ u32 arg; /* Issue/Reclaim argument. */
u32 dsp_tx_addr; /* Transfer address on DSP side. */
u32 byte_size; /* Bytes transferred. */
u32 buf_size; /* Actual buffer size when allocated. */
* dw_mem_base + this offset */
/*
* Info needed by NODE for allocating channels to communicate with RMS:
- * dw_chnl_offset: Offset of RMS channels. Lower channels are
+ * chnl_offset: Offset of RMS channels. Lower channels are
* reserved.
- * dw_chnl_buf_size: Size of channel buffer to send to RMS
+ * chnl_buf_size: Size of channel buffer to send to RMS
* dw_num_chnls: Total number of channels
* (including reserved).
*/
- u32 dw_chnl_offset;
- u32 dw_chnl_buf_size;
+ u32 chnl_offset;
+ u32 chnl_buf_size;
u32 dw_num_chnls;
void __iomem *dw_per_base;
u32 dw_per_pm_base;
- u32 dw_core_pm_base;
+ u32 core_pm_base;
void __iomem *dw_dmmu_base;
};
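The renamed channel-configuration fields of struct cfg_hostres are consumed unchanged by the node manager, as in the node-manager hunk near the end of this patch; a minimal sketch:

	hnode_mgr->ul_chnl_offset = host_res->chnl_offset;	/* was dw_chnl_offset */
	hnode_mgr->ul_chnl_buf_size = host_res->chnl_buf_size;	/* was dw_chnl_buf_size */
	hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;	/* not renamed here */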
u32 byte_size; /* Bytes transferred. */
u32 buf_size; /* Actual buffer size in bytes */
u32 status; /* Status of IO completion. */
- u32 dw_arg; /* User argument associated with pbuf. */
+ u32 arg; /* User argument associated with pbuf. */
};
#endif /* CHNLDEFS_ */
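With dw_arg shortened to arg in both the request packet (chirp) and the I/O completion record, a completed request is copied back on reclaim as in the channel hunk earlier in this patch; a minimal sketch:

	ioc.pbuf = chnl_packet_obj->host_user_buf;
	ioc.byte_size = chnl_packet_obj->byte_size;
	ioc.buf_size = chnl_packet_obj->buf_size;
	ioc.arg = chnl_packet_obj->arg;		/* was dw_arg before this patch */
	ioc.status |= chnl_packet_obj->status;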
u32 dw_seg_base_pa; /* Start Phys address of SM segment */
/* Total size in bytes of segment: DSP+GPP */
u32 ul_total_seg_size;
- u32 dw_gpp_base_pa; /* Start Phys addr of Gpp SM seg */
+ u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */
u32 ul_gpp_size; /* Size of Gpp SM seg in bytes */
- u32 dw_dsp_base_va; /* DSP virt base byte address */
+ u32 dsp_base_va; /* DSP virt base byte address */
u32 ul_dsp_size; /* DSP seg size in bytes */
/* # of current GPP allocations from this segment */
u32 ul_in_use_cnt;
/* XlatorCreate attributes */
struct cmm_xlatorattrs {
u32 ul_seg_id; /* segment Id used for SM allocations */
- u32 dw_dsp_bufs; /* # of DSP-side bufs */
- u32 dw_dsp_buf_size; /* size of DSP-side bufs in GPP bytes */
+ u32 dsp_bufs; /* # of DSP-side bufs */
+ u32 dsp_buf_size; /* size of DSP-side bufs in GPP bytes */
/* Vm base address alloc'd in client process context */
void *vm_base;
/* dw_vm_size must be >= (dwMaxNumBufs * dwMaxSize) */
/* The dsp_msg structure */
struct dsp_msg {
- u32 dw_cmd;
- u32 dw_arg1;
- u32 dw_arg2;
+ u32 cmd;
+ u32 arg1;
+ u32 arg2;
};
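After the rename, populating a message uses the shorter field names; a minimal sketch based on the node shutdown hunk later in this patch (RMS_EXIT and node_env as used there):

	struct dsp_msg msg;

	msg.cmd = RMS_EXIT;		/* command for the RMS server on the DSP */
	msg.arg1 = hnode->node_env;	/* node environment handle */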
/* The dsp_resourcereqmts structure for node's resource requirements */
/* Error information of last DSP exception signalled to the GPP */
struct dsp_errorinfo {
- u32 dw_err_mask;
+ u32 err_mask;
u32 dw_val1;
u32 dw_val2;
u32 dw_val3;
struct {
void *hprocessor;
- u32 dw_cmd;
+ u32 cmd;
struct dsp_cbdata __user *pargs;
} args_proc_ctrl;
u8 *pbuffer;
u32 dw_bytes;
u32 dw_buf_size;
- u32 dw_arg;
+ u32 arg;
} args_strm_issue;
struct {
u32 ul_sm_size; /* Size of SM block in bytes */
unsigned int dw_vm_base; /* Start of VM block. (Dev driver
* context for 'sma') */
- u32 dw_dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
+ u32 dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
* SM space */
s8 c_factor; /* DSPPa to GPPPa Conversion Factor */
- unsigned int dw_dsp_base; /* DSP virt base byte address */
+ unsigned int dsp_base; /* DSP virt base byte address */
u32 ul_dsp_size; /* DSP seg size in bytes */
struct cmm_object *hcmm_mgr; /* back ref to parent mgr */
/* node list of available memory */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
/* ul_seg_id; does not have to match cmm_dfltalctattrs ul_seg_id */
1,
- 0, /* dw_dsp_bufs */
- 0, /* dw_dsp_buf_size */
+ 0, /* dsp_bufs */
+ 0, /* dsp_buf_size */
NULL, /* vm_base */
0, /* dw_vm_size */
};
altr->shm_base - altr->ul_dsp_size;
cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
altr->ul_dsp_size + altr->ul_sm_size;
- cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
+ cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
altr->shm_base;
cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
altr->ul_sm_size;
- cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
- altr->dw_dsp_base;
+ cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
+ altr->dsp_base;
cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
altr->ul_dsp_size;
cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
psma->ul_sm_size = ul_size; /* SM segment size in bytes */
psma->dw_vm_base = gpp_base_va;
- psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
+ psma->dsp_phys_addr_offset = dsp_addr_offset;
psma->c_factor = c_factor;
- psma->dw_dsp_base = dw_dsp_base;
+ psma->dsp_base = dw_dsp_base;
psma->ul_dsp_size = ul_dsp_size;
if (psma->dw_vm_base == 0) {
status = -EPERM;
dw_addr_xlate =
GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
dw_addr_xlate,
- allocator->dw_dsp_phys_addr_offset *
+ allocator->dsp_phys_addr_offset *
allocator->c_factor);
} else if (xtype == CMM_DSPPA2PA) {
/* Got DSP Pa, convert to GPP Pa */
dw_addr_xlate =
DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
dw_addr_xlate,
- allocator->dw_dsp_phys_addr_offset *
+ allocator->dsp_phys_addr_offset *
allocator->c_factor);
}
loop_cont:
}
if (!status) {
status = proc_ctrl(hprocessor,
- args->args_proc_ctrl.dw_cmd,
+ args->args_proc_ctrl.cmd,
(struct dsp_cbdata *)pargs);
}
args->args_strm_issue.pbuffer,
args->args_strm_issue.dw_bytes,
args->args_strm_issue.dw_buf_size,
- args->args_strm_issue.dw_arg);
+ args->args_strm_issue.arg);
return status;
}
host_res->birq_registers = 0;
host_res->birq_attrib = 0;
host_res->dw_offset_for_monitor = 0;
- host_res->dw_chnl_offset = 0;
+ host_res->chnl_offset = 0;
/* CHNL_MAXCHANNELS */
host_res->dw_num_chnls = CHNL_MAXCHANNELS;
- host_res->dw_chnl_buf_size = 0x400;
+ host_res->chnl_buf_size = 0x400;
return 0;
}
OMAP_PER_CM_SIZE);
host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
OMAP_PER_PRM_SIZE);
- host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
+ host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
OMAP_CORE_PRM_SIZE);
host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
OMAP_DMMU_SIZE);
host_res->birq_registers = 0;
host_res->birq_attrib = 0;
host_res->dw_offset_for_monitor = 0;
- host_res->dw_chnl_offset = 0;
+ host_res->chnl_offset = 0;
/* CHNL_MAXCHANNELS */
host_res->dw_num_chnls = CHNL_MAXCHANNELS;
- host_res->dw_chnl_buf_size = 0x400;
+ host_res->chnl_buf_size = 0x400;
dw_buff_size = sizeof(struct cfg_hostres);
}
*phost_resources = host_res;
status =
(*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, message, utimeout);
/* Check if message contains SM descriptor */
- if (status || !(message->dw_cmd & DSP_RMSBUFDESC))
+ if (status || !(message->cmd & DSP_RMSBUFDESC))
goto func_end;
/* Translate DSP byte addr to GPP Va. */
tmp_buf = cmm_xlator_translate(hnode->xlator,
- (void *)(message->dw_arg1 *
+ (void *)(message->arg1 *
hnode->hnode_mgr->
udsp_word_size), CMM_DSPPA2PA);
if (tmp_buf != NULL) {
CMM_PA2VA);
if (tmp_buf != NULL) {
/* Adjust SM size in msg */
- message->dw_arg1 = (u32) tmp_buf;
- message->dw_arg2 *= hnode->hnode_mgr->udsp_word_size;
+ message->arg1 = (u32) tmp_buf;
+ message->arg2 *= hnode->hnode_mgr->udsp_word_size;
} else {
status = -ESRCH;
}
/* assign pmsg values to new msg */
new_msg = *pmsg;
/* Now, check if message contains a SM buffer descriptor */
- if (pmsg->dw_cmd & DSP_RMSBUFDESC) {
+ if (pmsg->cmd & DSP_RMSBUFDESC) {
/* Translate GPP Va to DSP physical buf Ptr. */
tmp_buf = cmm_xlator_translate(hnode->xlator,
- (void *)new_msg.dw_arg1,
+ (void *)new_msg.arg1,
CMM_VA2DSPPA);
if (tmp_buf != NULL) {
/* got translation, convert to MAUs in msg */
if (hnode->hnode_mgr->udsp_word_size != 0) {
- new_msg.dw_arg1 =
+ new_msg.arg1 =
(u32) tmp_buf /
hnode->hnode_mgr->udsp_word_size;
/* MAUs */
- new_msg.dw_arg2 /= hnode->hnode_mgr->
+ new_msg.arg2 /= hnode->hnode_mgr->
udsp_word_size;
} else {
pr_err("%s: udsp_word_size is zero!\n",
goto func_cont;
}
- msg.dw_cmd = RMS_EXIT;
- msg.dw_arg1 = hnode->node_env;
- killmsg.dw_cmd = RMS_KILLTASK;
- killmsg.dw_arg1 = hnode->node_env;
+ msg.cmd = RMS_EXIT;
+ msg.arg1 = hnode->node_env;
+ killmsg.cmd = RMS_KILLTASK;
+ killmsg.arg1 = hnode->node_env;
intf_fxns = hnode_mgr->intf_fxns;
if (hnode->utimeout > MAXTIMEOUT)
host_res = pbridge_context->resources;
if (!host_res)
return -EPERM;
- hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset;
- hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size;
+ hnode_mgr->ul_chnl_offset = host_res->chnl_offset;
+ hnode_mgr->ul_chnl_buf_size = host_res->chnl_buf_size;
hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
/*
if (buff_size)
*buff_size = chnl_ioc_obj.buf_size;
- *pdw_arg = chnl_ioc_obj.dw_arg;
+ *pdw_arg = chnl_ioc_obj.arg;
if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
status = -ETIME;