void __iomem *regs = pi->base;
u32 id = 0;
-#ifdef CONFIG_PLAT_RK
+#ifdef CONFIG_ARCH_RK30
id |= ((readl(regs + off + 0x0) & 0xff) << 0);
	id |= ((readl(regs + off + 0x4) & 0xff) << 8);
id |= ((readl(regs + off + 0x8) & 0xff) << 16);
	id |= ((readl(regs + off + 0xc) & 0xff) << 24);
#else
id |= (readb(regs + off + 0x0) << 0);
id |= (readb(regs + off + 0x4) << 8);
return off;
}
+
+/* Returns the number of microcode bytes needed to set up the infinite loop */
+static inline int _loop_infiniteloop(unsigned dry_run, u8 buf[],
+ unsigned long bursts, const struct _xfer_spec *pxs, int ev)
+{
+ int cyc, off;
+ unsigned lcnt0, lcnt1, ljmp0, ljmp1, ljmpfe;
+ struct _arg_LPEND lpend;
+
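+	/*
+	 * Generated program (sketch):
+	 *
+	 *	DMAMOV SAR/DAR			<- forever-loop re-entry point
+	 *	DMALP  lc0 = infiniteloop
+	 *		DMALP  lc1 = lcnt1
+	 *			<cyc bursts>
+	 *		DMALPEND lc1
+	 *		DMASEV ev (optional)
+	 *	DMALPEND lc0
+	 *	DMALPEND forever		-> back to the DMAMOVs
+	 */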
+ off = 0;
+ ljmpfe = off;
+ lcnt0 = pxs->r->infiniteloop;
+ /* Max iterations possible in DMALP is 256 */
+ if (bursts > 256) {
+ lcnt1 = 256;
+		cyc = bursts / 256; /* cyc should be less than 8 */
+ } else {
+ lcnt1 = bursts;
+ cyc = 1;
+ }
+
+ /* forever loop */
+ off += _emit_MOV(dry_run, &buf[off], SAR, pxs->x->src_addr);
+ off += _emit_MOV(dry_run, &buf[off], DAR, pxs->x->dst_addr);
+
+ /* loop0 */
+ off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
+ ljmp0 = off;
+
+ /* loop1 */
+ off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
+ ljmp1 = off;
+ off += _bursts(dry_run, &buf[off], pxs, cyc);
+ lpend.cond = ALWAYS;
+ lpend.forever = false;
+ lpend.loop = 1;
+ lpend.bjump = off - ljmp1;
+ off += _emit_LPEND(dry_run, &buf[off], &lpend);
+	/* Optionally signal the event (interrupt) after each inner-loop pass */
+	if (pxs->r->infiniteloop_sev)
+ off += _emit_SEV(dry_run, &buf[off], ev);
+ /* end loop1 */
+ lpend.cond = ALWAYS;
+ lpend.forever = false;
+ lpend.loop = 0;
+ lpend.bjump = off - ljmp0;
+ off += _emit_LPEND(dry_run, &buf[off], &lpend);
+ /* end loop0 */
+ lpend.cond = ALWAYS;
+ lpend.forever = true;
+ lpend.loop = 1;
+ lpend.bjump = off - ljmpfe;
+ off += _emit_LPEND(dry_run, &buf[off], &lpend);
+
+ return off;
+}
+
static inline int _setup_loops(unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs)
{
return off;
}
+
+static inline int _setup_xfer_infiniteloop(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int ev)
+{
+ struct pl330_xfer *x = pxs->x;
+ u32 ccr = pxs->ccr;
+ unsigned long bursts = BYTE_TO_BURST(x->bytes, ccr);
+ int off = 0;
+
+ /* Setup Loop(s) */
+ off += _loop_infiniteloop(dry_run, &buf[off], bursts, pxs, ev);
+
+ return off;
+}
+
/*
* A req is a sequence of one or more xfer units.
* Returns the number of bytes taken to setup the MC for the req.
off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
x = pxs->r->x;
- do {
+
+ if (!pxs->r->infiniteloop) {
+ do {
+ /* Error if xfer length is not aligned at burst size */
+ if (x->bytes % (BRST_SIZE(pxs->ccr)
+ * BRST_LEN(pxs->ccr)))
+ return -EINVAL;
+
+ pxs->x = x;
+ off += _setup_xfer(dry_run, &buf[off], pxs);
+
+ x = x->next;
+ } while (x);
+
+ /* DMASEV peripheral/event */
+ off += _emit_SEV(dry_run, &buf[off], thrd->ev);
+ /* DMAEND */
+ off += _emit_END(dry_run, &buf[off]);
+ } else {
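+		/*
+		 * Ring mode: only the first xfer is programmed; the generated
+		 * microcode loops over it indefinitely, so the xfer list is
+		 * not walked and no DMAEND is emitted.
+		 */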
/* Error if xfer length is not aligned at burst size */
if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
return -EINVAL;
pxs->x = x;
- off += _setup_xfer(dry_run, &buf[off], pxs);
-
- x = x->next;
- } while (x);
-
- /* DMASEV peripheral/event */
- off += _emit_SEV(dry_run, &buf[off], thrd->ev);
- /* DMAEND */
- off += _emit_END(dry_run, &buf[off]);
-
+		off += _setup_xfer_infiniteloop(dry_run, &buf[off],
+						pxs, thrd->ev);
+ }
return off;
}
goto xfer_exit;
}
- /* Prefer Secure Channel */
- if (!_manager_ns(thrd))
- r->cfg->nonsecure = 0;
- else
- r->cfg->nonsecure = 1;
-
/* Use last settings, if not provided */
- if (r->cfg)
+ if (r->cfg) {
+ /* Prefer Secure Channel */
+ if (!_manager_ns(thrd))
+ r->cfg->nonsecure = 0;
+ else
+ r->cfg->nonsecure = 1;
ccr = _prepare_ccr(r->cfg);
- else
+ } else {
ccr = readl(regs + CC(thrd->id));
+ }
/* If this req doesn't have valid xfer settings */
if (!_is_valid(ccr)) {
active -= 1;
rqdone = &thrd->req[active];
- MARK_FREE(rqdone);
- /* Get going again ASAP */
- _start(thrd);
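+		/*
+		 * An infinite-loop request keeps running in the DMAC's
+		 * forever loop, so don't free it or restart the thread.
+		 */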
+ if (!rqdone->r->infiniteloop) {
+ MARK_FREE(rqdone);
+
+ /* Get going again ASAP */
+ _start(thrd);
+ }
/* For now, just make a list of callbacks to be done */
list_add_tail(&rqdone->rqd, &pl330->req_done);
* @req: Two requests to communicate with the PL330 engine.
* @callback_fn: Callback function to the client.
* @rqcfg: Channel configuration for the xfers.
- * @xfer_head: Pointer to the xfer to be next excecuted.
+ * @xfer_head: Pointer to the xfer to be next executed.
* @dmac: Pointer to the DMAC that manages this channel, NULL if the
* channel is available to be acquired.
* @client: Client of this channel. NULL if the
enum rk29_dma_buffresult res;
spin_lock_irqsave(&res_lock, flags);
-
xl = r->x;
- r->x = NULL;
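+	/*
+	 * A ring (infinite-loop) request keeps running; don't detach its
+	 * xfer or resubmit the request.
+	 */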
+ if (!r->infiniteloop) {
+ r->x = NULL;
- rk29_pl330_submit(ch, r);
+ rk29_pl330_submit(ch, r);
+ }
spin_unlock_irqrestore(&res_lock, flags);
res = RK29_RES_ERR;
/* If last request had some xfer */
- if (xl) {
- xfer = container_of(xl, struct rk29_pl330_xfer, px);
- _finish_off(xfer, res, 0);
+ if (!r->infiniteloop) {
+ if (xl) {
+ xfer = container_of(xl, struct rk29_pl330_xfer, px);
+ _finish_off(xfer, res, 0);
+ } else {
+ dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
+ __func__, __LINE__);
+ }
} else {
- dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
- __func__, __LINE__);
+		/* The request keeps looping; just do the client callback */
+ xfer = container_of(xl, struct rk29_pl330_xfer, px);
+ if (ch->callback_fn)
+ ch->callback_fn(xfer->token, xfer->px.bytes, res);
}
}
return ret;
}
EXPORT_SYMBOL(rk29_dma_ctrl);
-
-int rk29_dma_enqueue(enum dma_ch id, void *token,
- dma_addr_t addr, int size)
+/*
+ * Like rk29_dma_enqueue(), but a non-zero numofblock turns the transfer
+ * into a hardware ring that loops indefinitely over the buffer.
+ */
+int rk29_dma_enqueue_ring(enum dma_ch id, void *token,
+ dma_addr_t addr, int size, int numofblock, bool sev)
{
struct rk29_pl330_chan *ch;
struct rk29_pl330_xfer *xfer;
/* Try submitting on either request */
idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
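+	/* Tag the chosen request with the ring parameters before submitting */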
- if (!ch->req[idx].x)
+ if (!ch->req[idx].x) {
+ ch->req[idx].infiniteloop = numofblock;
+		if (numofblock)
+ ch->req[idx].infiniteloop_sev = sev;
rk29_pl330_submit(ch, &ch->req[idx]);
- else
+ } else {
+ ch->req[1 - idx].infiniteloop = numofblock;
+		if (numofblock)
+ ch->req[1 - idx].infiniteloop_sev = sev;
rk29_pl330_submit(ch, &ch->req[1 - idx]);
-
+ }
spin_unlock_irqrestore(&res_lock, flags);
if (ch->options & RK29_DMAF_AUTOSTART)
return ret;
}
+EXPORT_SYMBOL(rk29_dma_enqueue_ring);
+
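+/* A normal one-shot enqueue is a ring enqueue with looping disabled */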
+int rk29_dma_enqueue(enum dma_ch id, void *token,
+ dma_addr_t addr, int size)
+{
+ return rk29_dma_enqueue_ring(id, token, addr, size, 0, false);
+}
EXPORT_SYMBOL(rk29_dma_enqueue);
int rk29_dma_request(enum dma_ch id,
{
return true;
}
-
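+
+/* This DMAC driver supports hardware ring (infinite-loop) transfers */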
+static inline bool rk29_dma_has_infiniteloop(void)
+{
+ return true;
+}
/*
* Every PL330 DMAC has max 32 peripheral interfaces,
* of which some may be not be really used in your
extern int rk29_dma_free(unsigned int channel, struct rk29_dma_client *);
+/* rk29_dma_enqueue_ring
+ *
+ * place the given buffer onto the queue of operations for the channel.
+ * The buffer must be allocated from dma coherent memory, or the Dcache/WB
+ * drained before the buffer is given to the DMA system.
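+ *
+ * @numofblock: number of consecutive blocks of @size bytes to cycle
+ *	through; non-zero makes the transfer repeat indefinitely (ring
+ *	mode), 0 behaves like rk29_dma_enqueue().
+ * @sev: when looping, signal the channel event (buffer-done callback)
+ *	after each block.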
+*/
+
+extern int rk29_dma_enqueue_ring(enum dma_ch channel, void *id,
+ dma_addr_t data, int size, int numofblock, bool sev);
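+
+/* Example (sketch; the channel, buffer and block counts are illustrative):
+ *
+ *	rk29_dma_enqueue_ring(ch, token, buf_dma, period_bytes,
+ *			      nr_periods, true);
+ *
+ * treats the buffer as nr_periods blocks of period_bytes each, replays it
+ * until the channel is stopped, and raises the buffer-done callback after
+ * each block.
+*/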
+
/* rk29_dma_enqueue
*
* place the given buffer onto the queue of operations for the channel.
* drained before the buffer is given to the DMA system.
*/
-extern int rk29_dma_enqueue(unsigned int channel, void *id,
+extern int rk29_dma_enqueue(enum dma_ch channel, void *id,
dma_addr_t data, int size);
+
/* rk29_dma_config
*
* configure the dma channel