/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}
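
/*
 * For reference, the in-memory hardware descriptor filled in by
 * mv_desc_init() above is struct mv_xor_desc (defined in mv_xor.h).
 * On little-endian builds it is laid out roughly as:
 *
 *	u32 status;		descriptor execution status
 *	u32 crc32_result;	result of CRC-32 calculation
 *	u32 desc_command;	operation type and source-enable bits
 *	u32 phy_next_desc;	next descriptor address pointer
 *	u32 byte_count;		size of src/dst blocks in bytes
 *	u32 phy_dest_addr;	destination block address
 *	u32 phy_src_addr[8];	source block addresses
 *
 * On big-endian builds each pair of 32-bit words is swapped by the
 * descriptor-swap feature (see mv_phy_src_idx() and
 * XOR_DESCRIPTOR_SWAP).
 */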

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}
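
/*
 * Note on mv_phy_src_idx(): as defined in mv_xor.h it is an identity
 * mapping on little-endian builds, while on big-endian builds it
 * flips the low bit of the index (0<->1, 2<->3, ...) so that each
 * source address lands in the correct half of the byte-swapped
 * 64-bit pairs of the hardware descriptor.
 */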

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;

	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}
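
/*
 * Note: bits [5:4] of the activation register hold the channel state;
 * mv_chan_is_busy() treats a state value of 1 as busy and any other
 * value as idle.
 */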

/**
 * mv_chan_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_free_slots(struct mv_xor_chan *mv_chan,
			       struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slot_used = 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static void
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_chan_free_slots(mv_chan, iter);
		}
	}
}

static void
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return;
	}

	mv_chan_free_slots(mv_chan, desc);
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  chain_node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->chain_node.next,
						  struct mv_xor_desc_slot,
						  chain_node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}
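
/*
 * Summary of the cleanup policy above: completed descriptors are
 * reaped from the head of the chain. If the engine has gone idle
 * while unfinished descriptors remain, the chain is restarted either
 * from the list head (when the current descriptor was itself cleaned)
 * or from the descriptor after the current one; otherwise the tasklet
 * is rescheduled so a later pass can finish the job.
 */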

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	int retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation cannot be found start searching
	 * from the beginning of the list
	 */
retry:
	if (!retry)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {

		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slot_used) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;
			continue;
		}

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);

		iter->slot_used = 1;
		INIT_LIST_HEAD(&iter->chain_node);
		iter->async_tx.cookie = -EBUSY;
		mv_chan->last_used = iter;
		mv_desc_clear_next_desc(iter);

		return iter;

	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/

static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current descriptor is the end of the chain
			 * before the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}
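
/*
 * For reference, a typical dmaengine client exercises the entry points
 * in this section roughly as follows (a sketch; error handling omitted,
 * and chan/dest_dma/src_dma/len are assumed to have been obtained
 * through the usual dmaengine APIs):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest_dma,
 *						   src_dma, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		(calls mv_xor_tx_submit)
 *	dma_async_issue_pending(chan);		(calls mv_xor_issue_pending)
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *						(polls via mv_xor_status)
 */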

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}
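
/*
 * Sizing note: with the values defined in mv_xor.h (MV_XOR_POOL_SIZE
 * of one PAGE_SIZE and MV_XOR_SLOT_SIZE of 64 bytes),
 * num_descs_in_pool works out to PAGE_SIZE / 64, i.e. 64 slots on a
 * system with 4 KiB pages.
 */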

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
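
/*
 * Note: the hardware has no dedicated "interrupt" operation, so the
 * driver satisfies DMA_INTERRUPT requests with a real (but
 * minimum-sized) transfer between the per-channel dummy buffers that
 * were mapped once at channel-init time, relying on the descriptor's
 * end-of-descriptor interrupt to signal the client.
 */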

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);

	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
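
/*
 * Worked example of the comparison value above: with
 * MV_XOR_NUM_SRC_TEST = 4, the source pages are filled with 0x01,
 * 0x02, 0x04 and 0x08 respectively, so every destination byte should
 * be 0x01 ^ 0x02 ^ 0x04 ^ 0x08 = 0x0f and cmp_word = 0x0f0f0f0f.
 */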

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_chan_set_mode(mv_chan, DMA_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
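
/*
 * Register-encoding note (sketch): for each DRAM chip-select window,
 * WINDOW_BASE packs the 64 KiB-aligned base address into bits 31:16,
 * the mbus attribute into bits 15:8 and the target ID into the low
 * bits, while WINDOW_SIZE holds (size - 1) with 64 KiB granularity.
 * In win_enable, bit i enables window i and the 2-bit field at
 * 16 + 2*i (set to 3 here) grants the engine full read/write access
 * to that window.
 */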

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}
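
/*
 * For reference, a device-tree user of this driver would describe one
 * engine with per-channel child nodes (a sketch adapted from the
 * marvell,orion-xor binding; addresses and interrupt numbers are
 * illustrative only):
 *
 *	xor@d0060900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0xd0060900 0x100
 *		       0xd0060b00 0x100>;
 *		clocks = <&coreclk 0>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *			dmacap,interrupt;
 *		};
 *	};
 */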

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");