/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

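/*
 * Hardware descriptor helpers. Each slot's hw_desc points into the
 * coherent descriptor pool; software hands a descriptor to the engine
 * by setting XOR_DESC_DMA_OWNED in its status word, and completion is
 * detected by the XOR_DESC_SUCCESS bit appearing there (see
 * mv_chan_slot_cleanup() below).
 */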
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	/* for XOR, each programmed source also enables its command bit */
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

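/*
 * The XOR channels of one engine share the interrupt cause and mask
 * registers; each channel owns a 16-bit slice of them, hence the
 * "chan->idx * 16" shifts in the helpers below.
 */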
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));

	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;

	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);

	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

	/*
	 * IS_ENABLED() only works for CONFIG_* Kconfig symbols, so test
	 * __BIG_ENDIAN with the preprocessor instead, as
	 * mv_chan_set_mode_to_desc() already does.
	 */
#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	op_mode = XOR_OPERATION_MODE_IN_DESC;

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	/* bits 5:4 of the activation register hold the channel state;
	 * state 1 means the channel is currently active
	 */
	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/**
 * mv_chan_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_free_slots(struct mv_xor_chan *mv_chan,
			       struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slot_used = 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static void
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_chan_free_slots(mv_chan, iter);
		}
	}
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_chan_free_slots(mv_chan, desc);
	return 0;
}

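/*
 * Walk the submitted chain from the oldest descriptor, run completion
 * actions for every descriptor the engine has marked XOR_DESC_SUCCESS,
 * and, if the engine has stopped while work is still queued, restart
 * it on the first descriptor that remains.
 */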
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  chain_node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->chain_node.next,
						  struct mv_xor_desc_slot,
						  chain_node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

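/*
 * Slot allocator: the search starts at last_used and continues through
 * all_slots; if no free slot is found before the end of the list, a
 * second pass retries from the list head before giving up and asking
 * the cleanup tasklet to reclaim completed slots.
 */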
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	int retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	if (!retry)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {

		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slot_used) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;
			continue;
		}

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);

		iter->slot_used = 1;
		INIT_LIST_HEAD(&iter->chain_node);
		iter->async_tx.cookie = -EBUSY;
		mv_chan->last_used = iter;
		mv_desc_clear_next_desc(iter);

		return iter;
	}

	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/

static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

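/*
 * A minimal client-side sketch of how this entry point is reached
 * through the generic dmaengine API (illustrative only; the channel,
 * buffer and callback names are assumptions, error handling omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs,
 *					       src_cnt, len,
 *					       DMA_PREP_INTERRUPT);
 *	tx->callback = my_xor_done;		(hypothetical callback)
 *	cookie = dmaengine_submit(tx);		(ends up in mv_xor_tx_submit)
 *	dma_async_issue_pending(chan);		(ends up in mv_xor_issue_pending)
 */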
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);

	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

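/*
 * The activation register is only written once "pending" reaches
 * MV_XOR_THRESHOLD, so several queued descriptors can be started with
 * a single register write.
 */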
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	mv_chan->op_in_desc = op_in_desc;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
		dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				       &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode_to_desc(mv_chan);
	else
		mv_chan_set_mode(mv_chan, DMA_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

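/*
 * Program the MBUS address decoding windows of the XOR engine so that
 * each DRAM chip-select is reachable: every window is first cleared,
 * then one window per chip-select gets the CS base, size and target
 * attributes, and the two per-unit BAR-enable registers open the
 * programmed windows for read and write access.
 */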
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);

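/*
 * Example of the device tree layout this table and the probe routine
 * below expect (a sketch based on the properties parsed in
 * mv_xor_probe(); the node name, addresses and interrupt number are
 * illustrative):
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100
 *		       0x60b00 0x100>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *			dmacap,interrupt;
 *		};
 *	};
 */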
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;
			int op_in_desc;

			op_in_desc = (int)of_id->data;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq, op_in_desc);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq,
						  XOR_MODE_IN_REG);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");