/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

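/*
 * The helpers below operate on the in-memory hardware descriptor
 * (struct mv_xor_desc) that the engine walks, or poke the per-channel
 * registers through the XOR_*() accessors from mv_xor.h.
 */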
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on a new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

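/*
 * Completion path: for every descriptor the hardware has finished, fire the
 * client callback, unmap the DMA buffers (unless the submitter set the
 * DMA_COMPL_SKIP_*_UNMAP flags) and kick any dependent async_tx operations.
 */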
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

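/*
 * Walk the submitted chain from the oldest descriptor, retire everything the
 * hardware has marked XOR_DESC_SUCCESS, and restart or re-kick the engine if
 * it stopped while work is still queued. Caller must hold mv_chan->lock;
 * mv_xor_slot_cleanup() below is the locked wrapper.
 */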
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
								cookie);

			/* done processing desc, clean slot */
			mv_xor_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  chain_node);
			mv_xor_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->chain_node.next,
						  struct mv_xor_desc_slot,
						  chain_node);
				mv_xor_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

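/*
 * Descriptor slot allocator: scan all_slots starting at last_used for
 * num_slots contiguous free slots, retrying once from the head of the list;
 * if nothing is found, schedule the cleanup tasklet so completed slots can
 * be reclaimed, and return NULL.
 */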
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
			struct mv_xor_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/

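/*
 * tx_submit: splice the prepared descriptor chain onto the channel's
 * software chain under mv_chan->lock, link it to the previous hardware tail
 * when chaining is possible, and (re)start the engine if it is idle or
 * already stopped at the old tail.
 */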
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);
	return cookie;
}

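/*
 * Carve the coherent descriptor pool (MV_XOR_POOL_SIZE bytes) into
 * MV_XOR_SLOT_SIZE hardware descriptors and pair each one with a kzalloc'd
 * software slot tracked on mv_chan->all_slots.
 */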
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
					struct mv_xor_desc_slot,
					slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

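/*
 * The prep_dma_* routines below share one pattern: reject lengths under
 * MV_XOR_MIN_BYTE_COUNT, grab a slot under mv_chan->lock, program the
 * hardware descriptor for the requested operation, and record the unmap
 * information consumed later by the completion path.
 */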
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config     0x%08x\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask  0x%08x\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr  0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

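/*
 * Hard interrupt handler: report error causes, acknowledge the
 * end-of-chain/end-of-descriptor cause bits, and defer all descriptor
 * completion work to the per-channel tasklet.
 */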
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

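/*
 * Bring up one dmaengine channel: allocate the write-combined descriptor
 * pool, wire up the dma_device callbacks according to cap_mask, request the
 * interrupt, and run the memcpy/xor self-tests before registering the
 * device.
 */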
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

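/*
 * Program the XOR engine's MBus address-decoding windows from the SoC's
 * DRAM chip-select layout so descriptor and buffer addresses are routed to
 * the right target, then enable each used window in both window-enable
 * registers.
 */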
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,memset"))
				dma_cap_set(DMA_MEMSET, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				xordev->channels[i] = NULL;
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cd->cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

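/*
 * Illustrative device-tree sketch, inferred from the properties this
 * driver's probe routine reads (two register windows, one child node per
 * channel with an interrupt and dmacap,* capability flags). Addresses and
 * node names here are placeholders; see the platform's binding document
 * for the authoritative format.
 *
 *	xor@60800 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60800 0x100>, <0x60a00 0x100>;
 *
 *		channel0 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *	};
 */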
static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	        = THIS_MODULE,
		.name	        = MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");