dmaengine: mv_xor: add support for a38x command in descriptor mode
firefly-linux-kernel-4.4.55: drivers/dma/mv_xor.c
1 /*
2  * offload engine driver for the Marvell XOR engine
3  * Copyright (C) 2007, 2008, Marvell International Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/spinlock.h>
21 #include <linux/interrupt.h>
22 #include <linux/of_device.h>
23 #include <linux/platform_device.h>
24 #include <linux/memory.h>
25 #include <linux/clk.h>
26 #include <linux/of.h>
27 #include <linux/of_irq.h>
28 #include <linux/irqdomain.h>
29 #include <linux/platform_data/dma-mv_xor.h>
30
31 #include "dmaengine.h"
32 #include "mv_xor.h"
33
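/*
 * The engine can take the operation type (XOR vs. MEMCPY) either from
 * the channel's XOR_CONFIG register, programmed once per channel, or,
 * on Armada 38x, from each hardware descriptor's command field, which
 * lets descriptors of different operation types share one chain.
 */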
34 enum mv_xor_mode {
35         XOR_MODE_IN_REG,
36         XOR_MODE_IN_DESC,
37 };
38
39 static void mv_xor_issue_pending(struct dma_chan *chan);
40
41 #define to_mv_xor_chan(chan)            \
42         container_of(chan, struct mv_xor_chan, dmachan)
43
44 #define to_mv_xor_slot(tx)              \
45         container_of(tx, struct mv_xor_desc_slot, async_tx)
46
47 #define mv_chan_to_devp(chan)           \
48         ((chan)->dmadev.dev)
49
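/*
 * Fill in the static part of a hardware descriptor: mark it as owned by
 * the DMA engine, set the destination and byte count, and request an
 * end-of-descriptor interrupt only when DMA_PREP_INTERRUPT was passed.
 */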
50 static void mv_desc_init(struct mv_xor_desc_slot *desc,
51                          dma_addr_t addr, u32 byte_count,
52                          enum dma_ctrl_flags flags)
53 {
54         struct mv_xor_desc *hw_desc = desc->hw_desc;
55
56         hw_desc->status = XOR_DESC_DMA_OWNED;
57         hw_desc->phy_next_desc = 0;
58         /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
59         hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
60                                 XOR_DESC_EOD_INT_EN : 0;
61         hw_desc->phy_dest_addr = addr;
62         hw_desc->byte_count = byte_count;
63 }
64
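/*
 * Encode the operation type in the descriptor's command word.  This is
 * only used when the channel runs in XOR_MODE_IN_DESC; in register mode
 * the operation comes from XOR_CONFIG (see mv_chan_set_mode()).
 * DMA_INTERRUPT is implemented as a dummy XOR, so it uses the XOR opcode.
 */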
65 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
66 {
67         struct mv_xor_desc *hw_desc = desc->hw_desc;
68
69         switch (desc->type) {
70         case DMA_XOR:
71         case DMA_INTERRUPT:
72                 hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
73                 break;
74         case DMA_MEMCPY:
75                 hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
76                 break;
77         default:
78                 BUG();
79                 return;
80         }
81 }
82
83 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
84                                   u32 next_desc_addr)
85 {
86         struct mv_xor_desc *hw_desc = desc->hw_desc;
87         BUG_ON(hw_desc->phy_next_desc);
88         hw_desc->phy_next_desc = next_desc_addr;
89 }
90
91 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
92 {
93         struct mv_xor_desc *hw_desc = desc->hw_desc;
94         hw_desc->phy_next_desc = 0;
95 }
96
97 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
98                                  int index, dma_addr_t addr)
99 {
100         struct mv_xor_desc *hw_desc = desc->hw_desc;
101         hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
102         if (desc->type == DMA_XOR)
103                 hw_desc->desc_command |= (1 << index);
104 }
105
106 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
107 {
108         return readl_relaxed(XOR_CURR_DESC(chan));
109 }
110
111 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
112                                         u32 next_desc_addr)
113 {
114         writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
115 }
116
117 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
118 {
119         u32 val = readl_relaxed(XOR_INTR_MASK(chan));
120         val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
121         writel_relaxed(val, XOR_INTR_MASK(chan));
122 }
123
124 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
125 {
126         u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
127         intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
128         return intr_cause;
129 }
130
131 static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
132 {
133         u32 val;
134
135         val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
136         val = ~(val << (chan->idx * 16));
137         dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
138         writel_relaxed(val, XOR_INTR_CAUSE(chan));
139 }
140
141 static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
142 {
143         u32 val = 0xFFFF0000 >> (chan->idx * 16);
144         writel_relaxed(val, XOR_INTR_CAUSE(chan));
145 }
146
147 static void mv_chan_set_mode(struct mv_xor_chan *chan,
148                              enum dma_transaction_type type)
149 {
150         u32 op_mode;
151         u32 config = readl_relaxed(XOR_CONFIG(chan));
152
153         switch (type) {
154         case DMA_XOR:
155                 op_mode = XOR_OPERATION_MODE_XOR;
156                 break;
157         case DMA_MEMCPY:
158                 op_mode = XOR_OPERATION_MODE_MEMCPY;
159                 break;
160         default:
161                 dev_err(mv_chan_to_devp(chan),
162                         "error: unsupported operation %d\n",
163                         type);
164                 BUG();
165                 return;
166         }
167
168         config &= ~0x7;
169         config |= op_mode;
170
171 #if defined(__BIG_ENDIAN)
172         config |= XOR_DESCRIPTOR_SWAP;
173 #else
174         config &= ~XOR_DESCRIPTOR_SWAP;
    #endif
175
176         writel_relaxed(config, XOR_CONFIG(chan));
177         chan->current_type = type;
178 }
179
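/*
 * Switch the channel to XOR_OPERATION_MODE_IN_DESC so that the
 * operation is taken from each descriptor's command field instead of
 * the XOR_CONFIG register (Armada 38x descriptor mode).
 */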
180 static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan)
181 {
182         u32 op_mode;
183         u32 config = readl_relaxed(XOR_CONFIG(chan));
184
185         op_mode = XOR_OPERATION_MODE_IN_DESC;
186
187         config &= ~0x7;
188         config |= op_mode;
189
190 #if defined(__BIG_ENDIAN)
191         config |= XOR_DESCRIPTOR_SWAP;
192 #else
193         config &= ~XOR_DESCRIPTOR_SWAP;
194 #endif
195
196         writel_relaxed(config, XOR_CONFIG(chan));
197 }
198
199 static void mv_chan_activate(struct mv_xor_chan *chan)
200 {
201         dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
202
203         /* writel ensures all descriptors are flushed before activation */
204         writel(BIT(0), XOR_ACTIVATION(chan));
205 }
206
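/*
 * The channel is considered busy when the two-bit status field at
 * bits [5:4] of the activation register reads 1.
 */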
207 static char mv_chan_is_busy(struct mv_xor_chan *chan)
208 {
209         u32 state = readl_relaxed(XOR_ACTIVATION(chan));
210
211         state = (state >> 4) & 0x3;
212
213         return (state == 1) ? 1 : 0;
214 }
215
216 /**
217  * mv_chan_free_slots - flag a descriptor slot for reuse
     * @mv_chan: XOR channel that owns the slot
218  * @slot: slot to free
219  * Caller must hold &mv_chan->lock while calling this function
220  */
221 static void mv_chan_free_slots(struct mv_xor_chan *mv_chan,
222                                struct mv_xor_desc_slot *slot)
223 {
224         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
225                 __func__, __LINE__, slot);
226
227         slot->slot_used = 0;
229 }
230
231 /*
232  * mv_chan_start_new_chain - program the engine to operate on new
233  * chain headed by sw_desc
234  * Caller must hold &mv_chan->lock while calling this function
235  */
236 static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
237                                     struct mv_xor_desc_slot *sw_desc)
238 {
239         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
240                 __func__, __LINE__, sw_desc);
241
242         /* set the hardware chain */
243         mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
244
245         mv_chan->pending++;
246         mv_xor_issue_pending(&mv_chan->dmachan);
247 }
248
249 static dma_cookie_t
250 mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
251                                 struct mv_xor_chan *mv_chan,
252                                 dma_cookie_t cookie)
253 {
254         BUG_ON(desc->async_tx.cookie < 0);
255
256         if (desc->async_tx.cookie > 0) {
257                 cookie = desc->async_tx.cookie;
258
259                 /* call the callback (must not sleep or submit new
260                  * operations to this channel)
261                  */
262                 if (desc->async_tx.callback)
263                         desc->async_tx.callback(
264                                 desc->async_tx.callback_param);
265
266                 dma_descriptor_unmap(&desc->async_tx);
267         }
268
269         /* run dependent operations */
270         dma_run_dependencies(&desc->async_tx);
271
272         return cookie;
273 }
274
275 static int
276 mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
277 {
278         struct mv_xor_desc_slot *iter, *_iter;
279
280         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
281         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
282                                  completed_node) {
283
284                 if (async_tx_test_ack(&iter->async_tx)) {
285                         list_del(&iter->completed_node);
286                         mv_chan_free_slots(mv_chan, iter);
287                 }
288         }
289         return 0;
290 }
291
292 static int
293 mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
294                    struct mv_xor_chan *mv_chan)
295 {
296         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
297                 __func__, __LINE__, desc, desc->async_tx.flags);
298         list_del(&desc->chain_node);
299         /* the client is allowed to attach dependent operations
300          * until 'ack' is set
301          */
302         if (!async_tx_test_ack(&desc->async_tx)) {
303                 /* move this slot to the completed_slots */
304                 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
305                 return 0;
306         }
307
308         mv_chan_free_slots(mv_chan, desc);
309         return 0;
310 }
311
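/*
 * Walk the submitted chain from the oldest descriptor: each descriptor
 * whose status word has XOR_DESC_SUCCESS set has been completed by the
 * hardware, so run its callback and recycle its slot.  If the engine
 * has gone idle while descriptors are still queued, either restart it
 * at the right point in the chain or defer further cleanup to the
 * tasklet.
 */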
312 /* This function must be called with the mv_xor_chan spinlock held */
313 static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
314 {
315         struct mv_xor_desc_slot *iter, *_iter;
316         dma_cookie_t cookie = 0;
317         int busy = mv_chan_is_busy(mv_chan);
318         u32 current_desc = mv_chan_get_current_desc(mv_chan);
319         int current_cleaned = 0;
320         struct mv_xor_desc *hw_desc;
321
322         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
323         dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
324         mv_chan_clean_completed_slots(mv_chan);
325
326         /* free completed slots from the chain starting with
327          * the oldest descriptor
328          */
329
330         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
331                                         chain_node) {
332
333                 /* clean finished descriptors */
334                 hw_desc = iter->hw_desc;
335                 if (hw_desc->status & XOR_DESC_SUCCESS) {
336                         cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
337                                                                  cookie);
338
339                         /* done processing desc, clean slot */
340                         mv_desc_clean_slot(iter, mv_chan);
341
342                         /* break if we cleaned the current descriptor */
343                         if (iter->async_tx.phys == current_desc) {
344                                 current_cleaned = 1;
345                                 break;
346                         }
347                 } else {
348                         if (iter->async_tx.phys == current_desc) {
349                                 current_cleaned = 0;
350                                 break;
351                         }
352                 }
353         }
354
355         if ((busy == 0) && !list_empty(&mv_chan->chain)) {
356                 if (current_cleaned) {
357                         /*
358                          * current descriptor cleaned and removed, run
359                          * from list head
360                          */
361                         iter = list_entry(mv_chan->chain.next,
362                                           struct mv_xor_desc_slot,
363                                           chain_node);
364                         mv_chan_start_new_chain(mv_chan, iter);
365                 } else {
366                         if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
367                                 /*
368                                  * descriptors are still waiting after
369                                  * current, trigger them
370                                  */
371                                 iter = list_entry(iter->chain_node.next,
372                                                   struct mv_xor_desc_slot,
373                                                   chain_node);
374                                 mv_chan_start_new_chain(mv_chan, iter);
375                         } else {
376                                 /*
377                                  * some descriptors are still waiting
378                                  * to be cleaned
379                                  */
380                                 tasklet_schedule(&mv_chan->irq_tasklet);
381                         }
382                 }
383         }
384
385         if (cookie > 0)
386                 mv_chan->dmachan.completed_cookie = cookie;
387 }
388
389 static void mv_xor_tasklet(unsigned long data)
390 {
391         struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
392
393         spin_lock_bh(&chan->lock);
394         mv_chan_slot_cleanup(chan);
395         spin_unlock_bh(&chan->lock);
396 }
397
398 static struct mv_xor_desc_slot *
399 mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
400 {
401         struct mv_xor_desc_slot *iter, *_iter;
402         int retry = 0;
403
404         /* start the search from the last allocated descriptor;
405          * if a free slot cannot be found, start searching
406          * from the beginning of the list
407          */
408 retry:
409         if (retry == 0)
410                 iter = mv_chan->last_used;
411         else
412                 iter = list_entry(&mv_chan->all_slots,
413                         struct mv_xor_desc_slot,
414                         slot_node);
415
416         list_for_each_entry_safe_continue(
417                 iter, _iter, &mv_chan->all_slots, slot_node) {
418
419                 prefetch(_iter);
420                 prefetch(&_iter->async_tx);
421                 if (iter->slot_used) {
422                         /* give up after finding the first busy slot
423                          * on the second pass through the list
424                          */
425                         if (retry)
426                                 break;
427                         continue;
428                 }
429
430                 /* pre-ack descriptor */
431                 async_tx_ack(&iter->async_tx);
432
433                 iter->slot_used = 1;
434                 INIT_LIST_HEAD(&iter->chain_node);
435                 iter->async_tx.cookie = -EBUSY;
436                 mv_chan->last_used = iter;
437                 mv_desc_clear_next_desc(iter);
438
439                 return iter;
440
441         }
442         if (!retry++)
443                 goto retry;
444
445         /* try to free some slots if the allocation fails */
446         tasklet_schedule(&mv_chan->irq_tasklet);
447
448         return NULL;
449 }
450
451 /************************ DMA engine API functions ****************************/
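/*
 * Append a descriptor to the software chain and link it into the
 * hardware chain through the previous tail's phy_next_desc.  The engine
 * is only restarted if the chain was empty, or if it is idle and had
 * already consumed the old tail before the append.
 */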
452 static dma_cookie_t
453 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
454 {
455         struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
456         struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
457         struct mv_xor_desc_slot *old_chain_tail;
458         dma_cookie_t cookie;
459         int new_hw_chain = 1;
460
461         dev_dbg(mv_chan_to_devp(mv_chan),
462                 "%s sw_desc %p: async_tx %p\n",
463                 __func__, sw_desc, &sw_desc->async_tx);
464
465         spin_lock_bh(&mv_chan->lock);
466         cookie = dma_cookie_assign(tx);
467
468         if (list_empty(&mv_chan->chain))
469                 list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
470         else {
471                 new_hw_chain = 0;
472
473                 old_chain_tail = list_entry(mv_chan->chain.prev,
474                                             struct mv_xor_desc_slot,
475                                             chain_node);
476                 list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
477
478                 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
479                         &old_chain_tail->async_tx.phys);
480
481                 /* fix up the hardware chain */
482                 mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
483
484                 /* if the channel is not busy */
485                 if (!mv_chan_is_busy(mv_chan)) {
486                         u32 current_desc = mv_chan_get_current_desc(mv_chan);
487                         /*
488                          * and the current desc is the end of the chain before
489                          * the append, then we need to start the channel
490                          */
491                         if (current_desc == old_chain_tail->async_tx.phys)
492                                 new_hw_chain = 1;
493                 }
494         }
495
496         if (new_hw_chain)
497                 mv_chan_start_new_chain(mv_chan, sw_desc);
498
499         spin_unlock_bh(&mv_chan->lock);
500
501         return cookie;
502 }
503
504 /* returns the number of allocated descriptors */
505 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
506 {
507         void *virt_desc;
508         dma_addr_t dma_desc;
509         int idx;
510         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
511         struct mv_xor_desc_slot *slot = NULL;
512         int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
513
514         /* Allocate descriptor slots */
515         idx = mv_chan->slots_allocated;
516         while (idx < num_descs_in_pool) {
517                 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
518                 if (!slot) {
519                         dev_info(mv_chan_to_devp(mv_chan),
520                                  "channel only initialized %d descriptor slots\n",
521                                  idx);
522                         break;
523                 }
524                 virt_desc = mv_chan->dma_desc_pool_virt;
525                 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
526
527                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
528                 slot->async_tx.tx_submit = mv_xor_tx_submit;
529                 INIT_LIST_HEAD(&slot->chain_node);
530                 INIT_LIST_HEAD(&slot->slot_node);
531                 dma_desc = mv_chan->dma_desc_pool;
532                 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
533                 slot->idx = idx++;
534
535                 spin_lock_bh(&mv_chan->lock);
536                 mv_chan->slots_allocated = idx;
537                 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
538                 spin_unlock_bh(&mv_chan->lock);
539         }
540
541         if (mv_chan->slots_allocated && !mv_chan->last_used)
542                 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
543                                         struct mv_xor_desc_slot,
544                                         slot_node);
545
546         dev_dbg(mv_chan_to_devp(mv_chan),
547                 "allocated %d descriptor slots last_used: %p\n",
548                 mv_chan->slots_allocated, mv_chan->last_used);
549
550         return mv_chan->slots_allocated ? : -ENOMEM;
551 }
552
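/*
 * Prepare an XOR descriptor: one destination, up to max_xor (8) source
 * addresses, and a length between MV_XOR_MIN_BYTE_COUNT and
 * MV_XOR_MAX_BYTE_COUNT.  Clients typically reach this through the
 * async_tx layer (e.g. async_xor()) rather than calling it directly.
 */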
553 static struct dma_async_tx_descriptor *
554 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
555                     unsigned int src_cnt, size_t len, unsigned long flags)
556 {
557         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
558         struct mv_xor_desc_slot *sw_desc;
559
560         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
561                 return NULL;
562
563         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
564
565         dev_dbg(mv_chan_to_devp(mv_chan),
566                 "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
567                 __func__, src_cnt, len, &dest, flags);
568
569         spin_lock_bh(&mv_chan->lock);
570         sw_desc = mv_chan_alloc_slot(mv_chan);
571         if (sw_desc) {
572                 sw_desc->type = DMA_XOR;
573                 sw_desc->async_tx.flags = flags;
574                 mv_desc_init(sw_desc, dest, len, flags);
575                 if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
576                         mv_desc_set_mode(sw_desc);
577                 while (src_cnt--)
578                         mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
579         }
580         spin_unlock_bh(&mv_chan->lock);
581         dev_dbg(mv_chan_to_devp(mv_chan),
582                 "%s sw_desc %p async_tx %p\n",
583                 __func__, sw_desc, &sw_desc->async_tx);
584         return sw_desc ? &sw_desc->async_tx : NULL;
585 }
586
587 static struct dma_async_tx_descriptor *
588 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
589                 size_t len, unsigned long flags)
590 {
591         /*
592          * A MEMCPY operation is identical to an XOR operation with only
593          * a single source address.
594          */
595         return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
596 }
597
598 static struct dma_async_tx_descriptor *
599 mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
600 {
601         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
602         dma_addr_t src, dest;
603         size_t len;
604
605         src = mv_chan->dummy_src_addr;
606         dest = mv_chan->dummy_dst_addr;
607         len = MV_XOR_MIN_BYTE_COUNT;
608
609         /*
610          * We implement the DMA_INTERRUPT operation as a minimum sized
611          * XOR operation with a single dummy source address.
612          */
613         return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
614 }
615
616 static void mv_xor_free_chan_resources(struct dma_chan *chan)
617 {
618         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
619         struct mv_xor_desc_slot *iter, *_iter;
620         int in_use_descs = 0;
621
622         spin_lock_bh(&mv_chan->lock);
623
624         mv_chan_slot_cleanup(mv_chan);
625
626         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
627                                         chain_node) {
628                 in_use_descs++;
629                 list_del(&iter->chain_node);
630         }
631         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
632                                  completed_node) {
633                 in_use_descs++;
634                 list_del(&iter->completed_node);
635         }
636         list_for_each_entry_safe_reverse(
637                 iter, _iter, &mv_chan->all_slots, slot_node) {
638                 list_del(&iter->slot_node);
639                 kfree(iter);
640                 mv_chan->slots_allocated--;
641         }
642         mv_chan->last_used = NULL;
643
644         dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
645                 __func__, mv_chan->slots_allocated);
646         spin_unlock_bh(&mv_chan->lock);
647
648         if (in_use_descs)
649                 dev_err(mv_chan_to_devp(mv_chan),
650                         "freeing %d in use descriptors!\n", in_use_descs);
651 }
652
653 /**
654  * mv_xor_status - poll the status of an XOR transaction
655  * @chan: XOR channel handle
656  * @cookie: XOR transaction identifier
657  * @txstate: XOR transactions state holder (or NULL)
658  */
659 static enum dma_status mv_xor_status(struct dma_chan *chan,
660                                           dma_cookie_t cookie,
661                                           struct dma_tx_state *txstate)
662 {
663         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
664         enum dma_status ret;
665
666         ret = dma_cookie_status(chan, cookie, txstate);
667         if (ret == DMA_COMPLETE)
668                 return ret;
669
670         spin_lock_bh(&mv_chan->lock);
671         mv_chan_slot_cleanup(mv_chan);
672         spin_unlock_bh(&mv_chan->lock);
673
674         return dma_cookie_status(chan, cookie, txstate);
675 }
676
677 static void mv_chan_dump_regs(struct mv_xor_chan *chan)
678 {
679         u32 val;
680
681         val = readl_relaxed(XOR_CONFIG(chan));
682         dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
683
684         val = readl_relaxed(XOR_ACTIVATION(chan));
685         dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
686
687         val = readl_relaxed(XOR_INTR_CAUSE(chan));
688         dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
689
690         val = readl_relaxed(XOR_INTR_MASK(chan));
691         dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
692
693         val = readl_relaxed(XOR_ERROR_CAUSE(chan));
694         dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
695
696         val = readl_relaxed(XOR_ERROR_ADDR(chan));
697         dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
698 }
699
700 static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
701                                           u32 intr_cause)
702 {
703         if (intr_cause & XOR_INT_ERR_DECODE) {
704                 dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
705                 return;
706         }
707
708         dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
709                 chan->idx, intr_cause);
710
711         mv_chan_dump_regs(chan);
712         WARN_ON(1);
713 }
714
715 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
716 {
717         struct mv_xor_chan *chan = data;
718         u32 intr_cause = mv_chan_get_intr_cause(chan);
719
720         dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
721
722         if (intr_cause & XOR_INTR_ERRORS)
723                 mv_chan_err_interrupt_handler(chan, intr_cause);
724
725         tasklet_schedule(&chan->irq_tasklet);
726
727         mv_chan_clear_eoc_cause(chan);
728
729         return IRQ_HANDLED;
730 }
731
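/*
 * The engine is only (re)activated once ->pending reaches
 * MV_XOR_THRESHOLD; mv_chan_start_new_chain() increments ->pending
 * before calling this.
 */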
732 static void mv_xor_issue_pending(struct dma_chan *chan)
733 {
734         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
735
736         if (mv_chan->pending >= MV_XOR_THRESHOLD) {
737                 mv_chan->pending = 0;
738                 mv_chan_activate(mv_chan);
739         }
740 }
741
742 /*
743  * Perform a transaction to verify the HW works.
744  */
745
746 static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
747 {
748         int i, ret;
749         void *src, *dest;
750         dma_addr_t src_dma, dest_dma;
751         struct dma_chan *dma_chan;
752         dma_cookie_t cookie;
753         struct dma_async_tx_descriptor *tx;
754         struct dmaengine_unmap_data *unmap;
755         int err = 0;
756
757         src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
758         if (!src)
759                 return -ENOMEM;
760
761         dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
762         if (!dest) {
763                 kfree(src);
764                 return -ENOMEM;
765         }
766
767         /* Fill in src buffer */
768         for (i = 0; i < PAGE_SIZE; i++)
769                 ((u8 *) src)[i] = (u8)i;
770
771         dma_chan = &mv_chan->dmachan;
772         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
773                 err = -ENODEV;
774                 goto out;
775         }
776
777         unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
778         if (!unmap) {
779                 err = -ENOMEM;
780                 goto free_resources;
781         }
782
783         src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
784                                  PAGE_SIZE, DMA_TO_DEVICE);
785         unmap->addr[0] = src_dma;
786
787         ret = dma_mapping_error(dma_chan->device->dev, src_dma);
788         if (ret) {
789                 err = -ENOMEM;
790                 goto free_resources;
791         }
792         unmap->to_cnt = 1;
793
794         dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
795                                   PAGE_SIZE, DMA_FROM_DEVICE);
796         unmap->addr[1] = dest_dma;
797
798         ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
799         if (ret) {
800                 err = -ENOMEM;
801                 goto free_resources;
802         }
803         unmap->from_cnt = 1;
804         unmap->len = PAGE_SIZE;
805
806         tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
807                                     PAGE_SIZE, 0);
808         if (!tx) {
809                 dev_err(dma_chan->device->dev,
810                         "Self-test cannot prepare operation, disabling\n");
811                 err = -ENODEV;
812                 goto free_resources;
813         }
814
815         cookie = mv_xor_tx_submit(tx);
816         if (dma_submit_error(cookie)) {
817                 dev_err(dma_chan->device->dev,
818                         "Self-test submit error, disabling\n");
819                 err = -ENODEV;
820                 goto free_resources;
821         }
822
823         mv_xor_issue_pending(dma_chan);
824         async_tx_ack(tx);
825         msleep(1);
826
827         if (mv_xor_status(dma_chan, cookie, NULL) !=
828             DMA_COMPLETE) {
829                 dev_err(dma_chan->device->dev,
830                         "Self-test copy timed out, disabling\n");
831                 err = -ENODEV;
832                 goto free_resources;
833         }
834
835         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
836                                 PAGE_SIZE, DMA_FROM_DEVICE);
837         if (memcmp(src, dest, PAGE_SIZE)) {
838                 dev_err(dma_chan->device->dev,
839                         "Self-test copy failed compare, disabling\n");
840                 err = -ENODEV;
841                 goto free_resources;
842         }
843
844 free_resources:
845         dmaengine_unmap_put(unmap);
846         mv_xor_free_chan_resources(dma_chan);
847 out:
848         kfree(src);
849         kfree(dest);
850         return err;
851 }
852
853 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
854 static int
855 mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
856 {
857         int i, src_idx, ret;
858         struct page *dest;
859         struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
860         dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
861         dma_addr_t dest_dma;
862         struct dma_async_tx_descriptor *tx;
863         struct dmaengine_unmap_data *unmap;
864         struct dma_chan *dma_chan;
865         dma_cookie_t cookie;
866         u8 cmp_byte = 0;
867         u32 cmp_word;
868         int err = 0;
869         int src_count = MV_XOR_NUM_SRC_TEST;
870
871         for (src_idx = 0; src_idx < src_count; src_idx++) {
872                 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
873                 if (!xor_srcs[src_idx]) {
874                         while (src_idx--)
875                                 __free_page(xor_srcs[src_idx]);
876                         return -ENOMEM;
877                 }
878         }
879
880         dest = alloc_page(GFP_KERNEL);
881         if (!dest) {
882                 while (src_idx--)
883                         __free_page(xor_srcs[src_idx]);
884                 return -ENOMEM;
885         }
886
887         /* Fill in src buffers */
888         for (src_idx = 0; src_idx < src_count; src_idx++) {
889                 u8 *ptr = page_address(xor_srcs[src_idx]);
890                 for (i = 0; i < PAGE_SIZE; i++)
891                         ptr[i] = (1 << src_idx);
892         }
893
894         for (src_idx = 0; src_idx < src_count; src_idx++)
895                 cmp_byte ^= (u8) (1 << src_idx);
896
897         cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
898                 (cmp_byte << 8) | cmp_byte;
899
900         memset(page_address(dest), 0, PAGE_SIZE);
901
902         dma_chan = &mv_chan->dmachan;
903         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
904                 err = -ENODEV;
905                 goto out;
906         }
907
908         unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
909                                          GFP_KERNEL);
910         if (!unmap) {
911                 err = -ENOMEM;
912                 goto free_resources;
913         }
914
915         /* test xor */
916         for (i = 0; i < src_count; i++) {
917                 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
918                                               0, PAGE_SIZE, DMA_TO_DEVICE);
919                 dma_srcs[i] = unmap->addr[i];
920                 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
921                 if (ret) {
922                         err = -ENOMEM;
923                         goto free_resources;
924                 }
925                 unmap->to_cnt++;
926         }
927
928         unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
929                                       DMA_FROM_DEVICE);
930         dest_dma = unmap->addr[src_count];
931         ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
932         if (ret) {
933                 err = -ENOMEM;
934                 goto free_resources;
935         }
936         unmap->from_cnt = 1;
937         unmap->len = PAGE_SIZE;
938
939         tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
940                                  src_count, PAGE_SIZE, 0);
941         if (!tx) {
942                 dev_err(dma_chan->device->dev,
943                         "Self-test cannot prepare operation, disabling\n");
944                 err = -ENODEV;
945                 goto free_resources;
946         }
947
948         cookie = mv_xor_tx_submit(tx);
949         if (dma_submit_error(cookie)) {
950                 dev_err(dma_chan->device->dev,
951                         "Self-test submit error, disabling\n");
952                 err = -ENODEV;
953                 goto free_resources;
954         }
955
956         mv_xor_issue_pending(dma_chan);
957         async_tx_ack(tx);
958         msleep(8);
959
960         if (mv_xor_status(dma_chan, cookie, NULL) !=
961             DMA_COMPLETE) {
962                 dev_err(dma_chan->device->dev,
963                         "Self-test xor timed out, disabling\n");
964                 err = -ENODEV;
965                 goto free_resources;
966         }
967
968         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
969                                 PAGE_SIZE, DMA_FROM_DEVICE);
970         for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
971                 u32 *ptr = page_address(dest);
972                 if (ptr[i] != cmp_word) {
973                         dev_err(dma_chan->device->dev,
974                                 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
975                                 i, ptr[i], cmp_word);
976                         err = -ENODEV;
977                         goto free_resources;
978                 }
979         }
980
981 free_resources:
982         dmaengine_unmap_put(unmap);
983         mv_xor_free_chan_resources(dma_chan);
984 out:
985         src_idx = src_count;
986         while (src_idx--)
987                 __free_page(xor_srcs[src_idx]);
988         __free_page(dest);
989         return err;
990 }
991
992 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
993 {
994         struct dma_chan *chan, *_chan;
995         struct device *dev = mv_chan->dmadev.dev;
996
997         dma_async_device_unregister(&mv_chan->dmadev);
998
999         dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1000                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1001         dma_unmap_single(dev, mv_chan->dummy_src_addr,
1002                          MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
1003         dma_unmap_single(dev, mv_chan->dummy_dst_addr,
1004                          MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1005
1006         list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1007                                  device_node) {
1008                 list_del(&chan->device_node);
1009         }
1010
1011         free_irq(mv_chan->irq, mv_chan);
1012
1013         return 0;
1014 }
1015
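/*
 * Set up one DMA channel: map the dummy buffers backing DMA_INTERRUPT,
 * allocate the descriptor pool, install the prep/issue callbacks that
 * match cap_mask, pick register or descriptor mode according to
 * op_in_desc, run the self-tests and register with the dmaengine core.
 */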
1016 static struct mv_xor_chan *
1017 mv_xor_channel_add(struct mv_xor_device *xordev,
1018                    struct platform_device *pdev,
1019                    int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
1020 {
1021         int ret = 0;
1022         struct mv_xor_chan *mv_chan;
1023         struct dma_device *dma_dev;
1024
1025         mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1026         if (!mv_chan)
1027                 return ERR_PTR(-ENOMEM);
1028
1029         mv_chan->idx = idx;
1030         mv_chan->irq = irq;
1031         mv_chan->op_in_desc = op_in_desc;
1032
1033         dma_dev = &mv_chan->dmadev;
1034
1035         /*
1036          * These source and destination dummy buffers are used to implement
1037          * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
1038          * Hence, we only need to map the buffers at initialization-time.
1039          */
1040         mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
1041                 mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
1042         mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
1043                 mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1044
1045         /* allocate coherent memory for hardware descriptors
1046          * note: writecombine gives slightly better performance, but
1047          * requires that we explicitly flush the writes
1048          */
1049         mv_chan->dma_desc_pool_virt =
1050           dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1051                                  &mv_chan->dma_desc_pool, GFP_KERNEL);
1052         if (!mv_chan->dma_desc_pool_virt)
1053                 return ERR_PTR(-ENOMEM);
1054
1055         /* discover transaction capabilities from the platform data */
1056         dma_dev->cap_mask = cap_mask;
1057
1058         INIT_LIST_HEAD(&dma_dev->channels);
1059
1060         /* set base routines */
1061         dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1062         dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1063         dma_dev->device_tx_status = mv_xor_status;
1064         dma_dev->device_issue_pending = mv_xor_issue_pending;
1065         dma_dev->dev = &pdev->dev;
1066
1067         /* set prep routines based on capability */
1068         if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1069                 dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1070         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1071                 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1072         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1073                 dma_dev->max_xor = 8;
1074                 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1075         }
1076
1077         mv_chan->mmr_base = xordev->xor_base;
1078         mv_chan->mmr_high_base = xordev->xor_high_base;
1079         tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1080                      mv_chan);
1081
1082         /* clear errors before enabling interrupts */
1083         mv_chan_clear_err_status(mv_chan);
1084
1085         ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1086                           0, dev_name(&pdev->dev), mv_chan);
1087         if (ret)
1088                 goto err_free_dma;
1089
1090         mv_chan_unmask_interrupts(mv_chan);
1091
1092         if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
1093                 mv_chan_set_mode_to_desc(mv_chan);
1094         else
1095                 mv_chan_set_mode(mv_chan, DMA_XOR);
1096
1097         spin_lock_init(&mv_chan->lock);
1098         INIT_LIST_HEAD(&mv_chan->chain);
1099         INIT_LIST_HEAD(&mv_chan->completed_slots);
1100         INIT_LIST_HEAD(&mv_chan->all_slots);
1101         mv_chan->dmachan.device = dma_dev;
1102         dma_cookie_init(&mv_chan->dmachan);
1103
1104         list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1105
1106         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1107                 ret = mv_chan_memcpy_self_test(mv_chan);
1108                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1109                 if (ret)
1110                         goto err_free_irq;
1111         }
1112
1113         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1114                 ret = mv_chan_xor_self_test(mv_chan);
1115                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1116                 if (ret)
1117                         goto err_free_irq;
1118         }
1119
1120         dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
1121                  mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1122                  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1123                  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1124                  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1125
1126         dma_async_device_register(dma_dev);
1127         return mv_chan;
1128
1129 err_free_irq:
1130         free_irq(mv_chan->irq, mv_chan);
1131  err_free_dma:
1132         dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1133                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1134         return ERR_PTR(ret);
1135 }
1136
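/*
 * Program the XOR engine's MBus address decoding windows so that its
 * DMA accesses reach the right DRAM target/attribute for every
 * chip-select described in the mbus_dram_target_info.
 */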
1137 static void
1138 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1139                          const struct mbus_dram_target_info *dram)
1140 {
1141         void __iomem *base = xordev->xor_high_base;
1142         u32 win_enable = 0;
1143         int i;
1144
1145         for (i = 0; i < 8; i++) {
1146                 writel(0, base + WINDOW_BASE(i));
1147                 writel(0, base + WINDOW_SIZE(i));
1148                 if (i < 4)
1149                         writel(0, base + WINDOW_REMAP_HIGH(i));
1150         }
1151
1152         for (i = 0; i < dram->num_cs; i++) {
1153                 const struct mbus_dram_window *cs = dram->cs + i;
1154
1155                 writel((cs->base & 0xffff0000) |
1156                        (cs->mbus_attr << 8) |
1157                        dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1158                 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1159
1160                 win_enable |= (1 << i);
1161                 win_enable |= 3 << (16 + (2 * i));
1162         }
1163
1164         writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1165         writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1166         writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1167         writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1168 }
1169
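/*
 * The compatible string selects the operation mode: "marvell,orion-xor"
 * keeps the legacy register mode, while "marvell,armada-380-xor"
 * enables the per-descriptor command mode added by this patch.
 *
 * A minimal sketch of a matching device tree node (interrupt numbers
 * and unit addresses are illustrative only):
 *
 *	xor@60900 {
 *		compatible = "marvell,armada-380-xor";
 *		reg = <0x60900 0x100>, <0x60b00 0x100>;
 *		xor00 {
 *			interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *			dmacap,interrupt;
 *		};
 *	};
 */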
1170 static const struct of_device_id mv_xor_dt_ids[] = {
1171         { .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
1172         { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
1173         {},
1174 };
1175 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1176
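/*
 * One channel is created per device tree child node, with the operation
 * mode taken from the matched compatible entry above; with legacy
 * platform data, channels always use register mode (XOR_MODE_IN_REG).
 */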
1177 static int mv_xor_probe(struct platform_device *pdev)
1178 {
1179         const struct mbus_dram_target_info *dram;
1180         struct mv_xor_device *xordev;
1181         struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1182         struct resource *res;
1183         int i, ret;
1184         int op_in_desc;
1185
1186         dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1187
1188         xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1189         if (!xordev)
1190                 return -ENOMEM;
1191
1192         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1193         if (!res)
1194                 return -ENODEV;
1195
1196         xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1197                                         resource_size(res));
1198         if (!xordev->xor_base)
1199                 return -EBUSY;
1200
1201         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1202         if (!res)
1203                 return -ENODEV;
1204
1205         xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1206                                              resource_size(res));
1207         if (!xordev->xor_high_base)
1208                 return -EBUSY;
1209
1210         platform_set_drvdata(pdev, xordev);
1211
1212         /*
1213          * (Re-)program MBUS remapping windows if we are asked to.
1214          */
1215         dram = mv_mbus_dram_info();
1216         if (dram)
1217                 mv_xor_conf_mbus_windows(xordev, dram);
1218
1219         /* Not all platforms can gate the clock, so it is not
1220          * an error if the clock does not exist.
1221          */
1222         xordev->clk = clk_get(&pdev->dev, NULL);
1223         if (!IS_ERR(xordev->clk))
1224                 clk_prepare_enable(xordev->clk);
1225
1226         if (pdev->dev.of_node) {
1227                 struct device_node *np;
1228                 int i = 0;
1229                 const struct of_device_id *of_id =
1230                         of_match_device(mv_xor_dt_ids,
1231                                         &pdev->dev);
1232
1233                 for_each_child_of_node(pdev->dev.of_node, np) {
1234                         struct mv_xor_chan *chan;
1235                         dma_cap_mask_t cap_mask;
1236                         int irq;
1237                         op_in_desc = (int)of_id->data;
1238
1239                         dma_cap_zero(cap_mask);
1240                         if (of_property_read_bool(np, "dmacap,memcpy"))
1241                                 dma_cap_set(DMA_MEMCPY, cap_mask);
1242                         if (of_property_read_bool(np, "dmacap,xor"))
1243                                 dma_cap_set(DMA_XOR, cap_mask);
1244                         if (of_property_read_bool(np, "dmacap,interrupt"))
1245                                 dma_cap_set(DMA_INTERRUPT, cap_mask);
1246
1247                         irq = irq_of_parse_and_map(np, 0);
1248                         if (!irq) {
1249                                 ret = -ENODEV;
1250                                 goto err_channel_add;
1251                         }
1252
1253                         chan = mv_xor_channel_add(xordev, pdev, i,
1254                                                   cap_mask, irq, op_in_desc);
1255                         if (IS_ERR(chan)) {
1256                                 ret = PTR_ERR(chan);
1257                                 irq_dispose_mapping(irq);
1258                                 goto err_channel_add;
1259                         }
1260
1261                         xordev->channels[i] = chan;
1262                         i++;
1263                 }
1264         } else if (pdata && pdata->channels) {
1265                 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1266                         struct mv_xor_channel_data *cd;
1267                         struct mv_xor_chan *chan;
1268                         int irq;
1269
1270                         cd = &pdata->channels[i];
1271                         if (!cd) {
1272                                 ret = -ENODEV;
1273                                 goto err_channel_add;
1274                         }
1275
1276                         irq = platform_get_irq(pdev, i);
1277                         if (irq < 0) {
1278                                 ret = irq;
1279                                 goto err_channel_add;
1280                         }
1281
1282                         chan = mv_xor_channel_add(xordev, pdev, i,
1283                                                   cd->cap_mask, irq,
1284                                                   XOR_MODE_IN_REG);
1285                         if (IS_ERR(chan)) {
1286                                 ret = PTR_ERR(chan);
1287                                 goto err_channel_add;
1288                         }
1289
1290                         xordev->channels[i] = chan;
1291                 }
1292         }
1293
1294         return 0;
1295
1296 err_channel_add:
1297         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1298                 if (xordev->channels[i]) {
1299                         mv_xor_channel_remove(xordev->channels[i]);
1300                         if (pdev->dev.of_node)
1301                                 irq_dispose_mapping(xordev->channels[i]->irq);
1302                 }
1303
1304         if (!IS_ERR(xordev->clk)) {
1305                 clk_disable_unprepare(xordev->clk);
1306                 clk_put(xordev->clk);
1307         }
1308
1309         return ret;
1310 }
1311
1312 static int mv_xor_remove(struct platform_device *pdev)
1313 {
1314         struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1315         int i;
1316
1317         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1318                 if (xordev->channels[i])
1319                         mv_xor_channel_remove(xordev->channels[i]);
1320         }
1321
1322         if (!IS_ERR(xordev->clk)) {
1323                 clk_disable_unprepare(xordev->clk);
1324                 clk_put(xordev->clk);
1325         }
1326
1327         return 0;
1328 }
1329
1330 static struct platform_driver mv_xor_driver = {
1331         .probe          = mv_xor_probe,
1332         .remove         = mv_xor_remove,
1333         .driver         = {
1334                 .name           = MV_XOR_NAME,
1335                 .of_match_table = of_match_ptr(mv_xor_dt_ids),
1336         },
1337 };
1338
1339
1340 static int __init mv_xor_init(void)
1341 {
1342         return platform_driver_register(&mv_xor_driver);
1343 }
1344 module_init(mv_xor_init);
1345
1346 /* it's currently unsafe to unload this module */
1347 #if 0
1348 static void __exit mv_xor_exit(void)
1349 {
1350         platform_driver_unregister(&mv_xor_driver);
1351         return;
1352 }
1353
1354 module_exit(mv_xor_exit);
1355 #endif
1356
1357 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1358 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1359 MODULE_LICENSE("GPL");