dmaengine: mv_xor: Rename function for consistent naming
drivers/dma/mv_xor.c
1 /*
2  * offload engine driver for the Marvell XOR engine
3  * Copyright (C) 2007, 2008, Marvell International Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/spinlock.h>
21 #include <linux/interrupt.h>
22 #include <linux/platform_device.h>
23 #include <linux/memory.h>
24 #include <linux/clk.h>
25 #include <linux/of.h>
26 #include <linux/of_irq.h>
27 #include <linux/irqdomain.h>
28 #include <linux/platform_data/dma-mv_xor.h>
29
30 #include "dmaengine.h"
31 #include "mv_xor.h"
32
33 static void mv_xor_issue_pending(struct dma_chan *chan);
34
35 #define to_mv_xor_chan(chan)            \
36         container_of(chan, struct mv_xor_chan, dmachan)
37
38 #define to_mv_xor_slot(tx)              \
39         container_of(tx, struct mv_xor_desc_slot, async_tx)
40
41 #define mv_chan_to_devp(chan)           \
42         ((chan)->dmadev.dev)
43
44 static void mv_desc_init(struct mv_xor_desc_slot *desc,
45                          dma_addr_t addr, u32 byte_count,
46                          enum dma_ctrl_flags flags)
47 {
48         struct mv_xor_desc *hw_desc = desc->hw_desc;
49
50         hw_desc->status = XOR_DESC_DMA_OWNED;
51         hw_desc->phy_next_desc = 0;
52         /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
53         hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
54                                 XOR_DESC_EOD_INT_EN : 0;
55         hw_desc->phy_dest_addr = addr;
56         hw_desc->byte_count = byte_count;
57 }
58
59 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
60                                   u32 next_desc_addr)
61 {
62         struct mv_xor_desc *hw_desc = desc->hw_desc;
63         BUG_ON(hw_desc->phy_next_desc);
64         hw_desc->phy_next_desc = next_desc_addr;
65 }
66
67 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
68 {
69         struct mv_xor_desc *hw_desc = desc->hw_desc;
70         hw_desc->phy_next_desc = 0;
71 }
72
73 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
74                                  int index, dma_addr_t addr)
75 {
76         struct mv_xor_desc *hw_desc = desc->hw_desc;
77         hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
78         if (desc->type == DMA_XOR)
79                 hw_desc->desc_command |= (1 << index);
80 }
81
82 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
83 {
84         return readl_relaxed(XOR_CURR_DESC(chan));
85 }
86
87 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
88                                         u32 next_desc_addr)
89 {
90         writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
91 }
92
93 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
94 {
95         u32 val = readl_relaxed(XOR_INTR_MASK(chan));
96         val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
97         writel_relaxed(val, XOR_INTR_MASK(chan));
98 }
99
100 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
101 {
102         u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
103         intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
104         return intr_cause;
105 }
106
107 static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
108 {
109         u32 val;
110
111         val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
112         val = ~(val << (chan->idx * 16));
113         dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
114         writel_relaxed(val, XOR_INTR_CAUSE(chan));
115 }
116
117 static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
118 {
119         u32 val = 0xFFFF0000 >> (chan->idx * 16);
120         writel_relaxed(val, XOR_INTR_CAUSE(chan));
121 }
122
123 static void mv_chan_set_mode(struct mv_xor_chan *chan,
124                              enum dma_transaction_type type)
125 {
126         u32 op_mode;
127         u32 config = readl_relaxed(XOR_CONFIG(chan));
128
129         switch (type) {
130         case DMA_XOR:
131                 op_mode = XOR_OPERATION_MODE_XOR;
132                 break;
133         case DMA_MEMCPY:
134                 op_mode = XOR_OPERATION_MODE_MEMCPY;
135                 break;
136         default:
137                 dev_err(mv_chan_to_devp(chan),
138                         "error: unsupported operation %d\n",
139                         type);
140                 BUG();
141                 return;
142         }
143
144         config &= ~0x7;
145         config |= op_mode;
146
147 #if defined(__BIG_ENDIAN)
148         config |= XOR_DESCRIPTOR_SWAP;
149 #else
150         config &= ~XOR_DESCRIPTOR_SWAP;
151 #endif
152
153         writel_relaxed(config, XOR_CONFIG(chan));
154         chan->current_type = type;
155 }
156
157 static void mv_chan_activate(struct mv_xor_chan *chan)
158 {
159         dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
160
161         /* writel ensures all descriptors are flushed before activation */
162         writel(BIT(0), XOR_ACTIVATION(chan));
163 }
164
165 static char mv_chan_is_busy(struct mv_xor_chan *chan)
166 {
167         u32 state = readl_relaxed(XOR_ACTIVATION(chan));
168
169         state = (state >> 4) & 0x3;
170
171         return (state == 1) ? 1 : 0;
172 }
173
174 /**
175  * mv_chan_free_slots - flag a descriptor slot for reuse
176  * @slot: Slot to free
177  * Caller must hold &mv_chan->lock while calling this function
178  */
179 static void mv_chan_free_slots(struct mv_xor_chan *mv_chan,
180                                struct mv_xor_desc_slot *slot)
181 {
182         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
183                 __func__, __LINE__, slot);
184
185         slot->slot_used = 0;
186
187 }
188
189 /*
190  * mv_chan_start_new_chain - program the engine to operate on a new
191  * chain headed by sw_desc
192  * Caller must hold &mv_chan->lock while calling this function
193  */
194 static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
195                                     struct mv_xor_desc_slot *sw_desc)
196 {
197         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
198                 __func__, __LINE__, sw_desc);
199
200         /* set the hardware chain */
201         mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
202
203         mv_chan->pending++;
204         mv_xor_issue_pending(&mv_chan->dmachan);
205 }
206
207 static dma_cookie_t
208 mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
209                                 struct mv_xor_chan *mv_chan,
210                                 dma_cookie_t cookie)
211 {
212         BUG_ON(desc->async_tx.cookie < 0);
213
214         if (desc->async_tx.cookie > 0) {
215                 cookie = desc->async_tx.cookie;
216
217                 /* call the callback (must not sleep or submit new
218                  * operations to this channel)
219                  */
220                 if (desc->async_tx.callback)
221                         desc->async_tx.callback(
222                                 desc->async_tx.callback_param);
223
224                 dma_descriptor_unmap(&desc->async_tx);
225         }
226
227         /* run dependent operations */
228         dma_run_dependencies(&desc->async_tx);
229
230         return cookie;
231 }
232
233 static int
234 mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
235 {
236         struct mv_xor_desc_slot *iter, *_iter;
237
238         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
239         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
240                                  completed_node) {
241
242                 if (async_tx_test_ack(&iter->async_tx)) {
243                         list_del(&iter->completed_node);
244                         mv_chan_free_slots(mv_chan, iter);
245                 }
246         }
247         return 0;
248 }
249
250 static int
251 mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
252                    struct mv_xor_chan *mv_chan)
253 {
254         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
255                 __func__, __LINE__, desc, desc->async_tx.flags);
256         list_del(&desc->chain_node);
257         /* the client is allowed to attach dependent operations
258          * until 'ack' is set
259          */
260         if (!async_tx_test_ack(&desc->async_tx)) {
261                 /* move this slot to the completed_slots */
262                 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
263                 return 0;
264         }
265
266         mv_chan_free_slots(mv_chan, desc);
267         return 0;
268 }
269
270 /* This function must be called with the mv_xor_chan spinlock held */
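/*
 * Cleanup strategy: walk the submitted chain from the oldest descriptor,
 * run the completion actions for every entry the hardware has marked
 * XOR_DESC_SUCCESS, and stop once the descriptor the engine is currently
 * processing is reached.  If the engine has gone idle while descriptors
 * are still queued, restart it from the first entry that has not been
 * completed yet.
 */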
271 static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
272 {
273         struct mv_xor_desc_slot *iter, *_iter;
274         dma_cookie_t cookie = 0;
275         int busy = mv_chan_is_busy(mv_chan);
276         u32 current_desc = mv_chan_get_current_desc(mv_chan);
277         int current_cleaned = 0;
278         struct mv_xor_desc *hw_desc;
279
280         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
281         dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
282         mv_chan_clean_completed_slots(mv_chan);
283
284         /* free completed slots from the chain starting with
285          * the oldest descriptor
286          */
287
288         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
289                                         chain_node) {
290
291                 /* clean finished descriptors */
292                 hw_desc = iter->hw_desc;
293                 if (hw_desc->status & XOR_DESC_SUCCESS) {
294                         cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
295                                                                  cookie);
296
297                         /* done processing desc, clean slot */
298                         mv_desc_clean_slot(iter, mv_chan);
299
300                         /* break if we cleaned the current descriptor */
301                         if (iter->async_tx.phys == current_desc) {
302                                 current_cleaned = 1;
303                                 break;
304                         }
305                 } else {
306                         if (iter->async_tx.phys == current_desc) {
307                                 current_cleaned = 0;
308                                 break;
309                         }
310                 }
311         }
312
313         if ((busy == 0) && !list_empty(&mv_chan->chain)) {
314                 if (current_cleaned) {
315                         /*
316                          * current descriptor cleaned and removed, run
317                          * from list head
318                          */
319                         iter = list_entry(mv_chan->chain.next,
320                                           struct mv_xor_desc_slot,
321                                           chain_node);
322                         mv_chan_start_new_chain(mv_chan, iter);
323                 } else {
324                         if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
325                                 /*
326                                  * descriptors are still waiting after
327                                  * current, trigger them
328                                  */
329                                 iter = list_entry(iter->chain_node.next,
330                                                   struct mv_xor_desc_slot,
331                                                   chain_node);
332                                 mv_chan_start_new_chain(mv_chan, iter);
333                         } else {
334                                 /*
335                                  * some descriptors are still waiting
336                                  * to be cleaned
337                                  */
338                                 tasklet_schedule(&mv_chan->irq_tasklet);
339                         }
340                 }
341         }
342
343         if (cookie > 0)
344                 mv_chan->dmachan.completed_cookie = cookie;
345 }
346
347 static void mv_xor_tasklet(unsigned long data)
348 {
349         struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
350
351         spin_lock_bh(&chan->lock);
352         mv_chan_slot_cleanup(chan);
353         spin_unlock_bh(&chan->lock);
354 }
355
356 static struct mv_xor_desc_slot *
357 mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
358 {
359         struct mv_xor_desc_slot *iter, *_iter;
360         int retry = 0;
361
362         /* start the search from the last allocated descriptor;
363          * if a free slot can not be found there, start searching
364          * again from the beginning of the list
365          */
366 retry:
367         if (retry == 0)
368                 iter = mv_chan->last_used;
369         else
370                 iter = list_entry(&mv_chan->all_slots,
371                         struct mv_xor_desc_slot,
372                         slot_node);
373
374         list_for_each_entry_safe_continue(
375                 iter, _iter, &mv_chan->all_slots, slot_node) {
376
377                 prefetch(_iter);
378                 prefetch(&_iter->async_tx);
379                 if (iter->slot_used) {
380                         /* give up after finding the first busy slot
381                          * on the second pass through the list
382                          */
383                         if (retry)
384                                 break;
385                         continue;
386                 }
387
388                 /* pre-ack descriptor */
389                 async_tx_ack(&iter->async_tx);
390
391                 iter->slot_used = 1;
392                 INIT_LIST_HEAD(&iter->chain_node);
393                 iter->async_tx.cookie = -EBUSY;
394                 mv_chan->last_used = iter;
395                 mv_desc_clear_next_desc(iter);
396
397                 return iter;
398
399         }
400         if (!retry++)
401                 goto retry;
402
403         /* try to free some slots if the allocation fails */
404         tasklet_schedule(&mv_chan->irq_tasklet);
405
406         return NULL;
407 }
408
409 /************************ DMA engine API functions ****************************/
410 static dma_cookie_t
411 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
412 {
413         struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
414         struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
415         struct mv_xor_desc_slot *old_chain_tail;
416         dma_cookie_t cookie;
417         int new_hw_chain = 1;
418
419         dev_dbg(mv_chan_to_devp(mv_chan),
420                 "%s sw_desc %p: async_tx %p\n",
421                 __func__, sw_desc, &sw_desc->async_tx);
422
423         spin_lock_bh(&mv_chan->lock);
424         cookie = dma_cookie_assign(tx);
425
426         if (list_empty(&mv_chan->chain))
427                 list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
428         else {
429                 new_hw_chain = 0;
430
431                 old_chain_tail = list_entry(mv_chan->chain.prev,
432                                             struct mv_xor_desc_slot,
433                                             chain_node);
434                 list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
435
436                 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
437                         &old_chain_tail->async_tx.phys);
438
439                 /* fix up the hardware chain */
440                 mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
441
442                 /* if the channel is not busy */
443                 if (!mv_chan_is_busy(mv_chan)) {
444                         u32 current_desc = mv_chan_get_current_desc(mv_chan);
445                         /*
446                          * and the current desc is the end of the chain before
447                          * the append, then we need to start the channel
448                          */
449                         if (current_desc == old_chain_tail->async_tx.phys)
450                                 new_hw_chain = 1;
451                 }
452         }
453
454         if (new_hw_chain)
455                 mv_chan_start_new_chain(mv_chan, sw_desc);
456
457         spin_unlock_bh(&mv_chan->lock);
458
459         return cookie;
460 }
461
462 /* returns the number of allocated descriptors */
463 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
464 {
465         void *virt_desc;
466         dma_addr_t dma_desc;
467         int idx;
468         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
469         struct mv_xor_desc_slot *slot = NULL;
470         int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
471
472         /* Allocate descriptor slots */
473         idx = mv_chan->slots_allocated;
474         while (idx < num_descs_in_pool) {
475                 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
476                 if (!slot) {
477                         dev_info(mv_chan_to_devp(mv_chan),
478                                  "channel only initialized %d descriptor slots",
479                                  idx);
480                         break;
481                 }
482                 virt_desc = mv_chan->dma_desc_pool_virt;
483                 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
484
485                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
486                 slot->async_tx.tx_submit = mv_xor_tx_submit;
487                 INIT_LIST_HEAD(&slot->chain_node);
488                 INIT_LIST_HEAD(&slot->slot_node);
489                 dma_desc = mv_chan->dma_desc_pool;
490                 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
491                 slot->idx = idx++;
492
493                 spin_lock_bh(&mv_chan->lock);
494                 mv_chan->slots_allocated = idx;
495                 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
496                 spin_unlock_bh(&mv_chan->lock);
497         }
498
499         if (mv_chan->slots_allocated && !mv_chan->last_used)
500                 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
501                                         struct mv_xor_desc_slot,
502                                         slot_node);
503
504         dev_dbg(mv_chan_to_devp(mv_chan),
505                 "allocated %d descriptor slots last_used: %p\n",
506                 mv_chan->slots_allocated, mv_chan->last_used);
507
508         return mv_chan->slots_allocated ? : -ENOMEM;
509 }
510
511 static struct dma_async_tx_descriptor *
512 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
513                     unsigned int src_cnt, size_t len, unsigned long flags)
514 {
515         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
516         struct mv_xor_desc_slot *sw_desc;
517
518         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
519                 return NULL;
520
521         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
522
523         dev_dbg(mv_chan_to_devp(mv_chan),
524                 "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
525                 __func__, src_cnt, len, &dest, flags);
526
527         spin_lock_bh(&mv_chan->lock);
528         sw_desc = mv_chan_alloc_slot(mv_chan);
529         if (sw_desc) {
530                 sw_desc->type = DMA_XOR;
531                 sw_desc->async_tx.flags = flags;
532                 mv_desc_init(sw_desc, dest, len, flags);
533                 while (src_cnt--)
534                         mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
535         }
536         spin_unlock_bh(&mv_chan->lock);
537         dev_dbg(mv_chan_to_devp(mv_chan),
538                 "%s sw_desc %p async_tx %p\n",
539                 __func__, sw_desc, &sw_desc->async_tx);
540         return sw_desc ? &sw_desc->async_tx : NULL;
541 }
542
543 static struct dma_async_tx_descriptor *
544 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
545                 size_t len, unsigned long flags)
546 {
547         /*
548          * A MEMCPY operation is identical to an XOR operation with only
549          * a single source address.
550          */
551         return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
552 }
553
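/*
 * Illustrative sketch, not part of this driver: one way a generic dmaengine
 * client could consume the DMA_MEMCPY capability registered further below.
 * The helper name is hypothetical, completion is polled with dma_sync_wait()
 * for simplicity, and dma_mapping_error() checks are omitted for brevity.
 *
 * static int example_hw_copy(void *dst, void *src, size_t len)
 * {
 *         dma_cap_mask_t mask;
 *         struct dma_chan *chan;
 *         struct dma_async_tx_descriptor *tx;
 *         dma_addr_t dst_dma, src_dma;
 *         dma_cookie_t cookie;
 *         int ret = 0;
 *
 *         dma_cap_zero(mask);
 *         dma_cap_set(DMA_MEMCPY, mask);
 *         chan = dma_request_channel(mask, NULL, NULL);
 *         if (!chan)
 *                 return -ENODEV;
 *
 *         src_dma = dma_map_single(chan->device->dev, src, len,
 *                                  DMA_TO_DEVICE);
 *         dst_dma = dma_map_single(chan->device->dev, dst, len,
 *                                  DMA_FROM_DEVICE);
 *
 *         tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *                                                   len, DMA_CTRL_ACK);
 *         if (!tx) {
 *                 ret = -EIO;
 *                 goto unmap;
 *         }
 *
 *         cookie = dmaengine_submit(tx);
 *         dma_async_issue_pending(chan);
 *         if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *                 ret = -ETIMEDOUT;
 *
 * unmap:
 *         dma_unmap_single(chan->device->dev, dst_dma, len, DMA_FROM_DEVICE);
 *         dma_unmap_single(chan->device->dev, src_dma, len, DMA_TO_DEVICE);
 *         dma_release_channel(chan);
 *         return ret;
 * }
 */
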
554 static struct dma_async_tx_descriptor *
555 mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
556 {
557         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
558         dma_addr_t src, dest;
559         size_t len;
560
561         src = mv_chan->dummy_src_addr;
562         dest = mv_chan->dummy_dst_addr;
563         len = MV_XOR_MIN_BYTE_COUNT;
564
565         /*
566          * We implement the DMA_INTERRUPT operation as a minimum sized
567          * XOR operation with a single dummy source address.
568          */
569         return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
570 }
571
572 static void mv_xor_free_chan_resources(struct dma_chan *chan)
573 {
574         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
575         struct mv_xor_desc_slot *iter, *_iter;
576         int in_use_descs = 0;
577
578         spin_lock_bh(&mv_chan->lock);
579
580         mv_chan_slot_cleanup(mv_chan);
581
582         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
583                                         chain_node) {
584                 in_use_descs++;
585                 list_del(&iter->chain_node);
586         }
587         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
588                                  completed_node) {
589                 in_use_descs++;
590                 list_del(&iter->completed_node);
591         }
592         list_for_each_entry_safe_reverse(
593                 iter, _iter, &mv_chan->all_slots, slot_node) {
594                 list_del(&iter->slot_node);
595                 kfree(iter);
596                 mv_chan->slots_allocated--;
597         }
598         mv_chan->last_used = NULL;
599
600         dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
601                 __func__, mv_chan->slots_allocated);
602         spin_unlock_bh(&mv_chan->lock);
603
604         if (in_use_descs)
605                 dev_err(mv_chan_to_devp(mv_chan),
606                         "freeing %d in use descriptors!\n", in_use_descs);
607 }
608
609 /**
610  * mv_xor_status - poll the status of an XOR transaction
611  * @chan: XOR channel handle
612  * @cookie: XOR transaction identifier
613  * @txstate: XOR transaction state holder (or NULL)
614  */
615 static enum dma_status mv_xor_status(struct dma_chan *chan,
616                                           dma_cookie_t cookie,
617                                           struct dma_tx_state *txstate)
618 {
619         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
620         enum dma_status ret;
621
622         ret = dma_cookie_status(chan, cookie, txstate);
623         if (ret == DMA_COMPLETE)
624                 return ret;
625
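        /*
         * Not complete yet: reclaim any descriptors the hardware has
         * finished, then query the cookie state a second time.
         */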
626         spin_lock_bh(&mv_chan->lock);
627         mv_chan_slot_cleanup(mv_chan);
628         spin_unlock_bh(&mv_chan->lock);
629
630         return dma_cookie_status(chan, cookie, txstate);
631 }
632
633 static void mv_chan_dump_regs(struct mv_xor_chan *chan)
634 {
635         u32 val;
636
637         val = readl_relaxed(XOR_CONFIG(chan));
638         dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
639
640         val = readl_relaxed(XOR_ACTIVATION(chan));
641         dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
642
643         val = readl_relaxed(XOR_INTR_CAUSE(chan));
644         dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
645
646         val = readl_relaxed(XOR_INTR_MASK(chan));
647         dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
648
649         val = readl_relaxed(XOR_ERROR_CAUSE(chan));
650         dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
651
652         val = readl_relaxed(XOR_ERROR_ADDR(chan));
653         dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
654 }
655
656 static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
657                                           u32 intr_cause)
658 {
659         if (intr_cause & XOR_INT_ERR_DECODE) {
660                 dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
661                 return;
662         }
663
664         dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
665                 chan->idx, intr_cause);
666
667         mv_chan_dump_regs(chan);
668         WARN_ON(1);
669 }
670
671 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
672 {
673         struct mv_xor_chan *chan = data;
674         u32 intr_cause = mv_chan_get_intr_cause(chan);
675
676         dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
677
678         if (intr_cause & XOR_INTR_ERRORS)
679                 mv_chan_err_interrupt_handler(chan, intr_cause);
680
681         tasklet_schedule(&chan->irq_tasklet);
682
683         mv_chan_clear_eoc_cause(chan);
684
685         return IRQ_HANDLED;
686 }
687
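/*
 * The engine is only (re)activated once the number of descriptors queued
 * since the last activation reaches MV_XOR_THRESHOLD; until then, newly
 * chained work waits for a later issue_pending call.
 */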
688 static void mv_xor_issue_pending(struct dma_chan *chan)
689 {
690         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
691
692         if (mv_chan->pending >= MV_XOR_THRESHOLD) {
693                 mv_chan->pending = 0;
694                 mv_chan_activate(mv_chan);
695         }
696 }
697
698 /*
699  * Perform a transaction to verify the HW works.
700  */
701
702 static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
703 {
704         int i, ret;
705         void *src, *dest;
706         dma_addr_t src_dma, dest_dma;
707         struct dma_chan *dma_chan;
708         dma_cookie_t cookie;
709         struct dma_async_tx_descriptor *tx;
710         struct dmaengine_unmap_data *unmap;
711         int err = 0;
712
713         src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
714         if (!src)
715                 return -ENOMEM;
716
717         dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
718         if (!dest) {
719                 kfree(src);
720                 return -ENOMEM;
721         }
722
723         /* Fill in src buffer */
724         for (i = 0; i < PAGE_SIZE; i++)
725                 ((u8 *) src)[i] = (u8)i;
726
727         dma_chan = &mv_chan->dmachan;
728         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
729                 err = -ENODEV;
730                 goto out;
731         }
732
733         unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
734         if (!unmap) {
735                 err = -ENOMEM;
736                 goto free_resources;
737         }
738
739         src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
740                                  PAGE_SIZE, DMA_TO_DEVICE);
741         unmap->addr[0] = src_dma;
742
743         ret = dma_mapping_error(dma_chan->device->dev, src_dma);
744         if (ret) {
745                 err = -ENOMEM;
746                 goto free_resources;
747         }
748         unmap->to_cnt = 1;
749
750         dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
751                                   PAGE_SIZE, DMA_FROM_DEVICE);
752         unmap->addr[1] = dest_dma;
753
754         ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
755         if (ret) {
756                 err = -ENOMEM;
757                 goto free_resources;
758         }
759         unmap->from_cnt = 1;
760         unmap->len = PAGE_SIZE;
761
762         tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
763                                     PAGE_SIZE, 0);
764         if (!tx) {
765                 dev_err(dma_chan->device->dev,
766                         "Self-test cannot prepare operation, disabling\n");
767                 err = -ENODEV;
768                 goto free_resources;
769         }
770
771         cookie = mv_xor_tx_submit(tx);
772         if (dma_submit_error(cookie)) {
773                 dev_err(dma_chan->device->dev,
774                         "Self-test submit error, disabling\n");
775                 err = -ENODEV;
776                 goto free_resources;
777         }
778
779         mv_xor_issue_pending(dma_chan);
780         async_tx_ack(tx);
781         msleep(1);
782
783         if (mv_xor_status(dma_chan, cookie, NULL) !=
784             DMA_COMPLETE) {
785                 dev_err(dma_chan->device->dev,
786                         "Self-test copy timed out, disabling\n");
787                 err = -ENODEV;
788                 goto free_resources;
789         }
790
791         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
792                                 PAGE_SIZE, DMA_FROM_DEVICE);
793         if (memcmp(src, dest, PAGE_SIZE)) {
794                 dev_err(dma_chan->device->dev,
795                         "Self-test copy failed compare, disabling\n");
796                 err = -ENODEV;
797                 goto free_resources;
798         }
799
800 free_resources:
801         dmaengine_unmap_put(unmap);
802         mv_xor_free_chan_resources(dma_chan);
803 out:
804         kfree(src);
805         kfree(dest);
806         return err;
807 }
808
809 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
810 static int
811 mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
812 {
813         int i, src_idx, ret;
814         struct page *dest;
815         struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
816         dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
817         dma_addr_t dest_dma;
818         struct dma_async_tx_descriptor *tx;
819         struct dmaengine_unmap_data *unmap;
820         struct dma_chan *dma_chan;
821         dma_cookie_t cookie;
822         u8 cmp_byte = 0;
823         u32 cmp_word;
824         int err = 0;
825         int src_count = MV_XOR_NUM_SRC_TEST;
826
827         for (src_idx = 0; src_idx < src_count; src_idx++) {
828                 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
829                 if (!xor_srcs[src_idx]) {
830                         while (src_idx--)
831                                 __free_page(xor_srcs[src_idx]);
832                         return -ENOMEM;
833                 }
834         }
835
836         dest = alloc_page(GFP_KERNEL);
837         if (!dest) {
838                 while (src_idx--)
839                         __free_page(xor_srcs[src_idx]);
840                 return -ENOMEM;
841         }
842
843         /* Fill in src buffers */
844         for (src_idx = 0; src_idx < src_count; src_idx++) {
845                 u8 *ptr = page_address(xor_srcs[src_idx]);
846                 for (i = 0; i < PAGE_SIZE; i++)
847                         ptr[i] = (1 << src_idx);
848         }
849
850         for (src_idx = 0; src_idx < src_count; src_idx++)
851                 cmp_byte ^= (u8) (1 << src_idx);
852
853         cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
854                 (cmp_byte << 8) | cmp_byte;
855
856         memset(page_address(dest), 0, PAGE_SIZE);
857
858         dma_chan = &mv_chan->dmachan;
859         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
860                 err = -ENODEV;
861                 goto out;
862         }
863
864         unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
865                                          GFP_KERNEL);
866         if (!unmap) {
867                 err = -ENOMEM;
868                 goto free_resources;
869         }
870
871         /* test xor */
872         for (i = 0; i < src_count; i++) {
873                 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
874                                               0, PAGE_SIZE, DMA_TO_DEVICE);
875                 dma_srcs[i] = unmap->addr[i];
876                 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
877                 if (ret) {
878                         err = -ENOMEM;
879                         goto free_resources;
880                 }
881                 unmap->to_cnt++;
882         }
883
884         unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
885                                       DMA_FROM_DEVICE);
886         dest_dma = unmap->addr[src_count];
887         ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
888         if (ret) {
889                 err = -ENOMEM;
890                 goto free_resources;
891         }
892         unmap->from_cnt = 1;
893         unmap->len = PAGE_SIZE;
894
895         tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
896                                  src_count, PAGE_SIZE, 0);
897         if (!tx) {
898                 dev_err(dma_chan->device->dev,
899                         "Self-test cannot prepare operation, disabling\n");
900                 err = -ENODEV;
901                 goto free_resources;
902         }
903
904         cookie = mv_xor_tx_submit(tx);
905         if (dma_submit_error(cookie)) {
906                 dev_err(dma_chan->device->dev,
907                         "Self-test submit error, disabling\n");
908                 err = -ENODEV;
909                 goto free_resources;
910         }
911
912         mv_xor_issue_pending(dma_chan);
913         async_tx_ack(tx);
914         msleep(8);
915
916         if (mv_xor_status(dma_chan, cookie, NULL) !=
917             DMA_COMPLETE) {
918                 dev_err(dma_chan->device->dev,
919                         "Self-test xor timed out, disabling\n");
920                 err = -ENODEV;
921                 goto free_resources;
922         }
923
924         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
925                                 PAGE_SIZE, DMA_FROM_DEVICE);
926         for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
927                 u32 *ptr = page_address(dest);
928                 if (ptr[i] != cmp_word) {
929                         dev_err(dma_chan->device->dev,
930                                 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
931                                 i, ptr[i], cmp_word);
932                         err = -ENODEV;
933                         goto free_resources;
934                 }
935         }
936
937 free_resources:
938         dmaengine_unmap_put(unmap);
939         mv_xor_free_chan_resources(dma_chan);
940 out:
941         src_idx = src_count;
942         while (src_idx--)
943                 __free_page(xor_srcs[src_idx]);
944         __free_page(dest);
945         return err;
946 }
947
948 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
949 {
950         struct dma_chan *chan, *_chan;
951         struct device *dev = mv_chan->dmadev.dev;
952
953         dma_async_device_unregister(&mv_chan->dmadev);
954
955         dma_free_coherent(dev, MV_XOR_POOL_SIZE,
956                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
957         dma_unmap_single(dev, mv_chan->dummy_src_addr,
958                          MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
959         dma_unmap_single(dev, mv_chan->dummy_dst_addr,
960                          MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
961
962         list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
963                                  device_node) {
964                 list_del(&chan->device_node);
965         }
966
967         free_irq(mv_chan->irq, mv_chan);
968
969         return 0;
970 }
971
972 static struct mv_xor_chan *
973 mv_xor_channel_add(struct mv_xor_device *xordev,
974                    struct platform_device *pdev,
975                    int idx, dma_cap_mask_t cap_mask, int irq)
976 {
977         int ret = 0;
978         struct mv_xor_chan *mv_chan;
979         struct dma_device *dma_dev;
980
981         mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
982         if (!mv_chan)
983                 return ERR_PTR(-ENOMEM);
984
985         mv_chan->idx = idx;
986         mv_chan->irq = irq;
987
988         dma_dev = &mv_chan->dmadev;
989
990         /*
991          * These source and destination dummy buffers are used to implement
992          * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
993          * Hence, we only need to map the buffers at initialization-time.
994          */
995         mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
996                 mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
997         mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
998                 mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
999
1000         /* allocate coherent memory for hardware descriptors
1001          * note: writecombine gives slightly better performance, but
1002          * requires that we explicitly flush the writes
1003          */
1004         mv_chan->dma_desc_pool_virt =
1005           dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1006                                  &mv_chan->dma_desc_pool, GFP_KERNEL);
1007         if (!mv_chan->dma_desc_pool_virt)
1008                 return ERR_PTR(-ENOMEM);
1009
1010         /* discover transaction capabilities from the platform data */
1011         dma_dev->cap_mask = cap_mask;
1012
1013         INIT_LIST_HEAD(&dma_dev->channels);
1014
1015         /* set base routines */
1016         dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1017         dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1018         dma_dev->device_tx_status = mv_xor_status;
1019         dma_dev->device_issue_pending = mv_xor_issue_pending;
1020         dma_dev->dev = &pdev->dev;
1021
1022         /* set prep routines based on capability */
1023         if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1024                 dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1025         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1026                 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1027         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1028                 dma_dev->max_xor = 8;
1029                 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1030         }
1031
1032         mv_chan->mmr_base = xordev->xor_base;
1033         mv_chan->mmr_high_base = xordev->xor_high_base;
1034         tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1035                      mv_chan);
1036
1037         /* clear errors before enabling interrupts */
1038         mv_chan_clear_err_status(mv_chan);
1039
1040         ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1041                           0, dev_name(&pdev->dev), mv_chan);
1042         if (ret)
1043                 goto err_free_dma;
1044
1045         mv_chan_unmask_interrupts(mv_chan);
1046
1047         mv_chan_set_mode(mv_chan, DMA_XOR);
1048
1049         spin_lock_init(&mv_chan->lock);
1050         INIT_LIST_HEAD(&mv_chan->chain);
1051         INIT_LIST_HEAD(&mv_chan->completed_slots);
1052         INIT_LIST_HEAD(&mv_chan->all_slots);
1053         mv_chan->dmachan.device = dma_dev;
1054         dma_cookie_init(&mv_chan->dmachan);
1055
1056         list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1057
1058         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1059                 ret = mv_chan_memcpy_self_test(mv_chan);
1060                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1061                 if (ret)
1062                         goto err_free_irq;
1063         }
1064
1065         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1066                 ret = mv_chan_xor_self_test(mv_chan);
1067                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1068                 if (ret)
1069                         goto err_free_irq;
1070         }
1071
1072         dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
1073                  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1074                  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1075                  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1076
1077         dma_async_device_register(dma_dev);
1078         return mv_chan;
1079
1080 err_free_irq:
1081         free_irq(mv_chan->irq, mv_chan);
1082 err_free_dma:
1083         dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1084                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1085         return ERR_PTR(ret);
1086 }
1087
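/*
 * Program the XOR engine's MBUS address decoding windows from the DRAM
 * chip-select layout: clear all eight windows first, then open one
 * read/write window per populated chip select.
 */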
1088 static void
1089 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1090                          const struct mbus_dram_target_info *dram)
1091 {
1092         void __iomem *base = xordev->xor_high_base;
1093         u32 win_enable = 0;
1094         int i;
1095
1096         for (i = 0; i < 8; i++) {
1097                 writel(0, base + WINDOW_BASE(i));
1098                 writel(0, base + WINDOW_SIZE(i));
1099                 if (i < 4)
1100                         writel(0, base + WINDOW_REMAP_HIGH(i));
1101         }
1102
1103         for (i = 0; i < dram->num_cs; i++) {
1104                 const struct mbus_dram_window *cs = dram->cs + i;
1105
1106                 writel((cs->base & 0xffff0000) |
1107                        (cs->mbus_attr << 8) |
1108                        dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1109                 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1110
1111                 win_enable |= (1 << i);
1112                 win_enable |= 3 << (16 + (2 * i));
1113         }
1114
1115         writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1116         writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1117         writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1118         writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1119 }
1120
1121 static int mv_xor_probe(struct platform_device *pdev)
1122 {
1123         const struct mbus_dram_target_info *dram;
1124         struct mv_xor_device *xordev;
1125         struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1126         struct resource *res;
1127         int i, ret;
1128
1129         dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1130
1131         xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1132         if (!xordev)
1133                 return -ENOMEM;
1134
1135         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1136         if (!res)
1137                 return -ENODEV;
1138
1139         xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1140                                         resource_size(res));
1141         if (!xordev->xor_base)
1142                 return -EBUSY;
1143
1144         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1145         if (!res)
1146                 return -ENODEV;
1147
1148         xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1149                                              resource_size(res));
1150         if (!xordev->xor_high_base)
1151                 return -EBUSY;
1152
1153         platform_set_drvdata(pdev, xordev);
1154
1155         /*
1156          * (Re-)program MBUS remapping windows if we are asked to.
1157          */
1158         dram = mv_mbus_dram_info();
1159         if (dram)
1160                 mv_xor_conf_mbus_windows(xordev, dram);
1161
1162         /* Not all platforms can gate the clock, so it is not
1163          * an error if the clock does not exist.
1164          */
1165         xordev->clk = clk_get(&pdev->dev, NULL);
1166         if (!IS_ERR(xordev->clk))
1167                 clk_prepare_enable(xordev->clk);
1168
1169         if (pdev->dev.of_node) {
1170                 struct device_node *np;
1171                 int i = 0;
1172
1173                 for_each_child_of_node(pdev->dev.of_node, np) {
1174                         struct mv_xor_chan *chan;
1175                         dma_cap_mask_t cap_mask;
1176                         int irq;
1177
1178                         dma_cap_zero(cap_mask);
1179                         if (of_property_read_bool(np, "dmacap,memcpy"))
1180                                 dma_cap_set(DMA_MEMCPY, cap_mask);
1181                         if (of_property_read_bool(np, "dmacap,xor"))
1182                                 dma_cap_set(DMA_XOR, cap_mask);
1183                         if (of_property_read_bool(np, "dmacap,interrupt"))
1184                                 dma_cap_set(DMA_INTERRUPT, cap_mask);
1185
1186                         irq = irq_of_parse_and_map(np, 0);
1187                         if (!irq) {
1188                                 ret = -ENODEV;
1189                                 goto err_channel_add;
1190                         }
1191
1192                         chan = mv_xor_channel_add(xordev, pdev, i,
1193                                                   cap_mask, irq);
1194                         if (IS_ERR(chan)) {
1195                                 ret = PTR_ERR(chan);
1196                                 irq_dispose_mapping(irq);
1197                                 goto err_channel_add;
1198                         }
1199
1200                         xordev->channels[i] = chan;
1201                         i++;
1202                 }
1203         } else if (pdata && pdata->channels) {
1204                 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1205                         struct mv_xor_channel_data *cd;
1206                         struct mv_xor_chan *chan;
1207                         int irq;
1208
1209                         cd = &pdata->channels[i];
1210                         if (!cd) {
1211                                 ret = -ENODEV;
1212                                 goto err_channel_add;
1213                         }
1214
1215                         irq = platform_get_irq(pdev, i);
1216                         if (irq < 0) {
1217                                 ret = irq;
1218                                 goto err_channel_add;
1219                         }
1220
1221                         chan = mv_xor_channel_add(xordev, pdev, i,
1222                                                   cd->cap_mask, irq);
1223                         if (IS_ERR(chan)) {
1224                                 ret = PTR_ERR(chan);
1225                                 goto err_channel_add;
1226                         }
1227
1228                         xordev->channels[i] = chan;
1229                 }
1230         }
1231
1232         return 0;
1233
1234 err_channel_add:
1235         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1236                 if (xordev->channels[i]) {
1237                         mv_xor_channel_remove(xordev->channels[i]);
1238                         if (pdev->dev.of_node)
1239                                 irq_dispose_mapping(xordev->channels[i]->irq);
1240                 }
1241
1242         if (!IS_ERR(xordev->clk)) {
1243                 clk_disable_unprepare(xordev->clk);
1244                 clk_put(xordev->clk);
1245         }
1246
1247         return ret;
1248 }
1249
1250 static int mv_xor_remove(struct platform_device *pdev)
1251 {
1252         struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1253         int i;
1254
1255         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1256                 if (xordev->channels[i])
1257                         mv_xor_channel_remove(xordev->channels[i]);
1258         }
1259
1260         if (!IS_ERR(xordev->clk)) {
1261                 clk_disable_unprepare(xordev->clk);
1262                 clk_put(xordev->clk);
1263         }
1264
1265         return 0;
1266 }
1267
1268 #ifdef CONFIG_OF
1269 static const struct of_device_id mv_xor_dt_ids[] = {
1270        { .compatible = "marvell,orion-xor", },
1271        {},
1272 };
1273 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1274 #endif
1275
1276 static struct platform_driver mv_xor_driver = {
1277         .probe          = mv_xor_probe,
1278         .remove         = mv_xor_remove,
1279         .driver         = {
1280                 .name           = MV_XOR_NAME,
1281                 .of_match_table = of_match_ptr(mv_xor_dt_ids),
1282         },
1283 };
1284
1285
1286 static int __init mv_xor_init(void)
1287 {
1288         return platform_driver_register(&mv_xor_driver);
1289 }
1290 module_init(mv_xor_init);
1291
1292 /* it's currently unsafe to unload this module */
1293 #if 0
1294 static void __exit mv_xor_exit(void)
1295 {
1296         platform_driver_unregister(&mv_xor_driver);
1297         return;
1298 }
1299
1300 module_exit(mv_xor_exit);
1301 #endif
1302
1303 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1304 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1305 MODULE_LICENSE("GPL");