mmc: atmel-mci: change the state machine for compatibility with old IP
1 /*
2  * Atmel MultiMedia Card Interface driver
3  *
4  * Copyright (C) 2004-2008 Atmel Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/blkdev.h>
11 #include <linux/clk.h>
12 #include <linux/debugfs.h>
13 #include <linux/device.h>
14 #include <linux/dmaengine.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/err.h>
17 #include <linux/gpio.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/ioport.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/scatterlist.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/stat.h>
27 #include <linux/types.h>
28
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/sdio.h>
31
32 #include <mach/atmel-mci.h>
33 #include <linux/atmel-mci.h>
34 #include <linux/atmel_pdc.h>
35
36 #include <asm/io.h>
37 #include <asm/unaligned.h>
38
39 #include <mach/cpu.h>
40 #include <mach/board.h>
41
42 #include "atmel-mci-regs.h"
43
44 #define ATMCI_DATA_ERROR_FLAGS  (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
45 #define ATMCI_DMA_THRESHOLD     16
46
47 enum {
48         EVENT_CMD_RDY = 0,
49         EVENT_XFER_COMPLETE,
50         EVENT_NOTBUSY,
51         EVENT_DATA_ERROR,
52 };
53
54 enum atmel_mci_state {
55         STATE_IDLE = 0,
56         STATE_SENDING_CMD,
57         STATE_DATA_XFER,
58         STATE_WAITING_NOTBUSY,
59         STATE_SENDING_STOP,
60         STATE_END_REQUEST,
61 };
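/*
 * Illustrative request flow (a sketch inferred from the states above and
 * from the tasklet below, not an exhaustive list of transitions): a
 * request typically moves IDLE -> SENDING_CMD, then to END_REQUEST by
 * default, to WAITING_NOTBUSY for commands that need it, or to DATA_XFER
 * when data is attached; DATA_XFER is followed by WAITING_NOTBUSY and,
 * when a stop command is queued, SENDING_STOP, before END_REQUEST
 * completes the request and the machine returns to IDLE.
 */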
62
63 enum atmci_xfer_dir {
64         XFER_RECEIVE = 0,
65         XFER_TRANSMIT,
66 };
67
68 enum atmci_pdc_buf {
69         PDC_FIRST_BUF = 0,
70         PDC_SECOND_BUF,
71 };
72
73 struct atmel_mci_caps {
74         bool    has_dma;
75         bool    has_pdc;
76         bool    has_cfg_reg;
77         bool    has_cstor_reg;
78         bool    has_highspeed;
79         bool    has_rwproof;
80         bool    has_odd_clk_div;
81 };
82
83 struct atmel_mci_dma {
84         struct dma_chan                 *chan;
85         struct dma_async_tx_descriptor  *data_desc;
86 };
87
88 /**
89  * struct atmel_mci - MMC controller state shared between all slots
90  * @lock: Spinlock protecting the queue and associated data.
91  * @regs: Pointer to MMIO registers.
92  * @sg: Scatterlist entry currently being processed by PIO or PDC code.
93  * @pio_offset: Offset into the current scatterlist entry.
94  * @buffer: Bounce buffer used if we don't have the r/w proof capability.
95  *      We don't have time to switch PDC buffers mid-transfer, so we have
96  *      to use a single buffer for the whole transaction.
97  * @buf_size: size of the buffer.
98  * @buf_phys_addr: physical address of @buffer, needed for the PDC.
99  * @cur_slot: The slot which is currently using the controller.
100  * @mrq: The request currently being processed on @cur_slot,
101  *      or NULL if the controller is idle.
102  * @cmd: The command currently being sent to the card, or NULL.
103  * @data: The data currently being transferred, or NULL if no data
104  *      transfer is in progress.
105  * @data_size: Number of bytes to transfer, i.e. data->blocks * data->blksz.
106  * @dma: DMA client state.
107  * @data_chan: DMA channel being used for the current data transfer.
108  * @cmd_status: Snapshot of SR taken upon completion of the current
109  *      command. Only valid when EVENT_CMD_RDY is pending.
110  * @data_status: Snapshot of SR taken upon completion of the current
111  *      data transfer. Only valid when EVENT_NOTBUSY or
112  *      EVENT_DATA_ERROR is pending.
113  * @stop_cmdr: Value to be loaded into CMDR when the stop command is
114  *      to be sent.
115  * @tasklet: Tasklet running the request state machine.
116  * @pending_events: Bitmask of events flagged by the interrupt handler
117  *      to be processed by the tasklet.
118  * @completed_events: Bitmask of events which the state machine has
119  *      processed.
120  * @state: Tasklet state.
121  * @queue: List of slots waiting for access to the controller.
122  * @need_clock_update: Update the clock rate before the next request.
123  * @need_reset: Reset controller before next request.
124  * @mode_reg: Value of the MR register.
125  * @cfg_reg: Value of the CFG register.
126  * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
127  *      rate and timeout calculations.
128  * @mapbase: Physical address of the MMIO registers.
129  * @mck: The peripheral bus clock hooked up to the MMC controller.
130  * @pdev: Platform device associated with the MMC controller.
131  * @slot: Slots sharing this MMC controller.
132  * @caps: MCI capabilities depending on MCI version.
133  * @prepare_data: function to set up the MCI before a data transfer;
134  *      depends on the MCI capabilities.
135  * @submit_data: function to start a data transfer; depends on the MCI
136  *      capabilities.
137  * @stop_transfer: function to stop a data transfer; depends on the MCI
138  *      capabilities.
139  *
140  * Locking
141  * =======
142  *
143  * @lock is a softirq-safe spinlock protecting @queue as well as
144  * @cur_slot, @mrq and @state. These must always be updated
145  * at the same time while holding @lock.
146  *
147  * @lock also protects mode_reg and need_clock_update since these are
148  * used to synchronize mode register updates with the queue
149  * processing.
150  *
151  * The @mrq field of struct atmel_mci_slot is also protected by @lock,
152  * and must always be written at the same time as the slot is added to
153  * @queue.
154  *
155  * @pending_events and @completed_events are accessed using atomic bit
156  * operations, so they don't need any locking.
157  *
158  * None of the fields touched by the interrupt handler need any
159  * locking. However, ordering is important: Before EVENT_DATA_ERROR or
160  * EVENT_NOTBUSY is set in @pending_events, all data-related
161  * interrupts must be disabled and @data_status updated with a
162  * snapshot of SR. Similarly, before EVENT_CMD_RDY is set, the
163  * CMDRDY interrupt must be disabled and @cmd_status updated with a
164  * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
165  * bytes_xfered field of @data must be written. This is ensured by
166  * using barriers.
167  */
168 struct atmel_mci {
169         spinlock_t              lock;
170         void __iomem            *regs;
171
172         struct scatterlist      *sg;
173         unsigned int            pio_offset;
174         unsigned int            *buffer;
175         unsigned int            buf_size;
176         dma_addr_t              buf_phys_addr;
177
178         struct atmel_mci_slot   *cur_slot;
179         struct mmc_request      *mrq;
180         struct mmc_command      *cmd;
181         struct mmc_data         *data;
182         unsigned int            data_size;
183
184         struct atmel_mci_dma    dma;
185         struct dma_chan         *data_chan;
186         struct dma_slave_config dma_conf;
187
188         u32                     cmd_status;
189         u32                     data_status;
190         u32                     stop_cmdr;
191
192         struct tasklet_struct   tasklet;
193         unsigned long           pending_events;
194         unsigned long           completed_events;
195         enum atmel_mci_state    state;
196         struct list_head        queue;
197
198         bool                    need_clock_update;
199         bool                    need_reset;
200         u32                     mode_reg;
201         u32                     cfg_reg;
202         unsigned long           bus_hz;
203         unsigned long           mapbase;
204         struct clk              *mck;
205         struct platform_device  *pdev;
206
207         struct atmel_mci_slot   *slot[ATMCI_MAX_NR_SLOTS];
208
209         struct atmel_mci_caps   caps;
210
211         u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
212         void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
213         void (*stop_transfer)(struct atmel_mci *host);
214 };
215
216 /**
217  * struct atmel_mci_slot - MMC slot state
218  * @mmc: The mmc_host representing this slot.
219  * @host: The MMC controller this slot is using.
220  * @sdc_reg: Value of SDCR to be written before using this slot.
221  * @sdio_irq: SDIO irq mask for this slot.
222  * @mrq: mmc_request currently being processed or waiting to be
223  *      processed, or NULL when the slot is idle.
224  * @queue_node: List node for placing this node in the @queue list of
225  *      &struct atmel_mci.
226  * @clock: Clock rate configured by set_ios(). Protected by host->lock.
227  * @flags: Random state bits associated with the slot.
228  * @detect_pin: GPIO pin used for card detection, or negative if not
229  *      available.
230  * @wp_pin: GPIO pin used for card write protect sensing, or negative
231  *      if not available.
232  * @detect_is_active_high: The state of the detect pin when it is active.
233  * @detect_timer: Timer used for debouncing @detect_pin interrupts.
234  */
235 struct atmel_mci_slot {
236         struct mmc_host         *mmc;
237         struct atmel_mci        *host;
238
239         u32                     sdc_reg;
240         u32                     sdio_irq;
241
242         struct mmc_request      *mrq;
243         struct list_head        queue_node;
244
245         unsigned int            clock;
246         unsigned long           flags;
247 #define ATMCI_CARD_PRESENT      0
248 #define ATMCI_CARD_NEED_INIT    1
249 #define ATMCI_SHUTDOWN          2
250 #define ATMCI_SUSPENDED         3
251
252         int                     detect_pin;
253         int                     wp_pin;
254         bool                    detect_is_active_high;
255
256         struct timer_list       detect_timer;
257 };
258
259 #define atmci_test_and_clear_pending(host, event)               \
260         test_and_clear_bit(event, &host->pending_events)
261 #define atmci_set_completed(host, event)                        \
262         set_bit(event, &host->completed_events)
263 #define atmci_set_pending(host, event)                          \
264         set_bit(event, &host->pending_events)
265
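/*
 * Typical handshake with the helpers above (a minimal sketch; the real
 * interrupt handler records a status snapshot first, as required by the
 * ordering rules documented for struct atmel_mci):
 *
 *   interrupt handler:
 *     host->cmd_status = status;
 *     atmci_set_pending(host, EVENT_CMD_RDY);
 *     tasklet_schedule(&host->tasklet);
 *
 *   tasklet:
 *     if (atmci_test_and_clear_pending(host, EVENT_CMD_RDY)) {
 *             atmci_set_completed(host, EVENT_CMD_RDY);
 *             ... handle the completed command ...
 *     }
 */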
266 /*
267  * The debugfs stuff below is mostly optimized away when
268  * CONFIG_DEBUG_FS is not set.
269  */
270 static int atmci_req_show(struct seq_file *s, void *v)
271 {
272         struct atmel_mci_slot   *slot = s->private;
273         struct mmc_request      *mrq;
274         struct mmc_command      *cmd;
275         struct mmc_command      *stop;
276         struct mmc_data         *data;
277
278         /* Make sure we get a consistent snapshot */
279         spin_lock_bh(&slot->host->lock);
280         mrq = slot->mrq;
281
282         if (mrq) {
283                 cmd = mrq->cmd;
284                 data = mrq->data;
285                 stop = mrq->stop;
286
287                 if (cmd)
288                         seq_printf(s,
289                                 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
290                                 cmd->opcode, cmd->arg, cmd->flags,
291                                 cmd->resp[0], cmd->resp[1], cmd->resp[2],
292                                 cmd->resp[3], cmd->error);
293                 if (data)
294                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
295                                 data->bytes_xfered, data->blocks,
296                                 data->blksz, data->flags, data->error);
297                 if (stop)
298                         seq_printf(s,
299                                 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
300                                 stop->opcode, stop->arg, stop->flags,
301                                 stop->resp[0], stop->resp[1], stop->resp[2],
302                                 stop->resp[3], stop->error);
303         }
304
305         spin_unlock_bh(&slot->host->lock);
306
307         return 0;
308 }
309
310 static int atmci_req_open(struct inode *inode, struct file *file)
311 {
312         return single_open(file, atmci_req_show, inode->i_private);
313 }
314
315 static const struct file_operations atmci_req_fops = {
316         .owner          = THIS_MODULE,
317         .open           = atmci_req_open,
318         .read           = seq_read,
319         .llseek         = seq_lseek,
320         .release        = single_release,
321 };
322
323 static void atmci_show_status_reg(struct seq_file *s,
324                 const char *regname, u32 value)
325 {
326         static const char       *sr_bit[] = {
327                 [0]     = "CMDRDY",
328                 [1]     = "RXRDY",
329                 [2]     = "TXRDY",
330                 [3]     = "BLKE",
331                 [4]     = "DTIP",
332                 [5]     = "NOTBUSY",
333                 [6]     = "ENDRX",
334                 [7]     = "ENDTX",
335                 [8]     = "SDIOIRQA",
336                 [9]     = "SDIOIRQB",
337                 [12]    = "SDIOWAIT",
338                 [14]    = "RXBUFF",
339                 [15]    = "TXBUFE",
340                 [16]    = "RINDE",
341                 [17]    = "RDIRE",
342                 [18]    = "RCRCE",
343                 [19]    = "RENDE",
344                 [20]    = "RTOE",
345                 [21]    = "DCRCE",
346                 [22]    = "DTOE",
347                 [23]    = "CSTOE",
348                 [24]    = "BLKOVRE",
349                 [25]    = "DMADONE",
350                 [26]    = "FIFOEMPTY",
351                 [27]    = "XFRDONE",
352                 [30]    = "OVRE",
353                 [31]    = "UNRE",
354         };
355         unsigned int            i;
356
357         seq_printf(s, "%s:\t0x%08x", regname, value);
358         for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
359                 if (value & (1 << i)) {
360                         if (sr_bit[i])
361                                 seq_printf(s, " %s", sr_bit[i]);
362                         else
363                                 seq_puts(s, " UNKNOWN");
364                 }
365         }
366         seq_putc(s, '\n');
367 }
368
369 static int atmci_regs_show(struct seq_file *s, void *v)
370 {
371         struct atmel_mci        *host = s->private;
372         u32                     *buf;
373
374         buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
375         if (!buf)
376                 return -ENOMEM;
377
378         /*
379          * Grab a more or less consistent snapshot. Note that we're
380          * not disabling interrupts, so IMR and SR may not be
381          * consistent.
382          */
383         spin_lock_bh(&host->lock);
384         clk_enable(host->mck);
385         memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
386         clk_disable(host->mck);
387         spin_unlock_bh(&host->lock);
388
389         seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
390                         buf[ATMCI_MR / 4],
391                         buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
392                         buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "",
393                         buf[ATMCI_MR / 4] & 0xff);
394         seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
395         seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
396         seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
397         seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
398                         buf[ATMCI_BLKR / 4],
399                         buf[ATMCI_BLKR / 4] & 0xffff,
400                         (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
401         if (host->caps.has_cstor_reg)
402                 seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
403
404         /* Don't read RSPR and RDR; it will consume the data there */
405
406         atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
407         atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
408
409         if (host->caps.has_dma) {
410                 u32 val;
411
412                 val = buf[ATMCI_DMA / 4];
413                 seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
414                                 val, val & 3,
415                                 ((val >> 4) & 3) ?
416                                         1 << (((val >> 4) & 3) + 1) : 1,
417                                 val & ATMCI_DMAEN ? " DMAEN" : "");
418         }
419         if (host->caps.has_cfg_reg) {
420                 u32 val;
421
422                 val = buf[ATMCI_CFG / 4];
423                 seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
424                                 val,
425                                 val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
426                                 val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
427                                 val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
428                                 val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
429         }
430
431         kfree(buf);
432
433         return 0;
434 }
435
436 static int atmci_regs_open(struct inode *inode, struct file *file)
437 {
438         return single_open(file, atmci_regs_show, inode->i_private);
439 }
440
441 static const struct file_operations atmci_regs_fops = {
442         .owner          = THIS_MODULE,
443         .open           = atmci_regs_open,
444         .read           = seq_read,
445         .llseek         = seq_lseek,
446         .release        = single_release,
447 };
448
449 static void atmci_init_debugfs(struct atmel_mci_slot *slot)
450 {
451         struct mmc_host         *mmc = slot->mmc;
452         struct atmel_mci        *host = slot->host;
453         struct dentry           *root;
454         struct dentry           *node;
455
456         root = mmc->debugfs_root;
457         if (!root)
458                 return;
459
460         node = debugfs_create_file("regs", S_IRUSR, root, host,
461                         &atmci_regs_fops);
462         if (IS_ERR(node))
463                 return;
464         if (!node)
465                 goto err;
466
467         node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
468         if (!node)
469                 goto err;
470
471         node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
472         if (!node)
473                 goto err;
474
475         node = debugfs_create_x32("pending_events", S_IRUSR, root,
476                                      (u32 *)&host->pending_events);
477         if (!node)
478                 goto err;
479
480         node = debugfs_create_x32("completed_events", S_IRUSR, root,
481                                      (u32 *)&host->completed_events);
482         if (!node)
483                 goto err;
484
485         return;
486
487 err:
488         dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
489 }
490
491 static inline unsigned int atmci_get_version(struct atmel_mci *host)
492 {
493         return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
494 }
495
496 static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
497                                         unsigned int ns)
498 {
499         /*
500          * It is easier here to use us instead of ns for the timeout;
501          * it prevents overflows during the calculation.
502          */
503         unsigned int us = DIV_ROUND_UP(ns, 1000);
504
505         /* Maximum clock frequency is host->bus_hz/2 */
506         return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
507 }
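/*
 * Worked example for the conversion above (illustrative figures, not from
 * a real board): with bus_hz = 132 MHz, one microsecond corresponds to
 * DIV_ROUND_UP(132000000, 2000000) = 66 MCI clock cycles, so a 100 ms
 * card timeout (ns = 100000000, us = 100000) yields 6600000 timeout
 * clocks before the caller adds data->timeout_clks.
 */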
508
509 static void atmci_set_timeout(struct atmel_mci *host,
510                 struct atmel_mci_slot *slot, struct mmc_data *data)
511 {
512         static unsigned dtomul_to_shift[] = {
513                 0, 4, 7, 8, 10, 12, 16, 20
514         };
515         unsigned        timeout;
516         unsigned        dtocyc;
517         unsigned        dtomul;
518
519         timeout = atmci_ns_to_clocks(host, data->timeout_ns)
520                 + data->timeout_clks;
521
522         for (dtomul = 0; dtomul < 8; dtomul++) {
523                 unsigned shift = dtomul_to_shift[dtomul];
524                 dtocyc = (timeout + (1 << shift) - 1) >> shift;
525                 if (dtocyc < 15)
526                         break;
527         }
528
529         if (dtomul >= 8) {
530                 dtomul = 7;
531                 dtocyc = 15;
532         }
533
534         dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
535                         dtocyc << dtomul_to_shift[dtomul]);
536         atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
537 }
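/*
 * Example of the DTOMUL/DTOCYC search above (a sketch with made-up
 * numbers): for a timeout of 5000 clock cycles, the multipliers 1, 16,
 * 128 and 256 all leave DTOCYC >= 15, but DTOMUL = 4 (multiplier 1024)
 * gives DTOCYC = DIV_ROUND_UP(5000, 1024) = 5, so DTOR is programmed
 * with DTOMUL = 4 and DTOCYC = 5, i.e. an effective timeout of
 * 5 * 1024 = 5120 cycles.
 */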
538
539 /*
540  * Return mask with command flags to be enabled for this command.
541  */
542 static u32 atmci_prepare_command(struct mmc_host *mmc,
543                                  struct mmc_command *cmd)
544 {
545         struct mmc_data *data;
546         u32             cmdr;
547
548         cmd->error = -EINPROGRESS;
549
550         cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
551
552         if (cmd->flags & MMC_RSP_PRESENT) {
553                 if (cmd->flags & MMC_RSP_136)
554                         cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
555                 else
556                         cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
557         }
558
559         /*
560          * This should really be MAXLAT_5 for CMD2 and ACMD41, but
561          * it's too difficult to determine whether this is an ACMD or
562          * not. Better make it 64.
563          */
564         cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
565
566         if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
567                 cmdr |= ATMCI_CMDR_OPDCMD;
568
569         data = cmd->data;
570         if (data) {
571                 cmdr |= ATMCI_CMDR_START_XFER;
572
573                 if (cmd->opcode == SD_IO_RW_EXTENDED) {
574                         cmdr |= ATMCI_CMDR_SDIO_BLOCK;
575                 } else {
576                         if (data->flags & MMC_DATA_STREAM)
577                                 cmdr |= ATMCI_CMDR_STREAM;
578                         else if (data->blocks > 1)
579                                 cmdr |= ATMCI_CMDR_MULTI_BLOCK;
580                         else
581                                 cmdr |= ATMCI_CMDR_BLOCK;
582                 }
583
584                 if (data->flags & MMC_DATA_READ)
585                         cmdr |= ATMCI_CMDR_TRDIR_READ;
586         }
587
588         return cmdr;
589 }
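/*
 * Example of the flags assembled above (illustrative, for a plain
 * single-block read such as CMD17 with an R1 response on a push-pull
 * bus): the returned value is ATMCI_CMDR_CMDNB(17) |
 * ATMCI_CMDR_RSPTYP_48BIT | ATMCI_CMDR_MAXLAT_64CYC |
 * ATMCI_CMDR_START_XFER | ATMCI_CMDR_BLOCK | ATMCI_CMDR_TRDIR_READ.
 */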
590
591 static void atmci_send_command(struct atmel_mci *host,
592                 struct mmc_command *cmd, u32 cmd_flags)
593 {
594         WARN_ON(host->cmd);
595         host->cmd = cmd;
596
597         dev_vdbg(&host->pdev->dev,
598                         "start command: ARGR=0x%08x CMDR=0x%08x\n",
599                         cmd->arg, cmd_flags);
600
601         atmci_writel(host, ATMCI_ARGR, cmd->arg);
602         atmci_writel(host, ATMCI_CMDR, cmd_flags);
603 }
604
605 static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
606 {
607         atmci_send_command(host, data->stop, host->stop_cmdr);
608         atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
609 }
610
611 /*
612  * Configure the given PDC buffer, taking care of alignment issues.
613  * Update host->data_size and host->sg.
614  */
615 static void atmci_pdc_set_single_buf(struct atmel_mci *host,
616         enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
617 {
618         u32 pointer_reg, counter_reg;
619         unsigned int buf_size;
620
621         if (dir == XFER_RECEIVE) {
622                 pointer_reg = ATMEL_PDC_RPR;
623                 counter_reg = ATMEL_PDC_RCR;
624         } else {
625                 pointer_reg = ATMEL_PDC_TPR;
626                 counter_reg = ATMEL_PDC_TCR;
627         }
628
629         if (buf_nb == PDC_SECOND_BUF) {
630                 pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
631                 counter_reg += ATMEL_PDC_SCND_BUF_OFF;
632         }
633
634         if (!host->caps.has_rwproof) {
635                 buf_size = host->buf_size;
636                 atmci_writel(host, pointer_reg, host->buf_phys_addr);
637         } else {
638                 buf_size = sg_dma_len(host->sg);
639                 atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
640         }
641
642         if (host->data_size <= buf_size) {
643                 if (host->data_size & 0x3) {
644                         /* If the size is not a multiple of 4, transfer in bytes */
645                         atmci_writel(host, counter_reg, host->data_size);
646                         atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
647                 } else {
648                         /* Otherwise, transfer 32-bit words */
649                         atmci_writel(host, counter_reg, host->data_size / 4);
650                 }
651                 host->data_size = 0;
652         } else {
653                 /* We assume the scatterlist entry length is 32-bit aligned */
654                 atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
655                 host->data_size -= sg_dma_len(host->sg);
656                 if (host->data_size)
657                         host->sg = sg_next(host->sg);
658         }
659 }
660
661 /*
662  * Configure the PDC buffers according to the data size, i.e. configure one or two
663  * buffers. Don't use this function if you want to configure only the second
664  * buffer. In this case, use atmci_pdc_set_single_buf.
665  */
666 static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
667 {
668         atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
669         if (host->data_size)
670                 atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
671 }
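/*
 * Example of how the two helpers above split a transfer (a sketch,
 * assuming the rwproof capability so the scatterlist is used directly):
 * for a 2048 byte read described by four 512 byte sg entries, the first
 * call programs the pointer/counter registers with the first entry and
 * the second call programs the "next buffer" registers with the second
 * entry; the remaining entries are handed to the PDC later, one at a
 * time, as each buffer completes.
 */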
672
673 /*
674  * Unmap sg lists, called when transfer is finished.
675  */
676 static void atmci_pdc_cleanup(struct atmel_mci *host)
677 {
678         struct mmc_data         *data = host->data;
679
680         if (data)
681                 dma_unmap_sg(&host->pdev->dev,
682                                 data->sg, data->sg_len,
683                                 ((data->flags & MMC_DATA_WRITE)
684                                  ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
685 }
686
687 /*
688  * Disable PDC transfers. Set the EVENT_XFER_COMPLETE pending flag once the
689  * ATMCI_TXBUFE or ATMCI_RXBUFF interrupt has been received. The ATMCI_NOTBUSY
690  * interrupt is then needed for both transfer directions.
691  */
692 static void atmci_pdc_complete(struct atmel_mci *host)
693 {
694         int transfer_size = host->data->blocks * host->data->blksz;
695
696         atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
697
698         if ((!host->caps.has_rwproof)
699             && (host->data->flags & MMC_DATA_READ))
700                 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
701                                     host->buffer, transfer_size);
702
703         atmci_pdc_cleanup(host);
704
705         /*
706          * If the card was removed, data will be NULL. No point trying
707          * to send the stop command or waiting for NBUSY in this case.
708          */
709         if (host->data) {
710                 atmci_set_pending(host, EVENT_XFER_COMPLETE);
711                 tasklet_schedule(&host->tasklet);
712         }
713 }
714
715 static void atmci_dma_cleanup(struct atmel_mci *host)
716 {
717         struct mmc_data                 *data = host->data;
718
719         if (data)
720                 dma_unmap_sg(host->dma.chan->device->dev,
721                                 data->sg, data->sg_len,
722                                 ((data->flags & MMC_DATA_WRITE)
723                                  ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
724 }
725
726 /*
727  * This function is called by the DMA driver from tasklet context.
728  */
729 static void atmci_dma_complete(void *arg)
730 {
731         struct atmel_mci        *host = arg;
732         struct mmc_data         *data = host->data;
733
734         dev_vdbg(&host->pdev->dev, "DMA complete\n");
735
736         if (host->caps.has_dma)
737                 /* Disable DMA hardware handshaking on MCI */
738                 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
739
740         atmci_dma_cleanup(host);
741
742         /*
743          * If the card was removed, data will be NULL. No point trying
744          * to send the stop command or waiting for NBUSY in this case.
745          */
746         if (data) {
747                 atmci_set_pending(host, EVENT_XFER_COMPLETE);
748                 tasklet_schedule(&host->tasklet);
749
750                 /*
751                  * Regardless of what the documentation says, we have
752                  * to wait for NOTBUSY even after block read
753                  * operations.
754                  *
755                  * When the DMA transfer is complete, the controller
756                  * may still be reading the CRC from the card, i.e.
757                  * the data transfer is still in progress and we
758                  * haven't seen all the potential error bits yet.
759                  *
760                  * The interrupt handler will schedule a different
761                  * tasklet to finish things up when the data transfer
762                  * is completely done.
763                  *
764                  * We may not complete the mmc request here anyway
765                  * because the mmc layer may call back and cause us to
766                  * violate the "don't submit new operations from the
767                  * completion callback" rule of the dma engine
768                  * framework.
769                  */
770                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
771         }
772 }
773
774 /*
775  * Returns a mask of interrupt flags to be enabled after the whole
776  * request has been prepared.
777  */
778 static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
779 {
780         u32 iflags;
781
782         data->error = -EINPROGRESS;
783
784         host->sg = data->sg;
785         host->data = data;
786         host->data_chan = NULL;
787
788         iflags = ATMCI_DATA_ERROR_FLAGS;
789
790         /*
791          * Errata: MMC data write operation with less than 12
792          * bytes is impossible.
793          *
794          * Errata: MCI Transmit Data Register (TDR) FIFO
795          * corruption when length is not multiple of 4.
796          */
797         if (data->blocks * data->blksz < 12
798                         || (data->blocks * data->blksz) & 3)
799                 host->need_reset = true;
800
801         host->pio_offset = 0;
802         if (data->flags & MMC_DATA_READ)
803                 iflags |= ATMCI_RXRDY;
804         else
805                 iflags |= ATMCI_TXRDY;
806
807         return iflags;
808 }
809
810 /*
811  * Set interrupt flags and set the block length in the MCI mode register even
812  * though this value is also accessible in the MCI block register. It seems to
813  * be necessary before the High Speed MCI version. It also maps the sg list and
814  * configures the PDC registers.
815  */
816 static u32
817 atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
818 {
819         u32 iflags, tmp;
820         unsigned int sg_len;
821         enum dma_data_direction dir;
822
823         data->error = -EINPROGRESS;
824
825         host->data = data;
826         host->sg = data->sg;
827         iflags = ATMCI_DATA_ERROR_FLAGS;
828
829         /* Enable pdc mode */
830         atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
831
832         if (data->flags & MMC_DATA_READ) {
833                 dir = DMA_FROM_DEVICE;
834                 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
835         } else {
836                 dir = DMA_TO_DEVICE;
837                 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
838         }
839
840         /* Set BLKLEN */
841         tmp = atmci_readl(host, ATMCI_MR);
842         tmp &= 0x0000ffff;
843         tmp |= ATMCI_BLKLEN(data->blksz);
844         atmci_writel(host, ATMCI_MR, tmp);
845
846         /* Configure PDC */
847         host->data_size = data->blocks * data->blksz;
848         sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
849
850         if ((!host->caps.has_rwproof)
851             && (host->data->flags & MMC_DATA_WRITE))
852                 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
853                                   host->buffer, host->data_size);
854
855         if (host->data_size)
856                 atmci_pdc_set_both_buf(host,
857                         ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
858
859         return iflags;
860 }
861
862 static u32
863 atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
864 {
865         struct dma_chan                 *chan;
866         struct dma_async_tx_descriptor  *desc;
867         struct scatterlist              *sg;
868         unsigned int                    i;
869         enum dma_data_direction         direction;
870         enum dma_transfer_direction     slave_dirn;
871         unsigned int                    sglen;
872         u32 iflags;
873
874         data->error = -EINPROGRESS;
875
876         WARN_ON(host->data);
877         host->sg = NULL;
878         host->data = data;
879
880         iflags = ATMCI_DATA_ERROR_FLAGS;
881
882         /*
883          * We don't do DMA on "complex" transfers, i.e. with
884          * non-word-aligned buffers or lengths. Also, we don't bother
885          * with all the DMA setup overhead for short transfers.
886          */
887         if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
888                 return atmci_prepare_data(host, data);
889         if (data->blksz & 3)
890                 return atmci_prepare_data(host, data);
891
892         for_each_sg(data->sg, sg, data->sg_len, i) {
893                 if (sg->offset & 3 || sg->length & 3)
894                         return atmci_prepare_data(host, data);
895         }
896
897         /* If we don't have a channel, we can't do DMA */
898         chan = host->dma.chan;
899         if (chan)
900                 host->data_chan = chan;
901
902         if (!chan)
903                 return -ENODEV;
904
905         if (host->caps.has_dma)
906                 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
907
908         if (data->flags & MMC_DATA_READ) {
909                 direction = DMA_FROM_DEVICE;
910                 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
911         } else {
912                 direction = DMA_TO_DEVICE;
913                 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
914         }
915
916         sglen = dma_map_sg(chan->device->dev, data->sg,
917                         data->sg_len, direction);
918
919         dmaengine_slave_config(chan, &host->dma_conf);
920         desc = dmaengine_prep_slave_sg(chan,
921                         data->sg, sglen, slave_dirn,
922                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
923         if (!desc)
924                 goto unmap_exit;
925
926         host->dma.data_desc = desc;
927         desc->callback = atmci_dma_complete;
928         desc->callback_param = host;
929
930         return iflags;
931 unmap_exit:
932         dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
933         return -ENOMEM;
934 }
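/*
 * Illustration of the fallback logic above (a sketch): a 512 byte
 * single-block read with word-aligned sg entries is mapped and handed to
 * the dmaengine channel, while a transfer below ATMCI_DMA_THRESHOLD
 * (16 bytes), or one with a non word-aligned block size or sg entry,
 * silently falls back to PIO via atmci_prepare_data(), which enables
 * ATMCI_RXRDY/ATMCI_TXRDY instead.
 */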
935
936 static void
937 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
938 {
939         return;
940 }
941
942 /*
943  * Start PDC according to transfer direction.
944  */
945 static void
946 atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
947 {
948         if (data->flags & MMC_DATA_READ)
949                 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
950         else
951                 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
952 }
953
954 static void
955 atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
956 {
957         struct dma_chan                 *chan = host->data_chan;
958         struct dma_async_tx_descriptor  *desc = host->dma.data_desc;
959
960         if (chan) {
961                 dmaengine_submit(desc);
962                 dma_async_issue_pending(chan);
963         }
964 }
965
966 static void atmci_stop_transfer(struct atmel_mci *host)
967 {
968         atmci_set_pending(host, EVENT_XFER_COMPLETE);
969         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
970 }
971
972 /*
973  * Stop data transfer because error(s) occurred.
974  */
975 static void atmci_stop_transfer_pdc(struct atmel_mci *host)
976 {
977         atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
978 }
979
980 static void atmci_stop_transfer_dma(struct atmel_mci *host)
981 {
982         struct dma_chan *chan = host->data_chan;
983
984         if (chan) {
985                 dmaengine_terminate_all(chan);
986                 atmci_dma_cleanup(host);
987         } else {
988                 /* Data transfer was stopped by the interrupt handler */
989                 atmci_set_pending(host, EVENT_XFER_COMPLETE);
990                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
991         }
992 }
993
994 /*
995  * Start a request: prepare data if needed, prepare the command and activate
996  * interrupts.
997  */
998 static void atmci_start_request(struct atmel_mci *host,
999                 struct atmel_mci_slot *slot)
1000 {
1001         struct mmc_request      *mrq;
1002         struct mmc_command      *cmd;
1003         struct mmc_data         *data;
1004         u32                     iflags;
1005         u32                     cmdflags;
1006
1007         mrq = slot->mrq;
1008         host->cur_slot = slot;
1009         host->mrq = mrq;
1010
1011         host->pending_events = 0;
1012         host->completed_events = 0;
1013         host->cmd_status = 0;
1014         host->data_status = 0;
1015
1016         if (host->need_reset) {
1017                 iflags = atmci_readl(host, ATMCI_IMR);
1018                 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
1019                 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1020                 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1021                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1022                 if (host->caps.has_cfg_reg)
1023                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1024                 atmci_writel(host, ATMCI_IER, iflags);
1025                 host->need_reset = false;
1026         }
1027         atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
1028
1029         iflags = atmci_readl(host, ATMCI_IMR);
1030         if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1031                 dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
1032                                 iflags);
1033
1034         if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
1035                 /* Send init sequence (74 clock cycles) */
1036                 atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
1037                 while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
1038                         cpu_relax();
1039         }
1040         iflags = 0;
1041         data = mrq->data;
1042         if (data) {
1043                 atmci_set_timeout(host, slot, data);
1044
1045                 /* Must set block count/size before sending command */
1046                 atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
1047                                 | ATMCI_BLKLEN(data->blksz));
1048                 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
1049                         ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
1050
1051                 iflags |= host->prepare_data(host, data);
1052         }
1053
1054         iflags |= ATMCI_CMDRDY;
1055         cmd = mrq->cmd;
1056         cmdflags = atmci_prepare_command(slot->mmc, cmd);
1057         atmci_send_command(host, cmd, cmdflags);
1058
1059         if (data)
1060                 host->submit_data(host, data);
1061
1062         if (mrq->stop) {
1063                 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
1064                 host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
1065                 if (!(data->flags & MMC_DATA_WRITE))
1066                         host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
1067                 if (data->flags & MMC_DATA_STREAM)
1068                         host->stop_cmdr |= ATMCI_CMDR_STREAM;
1069                 else
1070                         host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
1071         }
1072
1073         /*
1074          * We could have enabled interrupts earlier, but I suspect
1075          * that would open up a nice can of interesting race
1076          * conditions (e.g. command and data complete, but stop not
1077          * prepared yet.)
1078          */
1079         atmci_writel(host, ATMCI_IER, iflags);
1080 }
1081
1082 static void atmci_queue_request(struct atmel_mci *host,
1083                 struct atmel_mci_slot *slot, struct mmc_request *mrq)
1084 {
1085         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1086                         host->state);
1087
1088         spin_lock_bh(&host->lock);
1089         slot->mrq = mrq;
1090         if (host->state == STATE_IDLE) {
1091                 host->state = STATE_SENDING_CMD;
1092                 atmci_start_request(host, slot);
1093         } else {
1094                 list_add_tail(&slot->queue_node, &host->queue);
1095         }
1096         spin_unlock_bh(&host->lock);
1097 }
1098
1099 static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1100 {
1101         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1102         struct atmel_mci        *host = slot->host;
1103         struct mmc_data         *data;
1104
1105         WARN_ON(slot->mrq);
1106
1107         /*
1108          * We may "know" the card is gone even though there's still an
1109          * electrical connection. If so, we really need to communicate
1110          * this to the MMC core since there won't be any more
1111          * interrupts as the card is completely removed. Otherwise,
1112          * the MMC core might believe the card is still there even
1113          * though the card was just removed very slowly.
1114          */
1115         if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1116                 mrq->cmd->error = -ENOMEDIUM;
1117                 mmc_request_done(mmc, mrq);
1118                 return;
1119         }
1120
1121         /* We don't support multiple blocks of weird lengths. */
1122         data = mrq->data;
1123         if (data && data->blocks > 1 && data->blksz & 3) {
1124                 mrq->cmd->error = -EINVAL;
1125                 mmc_request_done(mmc, mrq);
                return;
1126         }
1127
1128         atmci_queue_request(host, slot, mrq);
1129 }
1130
1131 static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1132 {
1133         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1134         struct atmel_mci        *host = slot->host;
1135         unsigned int            i;
1136
1137         slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1138         switch (ios->bus_width) {
1139         case MMC_BUS_WIDTH_1:
1140                 slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
1141                 break;
1142         case MMC_BUS_WIDTH_4:
1143                 slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
1144                 break;
1145         }
1146
1147         if (ios->clock) {
1148                 unsigned int clock_min = ~0U;
1149                 u32 clkdiv;
1150
1151                 spin_lock_bh(&host->lock);
1152                 if (!host->mode_reg) {
1153                         clk_enable(host->mck);
1154                         atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1155                         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1156                         if (host->caps.has_cfg_reg)
1157                                 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1158                 }
1159
1160                 /*
1161                  * Use mirror of ios->clock to prevent race with mmc
1162                  * core ios update when finding the minimum.
1163                  */
1164                 slot->clock = ios->clock;
1165                 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1166                         if (host->slot[i] && host->slot[i]->clock
1167                                         && host->slot[i]->clock < clock_min)
1168                                 clock_min = host->slot[i]->clock;
1169                 }
1170
1171                 /* Calculate clock divider */
1172                 if (host->caps.has_odd_clk_div) {
1173                         clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1174                         if (clkdiv > 511) {
1175                                 dev_warn(&mmc->class_dev,
1176                                          "clock %u too slow; using %lu\n",
1177                                          clock_min, host->bus_hz / (511 + 2));
1178                                 clkdiv = 511;
1179                         }
1180                         host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
1181                                          | ATMCI_MR_CLKODD(clkdiv & 1);
1182                 } else {
1183                         clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
1184                         if (clkdiv > 255) {
1185                                 dev_warn(&mmc->class_dev,
1186                                          "clock %u too slow; using %lu\n",
1187                                          clock_min, host->bus_hz / (2 * 256));
1188                                 clkdiv = 255;
1189                         }
1190                         host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
1191                 }
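                /*
                 * Worked example (illustrative, assuming bus_hz = 100 MHz and a
                 * requested 25 MHz card clock): with odd divider support,
                 * clkdiv = DIV_ROUND_UP(100000000, 25000000) - 2 = 2, giving
                 * MCK / (clkdiv + 2) = 25 MHz; on older IP,
                 * clkdiv = DIV_ROUND_UP(100000000, 2 * 25000000) - 1 = 1, i.e.
                 * MCK / (2 * (clkdiv + 1)) = 25 MHz as well.
                 */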
1192
1193                 /*
1194                  * WRPROOF and RDPROOF prevent overruns/underruns by
1195                  * stopping the clock when the FIFO is full/empty.
1196                  * This state is not expected to last for long.
1197                  */
1198                 if (host->caps.has_rwproof)
1199                         host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
1200
1201                 if (host->caps.has_cfg_reg) {
1202                         /* Set up High Speed mode according to the card's capabilities */
1203                         if (ios->timing == MMC_TIMING_SD_HS)
1204                                 host->cfg_reg |= ATMCI_CFG_HSMODE;
1205                         else
1206                                 host->cfg_reg &= ~ATMCI_CFG_HSMODE;
1207                 }
1208
1209                 if (list_empty(&host->queue)) {
1210                         atmci_writel(host, ATMCI_MR, host->mode_reg);
1211                         if (host->caps.has_cfg_reg)
1212                                 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1213                 } else {
1214                         host->need_clock_update = true;
1215                 }
1216
1217                 spin_unlock_bh(&host->lock);
1218         } else {
1219                 bool any_slot_active = false;
1220
1221                 spin_lock_bh(&host->lock);
1222                 slot->clock = 0;
1223                 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1224                         if (host->slot[i] && host->slot[i]->clock) {
1225                                 any_slot_active = true;
1226                                 break;
1227                         }
1228                 }
1229                 if (!any_slot_active) {
1230                         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1231                         if (host->mode_reg) {
1232                                 atmci_readl(host, ATMCI_MR);
1233                                 clk_disable(host->mck);
1234                         }
1235                         host->mode_reg = 0;
1236                 }
1237                 spin_unlock_bh(&host->lock);
1238         }
1239
1240         switch (ios->power_mode) {
1241         case MMC_POWER_UP:
1242                 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
1243                 break;
1244         default:
1245                 /*
1246                  * TODO: None of the currently available AVR32-based
1247                  * boards allow MMC power to be turned off. Implement
1248                  * power control when this can be tested properly.
1249                  *
1250                  * We also need to hook this into the clock management
1251                  * somehow so that newly inserted cards aren't
1252                  * subjected to a fast clock before we have a chance
1253                  * to figure out what the maximum rate is. Currently,
1254                  * there's no way to avoid this, and there never will
1255                  * be for boards that don't support power control.
1256                  */
1257                 break;
1258         }
1259 }
1260
1261 static int atmci_get_ro(struct mmc_host *mmc)
1262 {
1263         int                     read_only = -ENOSYS;
1264         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1265
1266         if (gpio_is_valid(slot->wp_pin)) {
1267                 read_only = gpio_get_value(slot->wp_pin);
1268                 dev_dbg(&mmc->class_dev, "card is %s\n",
1269                                 read_only ? "read-only" : "read-write");
1270         }
1271
1272         return read_only;
1273 }
1274
1275 static int atmci_get_cd(struct mmc_host *mmc)
1276 {
1277         int                     present = -ENOSYS;
1278         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1279
1280         if (gpio_is_valid(slot->detect_pin)) {
1281                 present = !(gpio_get_value(slot->detect_pin) ^
1282                             slot->detect_is_active_high);
1283                 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1284                                 present ? "" : "not ");
1285         }
1286
1287         return present;
1288 }
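/*
 * The XOR above simply normalizes the detect pin polarity (an
 * illustrative truth table): with detect_is_active_high = false (e.g. an
 * active-low switch), a GPIO value of 0 reports the card as present;
 * with detect_is_active_high = true, a GPIO value of 1 does.
 */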
1289
1290 static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1291 {
1292         struct atmel_mci_slot   *slot = mmc_priv(mmc);
1293         struct atmel_mci        *host = slot->host;
1294
1295         if (enable)
1296                 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1297         else
1298                 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1299 }
1300
1301 static const struct mmc_host_ops atmci_ops = {
1302         .request        = atmci_request,
1303         .set_ios        = atmci_set_ios,
1304         .get_ro         = atmci_get_ro,
1305         .get_cd         = atmci_get_cd,
1306         .enable_sdio_irq = atmci_enable_sdio_irq,
1307 };
1308
1309 /* Called with host->lock held */
1310 static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1311         __releases(&host->lock)
1312         __acquires(&host->lock)
1313 {
1314         struct atmel_mci_slot   *slot = NULL;
1315         struct mmc_host         *prev_mmc = host->cur_slot->mmc;
1316
1317         WARN_ON(host->cmd || host->data);
1318
1319         /*
1320          * Update the MMC clock rate if necessary. This may be
1321          * necessary if set_ios() is called when a different slot is
1322          * busy transferring data.
1323          */
1324         if (host->need_clock_update) {
1325                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1326                 if (host->caps.has_cfg_reg)
1327                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1328         }
1329
1330         host->cur_slot->mrq = NULL;
1331         host->mrq = NULL;
1332         if (!list_empty(&host->queue)) {
1333                 slot = list_entry(host->queue.next,
1334                                 struct atmel_mci_slot, queue_node);
1335                 list_del(&slot->queue_node);
1336                 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1337                                 mmc_hostname(slot->mmc));
1338                 host->state = STATE_SENDING_CMD;
1339                 atmci_start_request(host, slot);
1340         } else {
1341                 dev_vdbg(&host->pdev->dev, "list empty\n");
1342                 host->state = STATE_IDLE;
1343         }
1344
1345         spin_unlock(&host->lock);
1346         mmc_request_done(prev_mmc, mrq);
1347         spin_lock(&host->lock);
1348 }
1349
1350 static void atmci_command_complete(struct atmel_mci *host,
1351                         struct mmc_command *cmd)
1352 {
1353         u32             status = host->cmd_status;
1354
1355         /* Read the response from the card (up to 16 bytes) */
1356         cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1357         cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1358         cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1359         cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
1360
1361         if (status & ATMCI_RTOE)
1362                 cmd->error = -ETIMEDOUT;
1363         else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
1364                 cmd->error = -EILSEQ;
1365         else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1366                 cmd->error = -EIO;
1367         else
1368                 cmd->error = 0;
1369 }
1370
1371 static void atmci_detect_change(unsigned long data)
1372 {
1373         struct atmel_mci_slot   *slot = (struct atmel_mci_slot *)data;
1374         bool                    present;
1375         bool                    present_old;
1376
1377         /*
1378          * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
1379          * freeing the interrupt. We must not re-enable the interrupt
1380          * if it has been freed, and if we're shutting down, it
1381          * doesn't really matter whether the card is present or not.
1382          */
1383         smp_rmb();
1384         if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1385                 return;
1386
1387         enable_irq(gpio_to_irq(slot->detect_pin));
1388         present = !(gpio_get_value(slot->detect_pin) ^
1389                     slot->detect_is_active_high);
1390         present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1391
1392         dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1393                         present, present_old);
1394
1395         if (present != present_old) {
1396                 struct atmel_mci        *host = slot->host;
1397                 struct mmc_request      *mrq;
1398
1399                 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1400                         present ? "inserted" : "removed");
1401
1402                 spin_lock(&host->lock);
1403
1404                 if (!present)
1405                         clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1406                 else
1407                         set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1408
1409                 /* Clean up queue if present */
1410                 mrq = slot->mrq;
1411                 if (mrq) {
1412                         if (mrq == host->mrq) {
1413                                 /*
1414                                  * Reset controller to terminate any ongoing
1415                                  * commands or data transfers.
1416                                  */
1417                                 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1418                                 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1419                                 atmci_writel(host, ATMCI_MR, host->mode_reg);
1420                                 if (host->caps.has_cfg_reg)
1421                                         atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1422
1423                                 host->data = NULL;
1424                                 host->cmd = NULL;
1425
1426                                 switch (host->state) {
1427                                 case STATE_IDLE:
1428                                         break;
1429                                 case STATE_SENDING_CMD:
1430                                         mrq->cmd->error = -ENOMEDIUM;
1431                                         if (mrq->data)
1432                                                 host->stop_transfer(host);
1433                                         break;
1434                                 case STATE_DATA_XFER:
1435                                         mrq->data->error = -ENOMEDIUM;
1436                                         host->stop_transfer(host);
1437                                         break;
1438                                 case STATE_WAITING_NOTBUSY:
1439                                         mrq->data->error = -ENOMEDIUM;
1440                                         break;
1441                                 case STATE_SENDING_STOP:
1442                                         mrq->stop->error = -ENOMEDIUM;
1443                                         break;
1444                                 case STATE_END_REQUEST:
1445                                         break;
1446                                 }
1447
1448                                 atmci_request_end(host, mrq);
1449                         } else {
1450                                 list_del(&slot->queue_node);
1451                                 mrq->cmd->error = -ENOMEDIUM;
1452                                 if (mrq->data)
1453                                         mrq->data->error = -ENOMEDIUM;
1454                                 if (mrq->stop)
1455                                         mrq->stop->error = -ENOMEDIUM;
1456
1457                                 spin_unlock(&host->lock);
1458                                 mmc_request_done(slot->mmc, mrq);
1459                                 spin_lock(&host->lock);
1460                         }
1461                 }
1462                 spin_unlock(&host->lock);
1463
1464                 mmc_detect_change(slot->mmc, 0);
1465         }
1466 }
1467
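/*
 * Tasklet servicing the request state machine.  The transitions
 * implemented in the switch below are, in short:
 *
 *   SENDING_CMD     -> DATA_XFER        (data present, no command error)
 *                   -> WAITING_NOTBUSY  (no data, busy-signalling command)
 *                   -> END_REQUEST      (otherwise, or on command error)
 *   DATA_XFER       -> WAITING_NOTBUSY  (write)
 *                   -> SENDING_STOP     (read with a stop command)
 *                   -> END_REQUEST      (otherwise, or on data error)
 *   WAITING_NOTBUSY -> SENDING_STOP     (data done but stop still needed)
 *                   -> END_REQUEST      (otherwise)
 *   SENDING_STOP    -> WAITING_NOTBUSY  (stop command completed)
 *                   -> END_REQUEST      (on stop command error)
 *   END_REQUEST     -> IDLE
 *
 * A state that is waiting for an event only advances once the interrupt
 * handler has flagged that event as pending, and the loop below iterates
 * until the state stops changing.
 */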
1468 static void atmci_tasklet_func(unsigned long priv)
1469 {
1470         struct atmel_mci        *host = (struct atmel_mci *)priv;
1471         struct mmc_request      *mrq = host->mrq;
1472         struct mmc_data         *data = host->data;
1473         enum atmel_mci_state    state = host->state;
1474         enum atmel_mci_state    prev_state;
1475         u32                     status;
1476
1477         spin_lock(&host->lock);
1478
1479         state = host->state;
1480
1481         dev_vdbg(&host->pdev->dev,
1482                 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1483                 state, host->pending_events, host->completed_events,
1484                 atmci_readl(host, ATMCI_IMR));
1485
1486         do {
1487                 prev_state = state;
1488
1489                 switch (state) {
1490                 case STATE_IDLE:
1491                         break;
1492
1493                 case STATE_SENDING_CMD:
1494                         /*
1495                          * The command has been sent; we are waiting for the
1496                          * command-ready event. Three next states are possible:
1497                          * END_REQUEST by default, WAITING_NOTBUSY for a command
1498                          * that needs it, or DATA_XFER if there is data.
1499                          */
1500                         if (!atmci_test_and_clear_pending(host,
1501                                                 EVENT_CMD_RDY))
1502                                 break;
1503
1504                         host->cmd = NULL;
1505                         atmci_set_completed(host, EVENT_CMD_RDY);
1506                         atmci_command_complete(host, mrq->cmd);
1507                         if (mrq->data) {
1508                                 /*
1509                                  * If there is a command error don't start
1510                                  * data transfer.
1511                                  */
1512                                 if (mrq->cmd->error) {
1513                                         host->stop_transfer(host);
1514                                         host->data = NULL;
1515                                         atmci_writel(host, ATMCI_IDR,
1516                                                      ATMCI_TXRDY | ATMCI_RXRDY
1517                                                      | ATMCI_DATA_ERROR_FLAGS);
1518                                         state = STATE_END_REQUEST;
1519                                 } else
1520                                         state = STATE_DATA_XFER;
1521                         } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1522                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1523                                 state = STATE_WAITING_NOTBUSY;
1524                         } else
1525                                 state = STATE_END_REQUEST;
1526
1527                         break;
1528
1529                 case STATE_DATA_XFER:
1530                         if (atmci_test_and_clear_pending(host,
1531                                                 EVENT_DATA_ERROR)) {
1532                                 atmci_set_completed(host, EVENT_DATA_ERROR);
1533                                 state = STATE_END_REQUEST;
1534                                 break;
1535                         }
1536
1537                         /*
1538                          * A data transfer is in progress. The event expected
1539                          * to move to the next state depends on the data
1540                          * transfer type (PDC or DMA). Once the transfer is
1541                          * done we move to WAITING_NOTBUSY for a write, or
1542                          * directly to SENDING_STOP for a read.
1543                          */
1544                         if (!atmci_test_and_clear_pending(host,
1545                                                 EVENT_XFER_COMPLETE))
1546                                 break;
1547
1548                         atmci_set_completed(host, EVENT_XFER_COMPLETE);
1549
1550                         if (host->data->flags & MMC_DATA_WRITE) {
1551                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1552                                 state = STATE_WAITING_NOTBUSY;
1553                         } else if (host->mrq->stop) {
1554                                 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1555                                 atmci_send_stop_cmd(host, data);
1556                                 state = STATE_SENDING_STOP;
1557                         } else {
1558                                 host->data = NULL;
1559                                 data->bytes_xfered = data->blocks * data->blksz;
1560                                 data->error = 0;
1561                                 state = STATE_END_REQUEST;
1562                         }
1563                         break;
1564
1565                 case STATE_WAITING_NOTBUSY:
1566                         /*
1567                          * We can be in this state for two reasons: a command
1568                          * that requires waiting for the not-busy signal (the
1569                          * stop command included) or a write operation. In the
1570                          * latter case, we may still need to send a stop command.
1571                          */
1572                         if (!atmci_test_and_clear_pending(host,
1573                                                 EVENT_NOTBUSY))
1574                                 break;
1575
1576                         atmci_set_completed(host, EVENT_NOTBUSY);
1577
1578                         if (host->data) {
1579                                 /*
1580                                  * For some commands such as CMD53, even if
1581                                  * there is data transfer, there is no stop
1582                                  * command to send.
1583                                  */
1584                                 if (host->mrq->stop) {
1585                                         atmci_writel(host, ATMCI_IER,
1586                                                      ATMCI_CMDRDY);
1587                                         atmci_send_stop_cmd(host, data);
1588                                         state = STATE_SENDING_STOP;
1589                                 } else {
1590                                         host->data = NULL;
1591                                         data->bytes_xfered = data->blocks
1592                                                              * data->blksz;
1593                                         data->error = 0;
1594                                         state = STATE_END_REQUEST;
1595                                 }
1596                         } else
1597                                 state = STATE_END_REQUEST;
1598                         break;
1599
1600                 case STATE_SENDING_STOP:
1601                         /*
1602                          * In this state, it is important to set host->data to
1603                          * NULL (it is tested in the WAITING_NOTBUSY state) so
1604                          * that we go to the END_REQUEST state instead of
1605                          * sending the stop command again.
1606                          */
1607                         if (!atmci_test_and_clear_pending(host,
1608                                                 EVENT_CMD_RDY))
1609                                 break;
1610
1611                         host->cmd = NULL;
1612                         host->data = NULL;
1613                         data->bytes_xfered = data->blocks * data->blksz;
1614                         data->error = 0;
1615                         atmci_command_complete(host, mrq->stop);
1616                         if (mrq->stop->error) {
1617                                 host->stop_transfer(host);
1618                                 atmci_writel(host, ATMCI_IDR,
1619                                              ATMCI_TXRDY | ATMCI_RXRDY
1620                                              | ATMCI_DATA_ERROR_FLAGS);
1621                                 state = STATE_END_REQUEST;
1622                         } else {
1623                                 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1624                                 state = STATE_WAITING_NOTBUSY;
1625                         }
1626                         break;
1627
1628                 case STATE_END_REQUEST:
1629                         atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1630                                            | ATMCI_DATA_ERROR_FLAGS);
1631                         status = host->data_status;
1632                         if (unlikely(status)) {
1633                                 host->stop_transfer(host);
1634                                 host->data = NULL;
1635                                 if (status & ATMCI_DTOE) {
1636                                         data->error = -ETIMEDOUT;
1637                                 } else if (status & ATMCI_DCRCE) {
1638                                         data->error = -EILSEQ;
1639                                 } else {
1640                                         data->error = -EIO;
1641                                 }
1642                         }
1643
1644                         atmci_request_end(host, host->mrq);
1645                         state = STATE_IDLE;
1646                         break;
1647                 }
1648         } while (state != prev_state);
1649
1650         host->state = state;
1651
1652         spin_unlock(&host->lock);
1653 }
1654
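/*
 * PIO read path: read the RDR one 32-bit word at a time for as long as
 * RXRDY stays set, walking the scatterlist and splitting words that
 * straddle a scatterlist entry.  Once the last entry is filled, RXRDY
 * is masked, NOTBUSY is unmasked for the next state and
 * EVENT_XFER_COMPLETE is flagged for the tasklet.
 */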
1655 static void atmci_read_data_pio(struct atmel_mci *host)
1656 {
1657         struct scatterlist      *sg = host->sg;
1658         void                    *buf = sg_virt(sg);
1659         unsigned int            offset = host->pio_offset;
1660         struct mmc_data         *data = host->data;
1661         u32                     value;
1662         u32                     status;
1663         unsigned int            nbytes = 0;
1664
1665         do {
1666                 value = atmci_readl(host, ATMCI_RDR);
1667                 if (likely(offset + 4 <= sg->length)) {
1668                         put_unaligned(value, (u32 *)(buf + offset));
1669
1670                         offset += 4;
1671                         nbytes += 4;
1672
1673                         if (offset == sg->length) {
1674                                 flush_dcache_page(sg_page(sg));
1675                                 host->sg = sg = sg_next(sg);
1676                                 if (!sg)
1677                                         goto done;
1678
1679                                 offset = 0;
1680                                 buf = sg_virt(sg);
1681                         }
1682                 } else {
1683                         unsigned int remaining = sg->length - offset;
1684                         memcpy(buf + offset, &value, remaining);
1685                         nbytes += remaining;
1686
1687                         flush_dcache_page(sg_page(sg));
1688                         host->sg = sg = sg_next(sg);
1689                         if (!sg)
1690                                 goto done;
1691
1692                         offset = 4 - remaining;
1693                         buf = sg_virt(sg);
1694                         memcpy(buf, (u8 *)&value + remaining, offset);
1695                         nbytes += offset;
1696                 }
1697
1698                 status = atmci_readl(host, ATMCI_SR);
1699                 if (status & ATMCI_DATA_ERROR_FLAGS) {
1700                         atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
1701                                                 | ATMCI_DATA_ERROR_FLAGS));
1702                         host->data_status = status;
1703                         data->bytes_xfered += nbytes;
1704                         return;
1705                 }
1706         } while (status & ATMCI_RXRDY);
1707
1708         host->pio_offset = offset;
1709         data->bytes_xfered += nbytes;
1710
1711         return;
1712
1713 done:
1714         atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
1715         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1716         data->bytes_xfered += nbytes;
1717         smp_wmb();
1718         atmci_set_pending(host, EVENT_XFER_COMPLETE);
1719 }
1720
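/*
 * PIO write path, mirroring atmci_read_data_pio(): feed the TDR one
 * 32-bit word at a time while TXRDY stays set, handling words that
 * straddle a scatterlist boundary.  When the scatterlist is exhausted,
 * TXRDY is masked, NOTBUSY is unmasked and EVENT_XFER_COMPLETE is
 * flagged for the tasklet.
 */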
1721 static void atmci_write_data_pio(struct atmel_mci *host)
1722 {
1723         struct scatterlist      *sg = host->sg;
1724         void                    *buf = sg_virt(sg);
1725         unsigned int            offset = host->pio_offset;
1726         struct mmc_data         *data = host->data;
1727         u32                     value;
1728         u32                     status;
1729         unsigned int            nbytes = 0;
1730
1731         do {
1732                 if (likely(offset + 4 <= sg->length)) {
1733                         value = get_unaligned((u32 *)(buf + offset));
1734                         atmci_writel(host, ATMCI_TDR, value);
1735
1736                         offset += 4;
1737                         nbytes += 4;
1738                         if (offset == sg->length) {
1739                                 host->sg = sg = sg_next(sg);
1740                                 if (!sg)
1741                                         goto done;
1742
1743                                 offset = 0;
1744                                 buf = sg_virt(sg);
1745                         }
1746                 } else {
1747                         unsigned int remaining = sg->length - offset;
1748
1749                         value = 0;
1750                         memcpy(&value, buf + offset, remaining);
1751                         nbytes += remaining;
1752
1753                         host->sg = sg = sg_next(sg);
1754                         if (!sg) {
1755                                 atmci_writel(host, ATMCI_TDR, value);
1756                                 goto done;
1757                         }
1758
1759                         offset = 4 - remaining;
1760                         buf = sg_virt(sg);
1761                         memcpy((u8 *)&value + remaining, buf, offset);
1762                         atmci_writel(host, ATMCI_TDR, value);
1763                         nbytes += offset;
1764                 }
1765
1766                 status = atmci_readl(host, ATMCI_SR);
1767                 if (status & ATMCI_DATA_ERROR_FLAGS) {
1768                         atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
1769                                                 | ATMCI_DATA_ERROR_FLAGS));
1770                         host->data_status = status;
1771                         data->bytes_xfered += nbytes;
1772                         return;
1773                 }
1774         } while (status & ATMCI_TXRDY);
1775
1776         host->pio_offset = offset;
1777         data->bytes_xfered += nbytes;
1778
1779         return;
1780
1781 done:
1782         atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
1783         atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1784         data->bytes_xfered += nbytes;
1785         smp_wmb();
1786         atmci_set_pending(host, EVENT_XFER_COMPLETE);
1787 }
1788
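/* Forward SDIO card interrupts to the MMC core for each affected slot. */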
1789 static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1790 {
1791         int     i;
1792
1793         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1794                 struct atmel_mci_slot *slot = host->slot[i];
1795                 if (slot && (status & slot->sdio_irq)) {
1796                         mmc_signal_sdio_irq(slot->mmc);
1797                 }
1798         }
1799 }
1800
1801
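/*
 * Top-level interrupt handler.  Most events are just acknowledged here
 * and recorded (a status snapshot plus a pending-event bit) for the
 * tasklet to process, while the PDC buffer-chaining interrupts
 * (ENDRX/ENDTX, RXBUFF/TXBUFE) are handled directly since the next
 * buffer must be programmed without delay.  The loop is bounded to a
 * handful of passes so a flood of events cannot monopolise the CPU in
 * interrupt context.
 */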
1802 static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1803 {
1804         struct atmel_mci        *host = dev_id;
1805         u32                     status, mask, pending;
1806         unsigned int            pass_count = 0;
1807
1808         do {
1809                 status = atmci_readl(host, ATMCI_SR);
1810                 mask = atmci_readl(host, ATMCI_IMR);
1811                 pending = status & mask;
1812                 if (!pending)
1813                         break;
1814
1815                 if (pending & ATMCI_DATA_ERROR_FLAGS) {
1816                         atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
1817                                         | ATMCI_RXRDY | ATMCI_TXRDY
1818                                         | ATMCI_ENDRX | ATMCI_ENDTX
1819                                         | ATMCI_RXBUFF | ATMCI_TXBUFE);
1820
1821                         host->data_status = status;
1822                         smp_wmb();
1823                         atmci_set_pending(host, EVENT_DATA_ERROR);
1824                         tasklet_schedule(&host->tasklet);
1825                 }
1826
1827                 if (pending & ATMCI_TXBUFE) {
1828                         atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
1829                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1830                         /*
1831                          * We can receive this interrupt before the second PDC
1832                          * buffer has been configured, so we have to reconfigure
1833                          * both the first and second buffers again.
1834                          */
1835                         if (host->data_size) {
1836                                 atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
1837                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
1838                                 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
1839                         } else {
1840                                 atmci_pdc_complete(host);
1841                         }
1842                 } else if (pending & ATMCI_ENDTX) {
1843                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1844
1845                         if (host->data_size) {
1846                                 atmci_pdc_set_single_buf(host,
1847                                                 XFER_TRANSMIT, PDC_SECOND_BUF);
1848                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
1849                         }
1850                 }
1851
1852                 if (pending & ATMCI_RXBUFF) {
1853                         atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
1854                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1855                         /*
1856                          * We can receive this interrupt before the second PDC
1857                          * buffer has been configured, so we have to reconfigure
1858                          * both the first and second buffers again.
1859                          */
1860                         if (host->data_size) {
1861                                 atmci_pdc_set_both_buf(host, XFER_RECEIVE);
1862                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
1863                                 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
1864                         } else {
1865                                 atmci_pdc_complete(host);
1866                         }
1867                 } else if (pending & ATMCI_ENDRX) {
1868                         atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1869
1870                         if (host->data_size) {
1871                                 atmci_pdc_set_single_buf(host,
1872                                                 XFER_RECEIVE, PDC_SECOND_BUF);
1873                                 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
1874                         }
1875                 }
1876
1877                 /*
1878                  * Early MCI IPs, mainly the ones with a PDC, have issues
1879                  * with the not-busy signal: it is not raised after a data
1880                  * transmission unless a stop command has been sent. The
1881                  * appropriate workaround is to use the BLKE signal instead.
1882                  */
1883                 if (pending & ATMCI_BLKE) {
1884                         atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
1885                         smp_wmb();
1886                         atmci_set_pending(host, EVENT_NOTBUSY);
1887                         tasklet_schedule(&host->tasklet);
1888                 }
1889
1890                 if (pending & ATMCI_NOTBUSY) {
1891                         atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
1892                         smp_wmb();
1893                         atmci_set_pending(host, EVENT_NOTBUSY);
1894                         tasklet_schedule(&host->tasklet);
1895                 }
1896
1897                 if (pending & ATMCI_RXRDY)
1898                         atmci_read_data_pio(host);
1899                 if (pending & ATMCI_TXRDY)
1900                         atmci_write_data_pio(host);
1901
1902                 if (pending & ATMCI_CMDRDY) {
1903                         atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
1904                         host->cmd_status = status;
1905                         smp_wmb();
1906                         atmci_set_pending(host, EVENT_CMD_RDY);
1907                         tasklet_schedule(&host->tasklet);
1908                 }
1909
1910                 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1911                         atmci_sdio_interrupt(host, status);
1912
1913         } while (pass_count++ < 5);
1914
1915         return pass_count ? IRQ_HANDLED : IRQ_NONE;
1916 }
1917
1918 static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1919 {
1920         struct atmel_mci_slot   *slot = dev_id;
1921
1922         /*
1923          * Disable interrupts until the pin has stabilized and check
1924          * the state then. Use mod_timer() since we may be in the
1925          * middle of the timer routine when this interrupt triggers.
1926          */
1927         disable_irq_nosync(irq);
1928         mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
1929
1930         return IRQ_HANDLED;
1931 }
1932
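/*
 * Set up one MMC slot: allocate the mmc_host, derive f_min/f_max from
 * the MCI bus clock, apply the version-dependent block/segment limits
 * and request the optional card-detect and write-protect GPIOs.  The
 * slot description comes from the board's platform data; a sketch
 * (field values purely illustrative) could look like:
 *
 *	static struct mci_platform_data mci0_data = {
 *		.slot[0] = {
 *			.bus_width		= 4,
 *			.detect_pin		= AT91_PIN_PC9,
 *			.wp_pin			= -1,
 *			.detect_is_active_high	= false,
 *		},
 *	};
 */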
1933 static int __init atmci_init_slot(struct atmel_mci *host,
1934                 struct mci_slot_pdata *slot_data, unsigned int id,
1935                 u32 sdc_reg, u32 sdio_irq)
1936 {
1937         struct mmc_host                 *mmc;
1938         struct atmel_mci_slot           *slot;
1939
1940         mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
1941         if (!mmc)
1942                 return -ENOMEM;
1943
1944         slot = mmc_priv(mmc);
1945         slot->mmc = mmc;
1946         slot->host = host;
1947         slot->detect_pin = slot_data->detect_pin;
1948         slot->wp_pin = slot_data->wp_pin;
1949         slot->detect_is_active_high = slot_data->detect_is_active_high;
1950         slot->sdc_reg = sdc_reg;
1951         slot->sdio_irq = sdio_irq;
1952
1953         mmc->ops = &atmci_ops;
1954         mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1955         mmc->f_max = host->bus_hz / 2;
1956         mmc->ocr_avail  = MMC_VDD_32_33 | MMC_VDD_33_34;
1957         if (sdio_irq)
1958                 mmc->caps |= MMC_CAP_SDIO_IRQ;
1959         if (host->caps.has_highspeed)
1960                 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1961         /*
1962          * Without the read/write proof capability, it is strongly recommended
1963          * to use only a 1-bit data bus to prevent FIFO underruns and overruns,
1964          * which would corrupt data.
1965          */
1966         if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
1967                 mmc->caps |= MMC_CAP_4_BIT_DATA;
1968
1969         if (atmci_get_version(host) < 0x200) {
1970                 mmc->max_segs = 256;
1971                 mmc->max_blk_size = 4095;
1972                 mmc->max_blk_count = 256;
1973                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1974                 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
1975         } else {
1976                 mmc->max_segs = 64;
1977                 mmc->max_req_size = 32768 * 512;
1978                 mmc->max_blk_size = 32768;
1979                 mmc->max_blk_count = 512;
1980         }
1981
1982         /* Assume card is present initially */
1983         set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1984         if (gpio_is_valid(slot->detect_pin)) {
1985                 if (gpio_request(slot->detect_pin, "mmc_detect")) {
1986                         dev_dbg(&mmc->class_dev, "no detect pin available\n");
1987                         slot->detect_pin = -EBUSY;
1988                 } else if (gpio_get_value(slot->detect_pin) ^
1989                                 slot->detect_is_active_high) {
1990                         clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1991                 }
1992         }
1993
1994         if (!gpio_is_valid(slot->detect_pin))
1995                 mmc->caps |= MMC_CAP_NEEDS_POLL;
1996
1997         if (gpio_is_valid(slot->wp_pin)) {
1998                 if (gpio_request(slot->wp_pin, "mmc_wp")) {
1999                         dev_dbg(&mmc->class_dev, "no WP pin available\n");
2000                         slot->wp_pin = -EBUSY;
2001                 }
2002         }
2003
2004         host->slot[id] = slot;
2005         mmc_add_host(mmc);
2006
2007         if (gpio_is_valid(slot->detect_pin)) {
2008                 int ret;
2009
2010                 setup_timer(&slot->detect_timer, atmci_detect_change,
2011                                 (unsigned long)slot);
2012
2013                 ret = request_irq(gpio_to_irq(slot->detect_pin),
2014                                 atmci_detect_interrupt,
2015                                 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2016                                 "mmc-detect", slot);
2017                 if (ret) {
2018                         dev_dbg(&mmc->class_dev,
2019                                 "could not request IRQ %d for detect pin\n",
2020                                 gpio_to_irq(slot->detect_pin));
2021                         gpio_free(slot->detect_pin);
2022                         slot->detect_pin = -EBUSY;
2023                 }
2024         }
2025
2026         atmci_init_debugfs(slot);
2027
2028         return 0;
2029 }
2030
2031 static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
2032                 unsigned int id)
2033 {
2034         /* Debugfs stuff is cleaned up by mmc core */
2035
2036         set_bit(ATMCI_SHUTDOWN, &slot->flags);
2037         smp_wmb();
2038
2039         mmc_remove_host(slot->mmc);
2040
2041         if (gpio_is_valid(slot->detect_pin)) {
2042                 int pin = slot->detect_pin;
2043
2044                 free_irq(gpio_to_irq(pin), slot);
2045                 del_timer_sync(&slot->detect_timer);
2046                 gpio_free(pin);
2047         }
2048         if (gpio_is_valid(slot->wp_pin))
2049                 gpio_free(slot->wp_pin);
2050
2051         slot->host->slot[id] = NULL;
2052         mmc_free_host(slot->mmc);
2053 }
2054
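/*
 * dma_request_channel() filter callback: accept only a channel whose
 * DMA controller matches the device named in the platform data's slave
 * descriptor, and pass that descriptor to the driver via chan->private.
 */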
2055 static bool atmci_filter(struct dma_chan *chan, void *slave)
2056 {
2057         struct mci_dma_data     *sl = slave;
2058
2059         if (sl && find_slave_dev(sl) == chan->device->dev) {
2060                 chan->private = slave_data_ptr(sl);
2061                 return true;
2062         } else {
2063                 return false;
2064         }
2065 }
2066
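/*
 * Try to grab the DMA slave channel described by the platform data and
 * pre-compute the dmaengine slave configuration: the RDR and TDR
 * registers are the peripheral source/destination addresses, accessed
 * as 4-byte words with single-word bursts.  Returns false when no
 * channel is available so the caller can fall back to PDC or PIO.
 */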
2067 static bool atmci_configure_dma(struct atmel_mci *host)
2068 {
2069         struct mci_platform_data        *pdata;
2070
2071         if (host == NULL)
2072                 return false;
2073
2074         pdata = host->pdev->dev.platform_data;
2075
2076         if (pdata && find_slave_dev(pdata->dma_slave)) {
2077                 dma_cap_mask_t mask;
2078
2079                 /* Try to grab a DMA channel */
2080                 dma_cap_zero(mask);
2081                 dma_cap_set(DMA_SLAVE, mask);
2082                 host->dma.chan =
2083                         dma_request_channel(mask, atmci_filter, pdata->dma_slave);
2084         }
2085         if (!host->dma.chan) {
2086                 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2087                 return false;
2088         } else {
2089                 dev_info(&host->pdev->dev,
2090                                         "using %s for DMA transfers\n",
2091                                         dma_chan_name(host->dma.chan));
2092
2093                 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2094                 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2095                 host->dma_conf.src_maxburst = 1;
2096                 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2097                 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2098                 host->dma_conf.dst_maxburst = 1;
2099                 host->dma_conf.device_fc = false;
2100                 return true;
2101         }
2102 }
2103
2104 /*
2105  * The HSMCI (High Speed MCI) module is not fully compatible with the MCI
2106  * module: HSMCI provides DMA support and a new configuration register, but
2107  * no longer supports the PDC.
2108  */
2109 static void __init atmci_get_cap(struct atmel_mci *host)
2110 {
2111         unsigned int version;
2112
2113         version = atmci_get_version(host);
2114         dev_info(&host->pdev->dev,
2115                         "version: 0x%x\n", version);
2116
2117         host->caps.has_dma = 0;
2118         host->caps.has_pdc = 1;
2119         host->caps.has_cfg_reg = 0;
2120         host->caps.has_cstor_reg = 0;
2121         host->caps.has_highspeed = 0;
2122         host->caps.has_rwproof = 0;
2123         host->caps.has_odd_clk_div = 0;
2124
2125         /* keep only major version number */
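        /*
         * Note: the cases below fall through on purpose, so a newer IP
         * revision also inherits all the capabilities of the older ones.
         */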
2126         switch (version & 0xf00) {
2127         case 0x500:
2128                 host->caps.has_odd_clk_div = 1;
2129         case 0x400:
2130         case 0x300:
2131 #ifdef CONFIG_AT_HDMAC
2132                 host->caps.has_dma = 1;
2133 #else
2134                 dev_info(&host->pdev->dev,
2135                         "has DMA capability but the DMA engine is not selected, using PIO instead\n");
2136 #endif
2137                 host->caps.has_pdc = 0;
2138                 host->caps.has_cfg_reg = 1;
2139                 host->caps.has_cstor_reg = 1;
2140                 host->caps.has_highspeed = 1;
2141         case 0x200:
2142                 host->caps.has_rwproof = 1;
2143         case 0x100:
2144                 break;
2145         default:
2146                 host->caps.has_pdc = 0;
2147                 dev_warn(&host->pdev->dev,
2148                                 "Unrecognized MCI version, using minimum capabilities\n");
2149                 break;
2150         }
2151 }
2152
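/*
 * Probe: map the controller registers, query the peripheral clock for
 * the bus rate, then select the data-transfer backend in order of
 * preference - DMA engine, then PDC, then PIO - according to the
 * detected capabilities.  At least one slot must be described in the
 * platform data, and without the r/w proof capability a bounce buffer
 * sized for the largest possible request is allocated as well.
 */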
2153 static int __init atmci_probe(struct platform_device *pdev)
2154 {
2155         struct mci_platform_data        *pdata;
2156         struct atmel_mci                *host;
2157         struct resource                 *regs;
2158         unsigned int                    nr_slots;
2159         int                             irq;
2160         int                             ret;
2161
2162         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2163         if (!regs)
2164                 return -ENXIO;
2165         pdata = pdev->dev.platform_data;
2166         if (!pdata)
2167                 return -ENXIO;
2168         irq = platform_get_irq(pdev, 0);
2169         if (irq < 0)
2170                 return irq;
2171
2172         host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
2173         if (!host)
2174                 return -ENOMEM;
2175
2176         host->pdev = pdev;
2177         spin_lock_init(&host->lock);
2178         INIT_LIST_HEAD(&host->queue);
2179
2180         host->mck = clk_get(&pdev->dev, "mci_clk");
2181         if (IS_ERR(host->mck)) {
2182                 ret = PTR_ERR(host->mck);
2183                 goto err_clk_get;
2184         }
2185
2186         ret = -ENOMEM;
2187         host->regs = ioremap(regs->start, resource_size(regs));
2188         if (!host->regs)
2189                 goto err_ioremap;
2190
2191         clk_enable(host->mck);
2192         atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2193         host->bus_hz = clk_get_rate(host->mck);
2194         clk_disable(host->mck);
2195
2196         host->mapbase = regs->start;
2197
2198         tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
2199
2200         ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
2201         if (ret)
2202                 goto err_request_irq;
2203
2204         /* Get MCI capabilities and set operations according to it */
2205         atmci_get_cap(host);
2206         if (host->caps.has_dma && atmci_configure_dma(host)) {
2207                 host->prepare_data = &atmci_prepare_data_dma;
2208                 host->submit_data = &atmci_submit_data_dma;
2209                 host->stop_transfer = &atmci_stop_transfer_dma;
2210         } else if (host->caps.has_pdc) {
2211                 dev_info(&pdev->dev, "using PDC\n");
2212                 host->prepare_data = &atmci_prepare_data_pdc;
2213                 host->submit_data = &atmci_submit_data_pdc;
2214                 host->stop_transfer = &atmci_stop_transfer_pdc;
2215         } else {
2216                 dev_info(&pdev->dev, "using PIO\n");
2217                 host->prepare_data = &atmci_prepare_data;
2218                 host->submit_data = &atmci_submit_data;
2219                 host->stop_transfer = &atmci_stop_transfer;
2220         }
2221
2222         platform_set_drvdata(pdev, host);
2223
2224         /* We need at least one slot to succeed */
2225         nr_slots = 0;
2226         ret = -ENODEV;
2227         if (pdata->slot[0].bus_width) {
2228                 ret = atmci_init_slot(host, &pdata->slot[0],
2229                                 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2230                 if (!ret) {
2231                         nr_slots++;
2232                         host->buf_size = host->slot[0]->mmc->max_req_size;
2233                 }
2234         }
2235         if (pdata->slot[1].bus_width) {
2236                 ret = atmci_init_slot(host, &pdata->slot[1],
2237                                 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2238                 if (!ret) {
2239                         nr_slots++;
2240                         if (host->slot[1]->mmc->max_req_size > host->buf_size)
2241                                 host->buf_size =
2242                                         host->slot[1]->mmc->max_req_size;
2243                 }
2244         }
2245
2246         if (!nr_slots) {
2247                 dev_err(&pdev->dev, "init failed: no slot defined\n");
2248                 goto err_init_slot;
2249         }
2250
2251         if (!host->caps.has_rwproof) {
2252                 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2253                                                   &host->buf_phys_addr,
2254                                                   GFP_KERNEL);
2255                 if (!host->buffer) {
2256                         ret = -ENOMEM;
2257                         dev_err(&pdev->dev, "buffer allocation failed\n");
2258                         goto err_init_slot;
2259                 }
2260         }
2261
2262         dev_info(&pdev->dev,
2263                         "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2264                         host->mapbase, irq, nr_slots);
2265
2266         return 0;
2267
2268 err_init_slot:
2269         if (host->dma.chan)
2270                 dma_release_channel(host->dma.chan);
2271         free_irq(irq, host);
2272 err_request_irq:
2273         iounmap(host->regs);
2274 err_ioremap:
2275         clk_put(host->mck);
2276 err_clk_get:
2277         kfree(host);
2278         return ret;
2279 }
2280
2281 static int __exit atmci_remove(struct platform_device *pdev)
2282 {
2283         struct atmel_mci        *host = platform_get_drvdata(pdev);
2284         unsigned int            i;
2285
2286         platform_set_drvdata(pdev, NULL);
2287
2288         if (host->buffer)
2289                 dma_free_coherent(&pdev->dev, host->buf_size,
2290                                   host->buffer, host->buf_phys_addr);
2291
2292         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2293                 if (host->slot[i])
2294                         atmci_cleanup_slot(host->slot[i], i);
2295         }
2296
2297         clk_enable(host->mck);
2298         atmci_writel(host, ATMCI_IDR, ~0UL);
2299         atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2300         atmci_readl(host, ATMCI_SR);
2301         clk_disable(host->mck);
2302
2304         if (host->dma.chan)
2305                 dma_release_channel(host->dma.chan);
2307
2308         free_irq(platform_get_irq(pdev, 0), host);
2309         iounmap(host->regs);
2310
2311         clk_put(host->mck);
2312         kfree(host);
2313
2314         return 0;
2315 }
2316
2317 #ifdef CONFIG_PM
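/*
 * Suspend every registered slot.  If one of them refuses, resume the
 * slots that were already suspended and return the error so the system
 * suspend is aborted.
 */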
2318 static int atmci_suspend(struct device *dev)
2319 {
2320         struct atmel_mci *host = dev_get_drvdata(dev);
2321         int i;
2322
2323         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2324                 struct atmel_mci_slot *slot = host->slot[i];
2325                 int ret;
2326
2327                 if (!slot)
2328                         continue;
2329                 ret = mmc_suspend_host(slot->mmc);
2330                 if (ret < 0) {
2331                         while (--i >= 0) {
2332                                 slot = host->slot[i];
2333                                 if (slot
2334                                 && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
2335                                         mmc_resume_host(host->slot[i]->mmc);
2336                                         clear_bit(ATMCI_SUSPENDED, &slot->flags);
2337                                 }
2338                         }
2339                         return ret;
2340                 } else {
2341                         set_bit(ATMCI_SUSPENDED, &slot->flags);
2342                 }
2343         }
2344
2345         return 0;
2346 }
2347
2348 static int atmci_resume(struct device *dev)
2349 {
2350         struct atmel_mci *host = dev_get_drvdata(dev);
2351         int i;
2352         int ret = 0;
2353
2354         for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2355                 struct atmel_mci_slot *slot = host->slot[i];
2356                 int err;
2357
2359                 if (!slot)
2360                         continue;
2361                 if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
2362                         continue;
2363                 err = mmc_resume_host(slot->mmc);
2364                 if (err < 0)
2365                         ret = err;
2366                 else
2367                         clear_bit(ATMCI_SUSPENDED, &slot->flags);
2368         }
2369
2370         return ret;
2371 }
2372 static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
2373 #define ATMCI_PM_OPS    (&atmci_pm)
2374 #else
2375 #define ATMCI_PM_OPS    NULL
2376 #endif
2377
2378 static struct platform_driver atmci_driver = {
2379         .remove         = __exit_p(atmci_remove),
2380         .driver         = {
2381                 .name           = "atmel_mci",
2382                 .pm             = ATMCI_PM_OPS,
2383         },
2384 };
2385
2386 static int __init atmci_init(void)
2387 {
2388         return platform_driver_probe(&atmci_driver, atmci_probe);
2389 }
2390
2391 static void __exit atmci_exit(void)
2392 {
2393         platform_driver_unregister(&atmci_driver);
2394 }
2395
2396 late_initcall(atmci_init); /* try to load after dma driver when built-in */
2397 module_exit(atmci_exit);
2398
2399 MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2400 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2401 MODULE_LICENSE("GPL v2");