/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
                                 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
                                 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
                                 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS      (DW_MCI_DATA_ERROR_FLAGS | \
                                 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS      1
#define DW_MCI_RECV_STATUS      2
#define DW_MCI_DMA_THRESHOLD    16

#define DW_MCI_FREQ_MAX 200000000       /* unit: Hz */
#define DW_MCI_FREQ_MIN 400000          /* unit: Hz */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR           (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
                                 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
                                 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
                                 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
        u32             des0;   /* Control Descriptor */

        u32             des1;   /* Reserved */

        u32             des2;   /* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
        ((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))

        u32             des3;   /* Reserved */

        u32             des4;   /* Lower 32-bits of Buffer Address Pointer 1 */
        u32             des5;   /* Upper 32-bits of Buffer Address Pointer 1 */

        u32             des6;   /* Lower 32-bits of Next Descriptor Address */
        u32             des7;   /* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
        u32             des0;   /* Control Descriptor */
#define IDMAC_DES0_DIC  BIT(1)
#define IDMAC_DES0_LD   BIT(2)
#define IDMAC_DES0_FD   BIT(3)
#define IDMAC_DES0_CH   BIT(4)
#define IDMAC_DES0_ER   BIT(5)
#define IDMAC_DES0_CES  BIT(30)
#define IDMAC_DES0_OWN  BIT(31)

        u32             des1;   /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
        ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

        u32             des2;   /* Buffer 1 physical address */

        u32             des3;   /* Buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
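
/*
 * Illustrative note (not part of the driver): the buffer-size field set by
 * IDMAC_SET_BUFFER1_SIZE()/IDMAC_64ADDR_SET_BUFFER1_SIZE() is 13 bits wide,
 * so each descriptor covers at most 8191 (0x1fff) bytes; the 0x03ffe000 mask
 * preserves the buffer-2 size assumed to live in bits 13-25.  For example,
 * with des1 == 0 and s == 4096:
 *
 *      (0 & 0x03ffe000) | (4096 & 0x1fff) == 0x1000
 */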

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
        struct dw_mci_slot *slot = s->private;
        struct mmc_request *mrq;
        struct mmc_command *cmd;
        struct mmc_command *stop;
        struct mmc_data *data;

        /* Make sure we get a consistent snapshot */
        spin_lock_bh(&slot->host->lock);
        mrq = slot->mrq;

        if (mrq) {
                cmd = mrq->cmd;
                data = mrq->data;
                stop = mrq->stop;

                if (cmd)
                        seq_printf(s,
                                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
                                   cmd->opcode, cmd->arg, cmd->flags,
                                   cmd->resp[0], cmd->resp[1], cmd->resp[2],
                                   cmd->resp[3], cmd->error);
                if (data)
                        seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
                                   data->bytes_xfered, data->blocks,
                                   data->blksz, data->flags, data->error);
                if (stop)
                        seq_printf(s,
                                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
                                   stop->opcode, stop->arg, stop->flags,
                                   stop->resp[0], stop->resp[1], stop->resp[2],
                                   stop->resp[3], stop->error);
        }

        spin_unlock_bh(&slot->host->lock);

        return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
        return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
        .owner          = THIS_MODULE,
        .open           = dw_mci_req_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
        struct dw_mci *host = s->private;

        /* Dump the live register contents, not the register offsets */
        seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
        seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
        seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
        seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
        seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
        seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

        return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
        return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
        .owner          = THIS_MODULE,
        .open           = dw_mci_regs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
        struct mmc_host *mmc = slot->mmc;
        struct dw_mci *host = slot->host;
        struct dentry *root;
        struct dentry *node;

        root = mmc->debugfs_root;
        if (!root)
                return;

        node = debugfs_create_file("regs", S_IRUSR, root, host,
                                   &dw_mci_regs_fops);
        if (!node)
                goto err;

        node = debugfs_create_file("req", S_IRUSR, root, slot,
                                   &dw_mci_req_fops);
        if (!node)
                goto err;

        node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
        if (!node)
                goto err;

        node = debugfs_create_x32("pending_events", S_IRUSR, root,
                                  (u32 *)&host->pending_events);
        if (!node)
                goto err;

        node = debugfs_create_x32("completed_events", S_IRUSR, root,
                                  (u32 *)&host->completed_events);
        if (!node)
                goto err;

        return;

err:
        dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
        struct mmc_data *data;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 cmdr;

        cmd->error = -EINPROGRESS;

        cmdr = cmd->opcode;

        if (cmd->opcode == MMC_STOP_TRANSMISSION ||
            cmd->opcode == MMC_GO_IDLE_STATE ||
            cmd->opcode == MMC_GO_INACTIVE_STATE ||
            (cmd->opcode == SD_IO_RW_DIRECT &&
             ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
                cmdr |= SDMMC_CMD_STOP;
        else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
                cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

        if (cmd->opcode == SD_SWITCH_VOLTAGE) {
                u32 clk_en_a;

                /* Special bit makes CMD11 not die */
                cmdr |= SDMMC_CMD_VOLT_SWITCH;

                /* Change state to continue to handle CMD11 weirdness */
                WARN_ON(slot->host->state != STATE_SENDING_CMD);
                slot->host->state = STATE_SENDING_CMD11;

                /*
                 * We need to disable low power mode (automatic clock stop)
                 * while doing voltage switch so we don't confuse the card,
                 * since stopping the clock is a specific part of the UHS
                 * voltage change dance.
                 *
                 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
                 * unconditionally turned back on in dw_mci_setup_bus() if it's
                 * ever called with a non-zero clock.  That shouldn't happen
                 * until the voltage change is all done.
                 */
                clk_en_a = mci_readl(host, CLKENA);
                clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
                mci_writel(host, CLKENA, clk_en_a);
                mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
                             SDMMC_CMD_PRV_DAT_WAIT, 0);
        }

        if (cmd->flags & MMC_RSP_PRESENT) {
                /* We expect a response, so set this bit */
                cmdr |= SDMMC_CMD_RESP_EXP;
                if (cmd->flags & MMC_RSP_136)
                        cmdr |= SDMMC_CMD_RESP_LONG;
        }

        if (cmd->flags & MMC_RSP_CRC)
                cmdr |= SDMMC_CMD_RESP_CRC;

        data = cmd->data;
        if (data) {
                cmdr |= SDMMC_CMD_DAT_EXP;
                if (data->flags & MMC_DATA_STREAM)
                        cmdr |= SDMMC_CMD_STRM_MODE;
                if (data->flags & MMC_DATA_WRITE)
                        cmdr |= SDMMC_CMD_DAT_WR;
        }

        if (drv_data && drv_data->prepare_command)
                drv_data->prepare_command(slot->host, &cmdr);

        return cmdr;
}
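
/*
 * Illustrative example (not driver code): for a single-block read, CMD17
 * with an R1 response and cmd->data set, the flags built above compose to:
 *
 *      cmdr = 17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 *             SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP;
 */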

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
        struct mmc_command *stop;
        u32 cmdr;

        if (!cmd->data)
                return 0;

        stop = &host->stop_abort;
        cmdr = cmd->opcode;
        memset(stop, 0, sizeof(struct mmc_command));

        if (cmdr == MMC_READ_SINGLE_BLOCK ||
            cmdr == MMC_READ_MULTIPLE_BLOCK ||
            cmdr == MMC_WRITE_BLOCK ||
            cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
            cmdr == MMC_SEND_TUNING_BLOCK ||
            cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
                stop->opcode = MMC_STOP_TRANSMISSION;
                stop->arg = 0;
                stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
        } else if (cmdr == SD_IO_RW_EXTENDED) {
                stop->opcode = SD_IO_RW_DIRECT;
                stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
                             ((cmd->arg >> 28) & 0x7);
                stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
        } else {
                return 0;
        }

        cmdr = stop->opcode | SDMMC_CMD_STOP |
                SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

        return cmdr;
}
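
/*
 * Note on the SD_IO_RW_DIRECT argument built above (informational only):
 * bit 31 selects a write, bits 30:28 address function 0 (the CCCR),
 * bits 25:9 carry the SDIO_CCCR_ABORT register address, and the low data
 * bits hold the number of the function being aborted, recovered from the
 * original CMD53 argument.
 */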

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(500);

        /*
         * Databook says that before issuing a new data transfer command
         * we need to check to see if the card is busy.  Data transfer commands
         * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
         *
         * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
         * expected.
         */
        if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
            !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
                while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
                        if (time_after(jiffies, timeout)) {
                                /* Command will fail; we'll pass error then */
                                dev_err(host->dev, "Busy; trying anyway\n");
                                break;
                        }
                        udelay(10);
                }
        }
}

static void dw_mci_start_command(struct dw_mci *host,
                                 struct mmc_command *cmd, u32 cmd_flags)
{
        host->cmd = cmd;
        dev_vdbg(host->dev,
                 "start command: ARGR=0x%08x CMDR=0x%08x\n",
                 cmd->arg, cmd_flags);

        mci_writel(host, CMDARG, cmd->arg);
        wmb();
        dw_mci_wait_while_busy(host, cmd_flags);

        mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
        struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

        dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
        if (host->using_dma) {
                host->dma_ops->stop(host);
                host->dma_ops->cleanup(host);
        }

        /* Data transfer was stopped by the interrupt handler */
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
        if (data->flags & MMC_DATA_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
        struct mmc_data *data = host->data;

        if (data && !data->host_cookie)
                dma_unmap_sg(host->dev,
                             data->sg,
                             data->sg_len,
                             dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
        u32 bmod = mci_readl(host, BMOD);

        /* Software reset of DMA */
        bmod |= SDMMC_IDMAC_SWRESET;
        mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
        u32 temp;

        /* Disable and reset the IDMAC interface */
        temp = mci_readl(host, CTRL);
        temp &= ~SDMMC_CTRL_USE_IDMAC;
        temp |= SDMMC_CTRL_DMA_RESET;
        mci_writel(host, CTRL, temp);

        /* Stop the IDMAC running */
        temp = mci_readl(host, BMOD);
        temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
        temp |= SDMMC_IDMAC_SWRESET;
        mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
        struct mmc_data *data = host->data;

        dev_vdbg(host->dev, "DMA complete\n");

        host->dma_ops->cleanup(host);

        /*
         * If the card was removed, data will be NULL. No point in trying to
         * send the stop command or waiting for NBUSY in this case.
         */
        if (data) {
                set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
                tasklet_schedule(&host->tasklet);
        }
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
                                    unsigned int sg_len)
{
        int i;

        if (host->dma_64bit_address == 1) {
                struct idmac_desc_64addr *desc = host->sg_cpu;

                for (i = 0; i < sg_len; i++, desc++) {
                        unsigned int length = sg_dma_len(&data->sg[i]);
                        u64 mem_addr = sg_dma_address(&data->sg[i]);

                        /*
                         * Set the OWN bit and disable interrupts for this
                         * descriptor
                         */
                        desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
                                                IDMAC_DES0_CH;
                        /* Buffer length */
                        IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);

                        /* Physical address to DMA to/from */
                        desc->des4 = mem_addr & 0xffffffff;
                        desc->des5 = mem_addr >> 32;
                }

                /* Set first descriptor */
                desc = host->sg_cpu;
                desc->des0 |= IDMAC_DES0_FD;

                /* Set last descriptor */
                desc = host->sg_cpu + (i - 1) *
                                sizeof(struct idmac_desc_64addr);
                desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
                desc->des0 |= IDMAC_DES0_LD;

        } else {
                struct idmac_desc *desc = host->sg_cpu;

                for (i = 0; i < sg_len; i++, desc++) {
                        unsigned int length = sg_dma_len(&data->sg[i]);
                        u32 mem_addr = sg_dma_address(&data->sg[i]);

                        /*
                         * Set the OWN bit and disable interrupts for this
                         * descriptor
                         */
                        desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
                                                IDMAC_DES0_CH;
                        /* Buffer length */
                        IDMAC_SET_BUFFER1_SIZE(desc, length);

                        /* Physical address to DMA to/from */
                        desc->des2 = mem_addr;
                }

                /* Set first descriptor */
                desc = host->sg_cpu;
                desc->des0 |= IDMAC_DES0_FD;

                /* Set last descriptor */
                desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
                desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
                desc->des0 |= IDMAC_DES0_LD;
        }

        wmb();
}
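
/*
 * Sketch of the resulting chain for a three-segment scatterlist (32-bit
 * descriptor case, informational only):
 *
 *      desc[0]: OWN | DIC | CH | FD    des2 -> segment 0
 *      desc[1]: OWN | DIC | CH         des2 -> segment 1
 *      desc[2]: OWN | LD               des2 -> segment 2
 *
 * DIC suppresses the per-descriptor interrupt, so only the last descriptor
 * (LD, with DIC cleared) signals completion.
 */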

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
        u32 temp;

        dw_mci_translate_sglist(host, host->data, sg_len);

        /* Make sure to reset DMA in case we did PIO before this */
        dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
        dw_mci_idmac_reset(host);

        /* Select IDMAC interface */
        temp = mci_readl(host, CTRL);
        temp |= SDMMC_CTRL_USE_IDMAC;
        mci_writel(host, CTRL, temp);

        wmb();

        /* Enable the IDMAC */
        temp = mci_readl(host, BMOD);
        temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
        mci_writel(host, BMOD, temp);

        /* Start it running */
        mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
        int i;

        if (host->dma_64bit_address == 1) {
                struct idmac_desc_64addr *p;

                /* Number of descriptors in the ring buffer */
                host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

                /* Forward link the descriptor list */
                for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
                                                                i++, p++) {
                        p->des6 = (host->sg_dma +
                                        (sizeof(struct idmac_desc_64addr) *
                                                        (i + 1))) & 0xffffffff;

                        p->des7 = (u64)(host->sg_dma +
                                        (sizeof(struct idmac_desc_64addr) *
                                                        (i + 1))) >> 32;
                        /* Initialize reserved and buffer size fields to "0" */
                        p->des1 = 0;
                        p->des2 = 0;
                        p->des3 = 0;
                }

                /* Set the last descriptor as the end-of-ring descriptor */
                p->des6 = host->sg_dma & 0xffffffff;
                p->des7 = (u64)host->sg_dma >> 32;
                p->des0 = IDMAC_DES0_ER;

        } else {
                struct idmac_desc *p;

                /* Number of descriptors in the ring buffer */
                host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

                /* Forward link the descriptor list */
                for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
                        p->des3 = host->sg_dma + (sizeof(struct idmac_desc) *
                                                                (i + 1));

                /* Set the last descriptor as the end-of-ring descriptor */
                p->des3 = host->sg_dma;
                p->des0 = IDMAC_DES0_ER;
        }

        dw_mci_idmac_reset(host);

        if (host->dma_64bit_address == 1) {
                /* Mask out interrupts - get Tx & Rx complete only */
                mci_writel(host, IDSTS64, IDMAC_INT_CLR);
                mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
                                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

                /* Set the descriptor base address */
                mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
                mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

        } else {
                /* Mask out interrupts - get Tx & Rx complete only */
                mci_writel(host, IDSTS, IDMAC_INT_CLR);
                mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
                                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

                /* Set the descriptor base address */
                mci_writel(host, DBADDR, host->sg_dma);
        }

        return 0;
}
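
/*
 * Resulting layout (sketch): the descriptors form a forward-linked ring in
 * a single page.  Each des3 (or the des6/des7 pair in the 64-bit variant)
 * holds the DMA address of the next descriptor, and the final descriptor
 * points back at the head with IDMAC_DES0_ER set to mark the end of ring.
 */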

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
        .init = dw_mci_idmac_init,
        .start = dw_mci_idmac_start_dma,
        .stop = dw_mci_idmac_stop_dma,
        .complete = dw_mci_idmac_complete_dma,
        .cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
                                   struct mmc_data *data,
                                   bool next)
{
        struct scatterlist *sg;
        unsigned int i, sg_len;

        if (!next && data->host_cookie)
                return data->host_cookie;

        /*
         * We don't do DMA on "complex" transfers, i.e. with
         * non-word-aligned buffers or lengths. Also, we don't bother
         * with all the DMA setup overhead for short transfers.
         */
        if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
                return -EINVAL;

        if (data->blksz & 3)
                return -EINVAL;

        for_each_sg(data->sg, sg, data->sg_len, i) {
                if (sg->offset & 3 || sg->length & 3)
                        return -EINVAL;
        }

        sg_len = dma_map_sg(host->dev,
                            data->sg,
                            data->sg_len,
                            dw_mci_get_dma_dir(data));
        if (sg_len == 0)
                return -EINVAL;

        if (next)
                data->host_cookie = sg_len;

        return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
                           struct mmc_request *mrq,
                           bool is_first_req)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!slot->host->use_dma || !data)
                return;

        if (data->host_cookie) {
                data->host_cookie = 0;
                return;
        }

        if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
                data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
                            struct mmc_request *mrq,
                            int err)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!slot->host->use_dma || !data)
                return;

        if (data->host_cookie)
                dma_unmap_sg(slot->host->dev,
                             data->sg,
                             data->sg_len,
                             dw_mci_get_dma_dir(data));
        data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
        unsigned int blksz = data->blksz;
        const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
        u32 fifo_width = 1 << host->data_shift;
        u32 blksz_depth = blksz / fifo_width, fifoth_val;
        u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
        int idx = ARRAY_SIZE(mszs) - 1;

        tx_wmark = (host->fifo_depth) / 2;
        tx_wmark_invers = host->fifo_depth - tx_wmark;

        /*
         * Use an MSIZE of 1 (msize index 0) if blksz is not a multiple
         * of the FIFO width.
         */
        if (blksz % fifo_width) {
                msize = 0;
                rx_wmark = 1;
                goto done;
        }

        do {
                if (!((blksz_depth % mszs[idx]) ||
                     (tx_wmark_invers % mszs[idx]))) {
                        msize = idx;
                        rx_wmark = mszs[idx] - 1;
                        break;
                }
        } while (--idx > 0);
        /*
         * If idx reaches '0', no larger burst size fits, so the initial
         * values (msize = 0, rx_wmark = 1) are used.
         */
done:
        fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
        mci_writel(host, FIFOTH, fifoth_val);
#endif
}
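
/*
 * Worked example (assumed values): with a 32-bit FIFO (fifo_width = 4),
 * fifo_depth = 64 and blksz = 512, we get blksz_depth = 128, tx_wmark = 32
 * and tx_wmark_invers = 32.  Scanning mszs[] downward, the first burst size
 * dividing both 128 and 32 is mszs[4] = 32, so FIFOTH is programmed with
 * msize index 4 and rx_wmark = 31.
 */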

static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
        unsigned int blksz = data->blksz;
        u32 blksz_depth, fifo_depth;
        u16 thld_size;

        WARN_ON(!(data->flags & MMC_DATA_READ));

        /*
         * CDTHRCTL doesn't exist prior to 240A (in fact that register offset
         * is in the FIFO region, so we really shouldn't access it).
         */
        if (host->verid < DW_MMC_240A)
                return;

        if (host->timing != MMC_TIMING_MMC_HS200 &&
            host->timing != MMC_TIMING_MMC_HS400 &&
            host->timing != MMC_TIMING_UHS_SDR104)
                goto disable;

        blksz_depth = blksz / (1 << host->data_shift);
        fifo_depth = host->fifo_depth;

        if (blksz_depth > fifo_depth)
                goto disable;

        /*
         * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
         * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
         * Currently just choose blksz.
         */
        thld_size = blksz;
        mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
        return;

disable:
        mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
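
/*
 * Example (assumed values, HS200 timing): with fifo_width = 4 and
 * fifo_depth = 32, a 512-byte block needs blksz_depth = 128 FIFO locations
 * and exceeds the FIFO, so the read threshold is disabled; a 64-byte block
 * (blksz_depth = 16) fits, so CDTHRCTL is armed with a threshold of 64.
 */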

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
        unsigned long irqflags;
        int sg_len;
        u32 temp;

        host->using_dma = 0;

        /* If we don't have a channel, we can't do DMA */
        if (!host->use_dma)
                return -ENODEV;

        sg_len = dw_mci_pre_dma_transfer(host, data, 0);
        if (sg_len < 0) {
                host->dma_ops->stop(host);
                return sg_len;
        }

        host->using_dma = 1;

        dev_vdbg(host->dev,
                 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
                 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
                 sg_len);

        /*
         * Decide the MSIZE and RX/TX Watermark.
         * If current block size is same with previous size,
         * no need to update fifoth.
         */
        if (host->prev_blksz != data->blksz)
                dw_mci_adjust_fifoth(host, data);

        /* Enable the DMA interface */
        temp = mci_readl(host, CTRL);
        temp |= SDMMC_CTRL_DMA_ENABLE;
        mci_writel(host, CTRL, temp);

        /* Disable RX/TX IRQs, let DMA handle it */
        spin_lock_irqsave(&host->irq_lock, irqflags);
        temp = mci_readl(host, INTMASK);
        temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
        mci_writel(host, INTMASK, temp);
        spin_unlock_irqrestore(&host->irq_lock, irqflags);

        host->dma_ops->start(host, sg_len);

        return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
        unsigned long irqflags;
        u32 temp;

        data->error = -EINPROGRESS;

        WARN_ON(host->data);
        host->sg = NULL;
        host->data = data;

        if (data->flags & MMC_DATA_READ) {
                host->dir_status = DW_MCI_RECV_STATUS;
                dw_mci_ctrl_rd_thld(host, data);
        } else {
                host->dir_status = DW_MCI_SEND_STATUS;
        }

        if (dw_mci_submit_data_dma(host, data)) {
                int flags = SG_MITER_ATOMIC;

                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;

                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->sg = data->sg;
                host->part_buf_start = 0;
                host->part_buf_count = 0;

                mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

                spin_lock_irqsave(&host->irq_lock, irqflags);
                temp = mci_readl(host, INTMASK);
                temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
                mci_writel(host, INTMASK, temp);
                spin_unlock_irqrestore(&host->irq_lock, irqflags);

                temp = mci_readl(host, CTRL);
                temp &= ~SDMMC_CTRL_DMA_ENABLE;
                mci_writel(host, CTRL, temp);

                /*
                 * Use the initial fifoth_val for PIO mode.
                 * If the next issued data may be transferred by DMA mode,
                 * prev_blksz should be invalidated.
                 */
                mci_writel(host, FIFOTH, host->fifoth_val);
                host->prev_blksz = 0;
        } else {
                /*
                 * Keep the current block size.
                 * It will be used to decide whether to update
                 * the fifoth register next time.
                 */
                host->prev_blksz = data->blksz;
        }
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
        struct dw_mci *host = slot->host;
        unsigned long timeout = jiffies + msecs_to_jiffies(500);
        unsigned int cmd_status = 0;

        mci_writel(host, CMDARG, arg);
        wmb();
        dw_mci_wait_while_busy(host, cmd);
        mci_writel(host, CMD, SDMMC_CMD_START | cmd);

        while (time_before(jiffies, timeout)) {
                cmd_status = mci_readl(host, CMD);
                if (!(cmd_status & SDMMC_CMD_START))
                        return;
        }
        dev_err(&slot->mmc->class_dev,
                "Timeout sending command (cmd %#x arg %#x status %#x)\n",
                cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
        struct dw_mci *host = slot->host;
        unsigned int clock = slot->clock;
        u32 div;
        u32 clk_en_a;
        u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

        /* We must continue to set bit 28 in CMD until the change is complete */
        if (host->state == STATE_WAITING_CMD11_DONE)
                sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

        if (!clock) {
                mci_writel(host, CLKENA, 0);
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
        } else if (clock != host->current_speed || force_clkinit) {
                div = host->bus_hz / clock;
                if (host->bus_hz % clock && host->bus_hz > clock)
                        /*
                         * move the + 1 after the divide to prevent
                         * over-clocking the card.
                         */
                        div += 1;

                div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

                if ((clock << div) != slot->__clk_old || force_clkinit)
                        dev_info(&slot->mmc->class_dev,
                                 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
                                 slot->id, host->bus_hz, clock,
                                 div ? ((host->bus_hz / div) >> 1) :
                                 host->bus_hz, div);

                /* disable clock */
                mci_writel(host, CLKENA, 0);
                mci_writel(host, CLKSRC, 0);

                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);

                /* set clock to desired speed */
                mci_writel(host, CLKDIV, div);

                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);

                /* enable clock; only low power if no SDIO */
                clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
                if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
                        clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
                mci_writel(host, CLKENA, clk_en_a);

                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);

                /* remember the clock value, reflecting the clock divider */
                slot->__clk_old = clock << div;
        }

        host->current_speed = clock;

        /* Set the current slot bus width */
        mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
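
/*
 * Worked example (assumed values): with bus_hz = 50 MHz and a requested
 * clock of 400 kHz, div = 50000000 / 400000 = 125 with no remainder, and
 * DIV_ROUND_UP(125, 2) = 63 is written to CLKDIV.  The card then runs at
 * 50 MHz / (2 * 63) ~= 396.8 kHz, never faster than requested.
 */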

static void __dw_mci_start_request(struct dw_mci *host,
                                   struct dw_mci_slot *slot,
                                   struct mmc_command *cmd)
{
        struct mmc_request *mrq;
        struct mmc_data *data;
        u32 cmdflags;

        mrq = slot->mrq;

        host->cur_slot = slot;
        host->mrq = mrq;

        host->pending_events = 0;
        host->completed_events = 0;
        host->cmd_status = 0;
        host->data_status = 0;
        host->dir_status = 0;

        data = cmd->data;
        if (data) {
                mci_writel(host, TMOUT, 0xFFFFFFFF);
                mci_writel(host, BYTCNT, data->blksz * data->blocks);
                mci_writel(host, BLKSIZ, data->blksz);
        }

        cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

        /* this is the first command, send the initialization clock */
        if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
                cmdflags |= SDMMC_CMD_INIT;

        if (data) {
                dw_mci_submit_data(host, data);
                wmb();
        }

        dw_mci_start_command(host, cmd, cmdflags);

        if (cmd->opcode == SD_SWITCH_VOLTAGE) {
                /*
                 * Databook says to fail after 2ms w/ no response; give an
                 * extra jiffy just in case we're about to roll over.
                 */
                mod_timer(&host->cmd11_timer,
                          jiffies + msecs_to_jiffies(2) + 1);
        }

        if (mrq->stop)
                host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
        else
                host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
                                 struct dw_mci_slot *slot)
{
        struct mmc_request *mrq = slot->mrq;
        struct mmc_command *cmd;

        cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
        __dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
                                 struct mmc_request *mrq)
{
        dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
                 host->state);

        slot->mrq = mrq;

        if (host->state == STATE_WAITING_CMD11_DONE) {
                dev_warn(&slot->mmc->class_dev,
                         "Voltage change didn't complete\n");
                /*
                 * this case isn't expected to happen, so we can
                 * either crash here or just try to continue on
                 * in the closest possible state
                 */
                host->state = STATE_IDLE;
        }

        if (host->state == STATE_IDLE) {
                host->state = STATE_SENDING_CMD;
                dw_mci_start_request(host, slot);
        } else {
                list_add_tail(&slot->queue_node, &host->queue);
        }
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;

        WARN_ON(slot->mrq);

        /*
         * The check for card presence and queueing of the request must be
         * atomic, otherwise the card could be removed in between and the
         * request wouldn't fail until another card was inserted.
         */
        spin_lock_bh(&host->lock);

        if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
                spin_unlock_bh(&host->lock);
                mrq->cmd->error = -ENOMEDIUM;
                mmc_request_done(mmc, mrq);
                return;
        }

        dw_mci_queue_request(host, slot, mrq);

        spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 regs;
        int ret;

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_4:
                slot->ctype = SDMMC_CTYPE_4BIT;
                break;
        case MMC_BUS_WIDTH_8:
                slot->ctype = SDMMC_CTYPE_8BIT;
                break;
        default:
                /* set default 1 bit mode */
                slot->ctype = SDMMC_CTYPE_1BIT;
        }

        regs = mci_readl(slot->host, UHS_REG);

        /* DDR mode set */
        if (ios->timing == MMC_TIMING_MMC_DDR52 ||
            ios->timing == MMC_TIMING_MMC_HS400)
                regs |= ((0x1 << slot->id) << 16);
        else
                regs &= ~((0x1 << slot->id) << 16);

        mci_writel(slot->host, UHS_REG, regs);
        slot->host->timing = ios->timing;

        /*
         * Use mirror of ios->clock to prevent race with mmc
         * core ios update when finding the minimum.
         */
        slot->clock = ios->clock;

        if (drv_data && drv_data->set_ios)
                drv_data->set_ios(slot->host, ios);

        switch (ios->power_mode) {
        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc)) {
                        ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
                                        ios->vdd);
                        if (ret) {
                                dev_err(slot->host->dev,
                                        "failed to enable vmmc regulator\n");
                                /* return, if we failed to turn on vmmc */
                                return;
                        }
                }
                set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
                regs = mci_readl(slot->host, PWREN);
                regs |= (1 << slot->id);
                mci_writel(slot->host, PWREN, regs);
                break;
        case MMC_POWER_ON:
                if (!slot->host->vqmmc_enabled) {
                        if (!IS_ERR(mmc->supply.vqmmc)) {
                                ret = regulator_enable(mmc->supply.vqmmc);
                                if (ret < 0)
                                        dev_err(slot->host->dev,
                                                "failed to enable vqmmc\n");
                                else
                                        slot->host->vqmmc_enabled = true;

                        } else {
                                /* Keep track so we don't reset again */
                                slot->host->vqmmc_enabled = true;
                        }

                        /* Reset our state machine after powering on */
                        dw_mci_ctrl_reset(slot->host,
                                          SDMMC_CTRL_ALL_RESET_FLAGS);
                }

                /* Adjust clock / bus width after power is up */
                dw_mci_setup_bus(slot, false);

                break;
        case MMC_POWER_OFF:
                /* Turn clock off before power goes down */
                dw_mci_setup_bus(slot, false);

                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

                if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
                        regulator_disable(mmc->supply.vqmmc);
                slot->host->vqmmc_enabled = false;

                regs = mci_readl(slot->host, PWREN);
                regs &= ~(1 << slot->id);
                mci_writel(slot->host, PWREN, regs);
                break;
        default:
                break;
        }

        if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
                slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        u32 status;

        /*
         * Check the busy bit which is low when DAT[3:0]
         * (the data lines) are 0000
         */
        status = mci_readl(slot->host, STATUS);

        return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        u32 uhs;
        u32 v18 = SDMMC_UHS_18V << slot->id;
        int min_uv, max_uv;
        int ret;

        /*
         * Program the voltage.  Note that some instances of dw_mmc may use
         * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
         * does no harm but you need to set the regulator directly.  Try both.
         */
        uhs = mci_readl(host, UHS_REG);
        if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
                min_uv = 2700000;
                max_uv = 3600000;
                uhs &= ~v18;
        } else {
                min_uv = 1700000;
                max_uv = 1950000;
                uhs |= v18;
        }
        if (!IS_ERR(mmc->supply.vqmmc)) {
                ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);

                if (ret) {
                        dev_dbg(&mmc->class_dev,
                                         "Regulator set error %d: %d - %d\n",
                                         ret, min_uv, max_uv);
                        return ret;
                }
        }
        mci_writel(host, UHS_REG, uhs);

        return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
        int read_only;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        int gpio_ro = mmc_gpio_get_ro(mmc);

        /* Use platform get_ro function, else try on board write protect */
        if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
                        (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
                read_only = 0;
        else if (!IS_ERR_VALUE(gpio_ro))
                read_only = gpio_ro;
        else
                read_only =
                        mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

        dev_dbg(&mmc->class_dev, "card is %s\n",
                read_only ? "read-only" : "read-write");

        return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
        int present;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci_board *brd = slot->host->pdata;
        struct dw_mci *host = slot->host;
        int gpio_cd = mmc_gpio_get_cd(mmc);

        /* Use platform get_cd function, else try onboard card detect */
        if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
                present = 1;
        else if (!IS_ERR_VALUE(gpio_cd))
                present = gpio_cd;
        else
                present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
                        == 0 ? 1 : 0;

        spin_lock_bh(&host->lock);
        if (present) {
                set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
                dev_dbg(&mmc->class_dev, "card is present\n");
        } else {
                clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
                dev_dbg(&mmc->class_dev, "card is not present\n");
        }
        spin_unlock_bh(&host->lock);

        return present;
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;

        /*
         * Low power mode will stop the card clock when idle.  According to the
         * description of the CLKENA register we should disable low power mode
         * for SDIO cards if we need SDIO interrupts to work.
         */
        if (mmc->caps & MMC_CAP_SDIO_IRQ) {
                const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
                u32 clk_en_a_old;
                u32 clk_en_a;

                clk_en_a_old = mci_readl(host, CLKENA);

                if (card->type == MMC_TYPE_SDIO ||
                    card->type == MMC_TYPE_SD_COMBO) {
                        set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
                        clk_en_a = clk_en_a_old & ~clken_low_pwr;
                } else {
                        clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
                        clk_en_a = clk_en_a_old | clken_low_pwr;
                }

                if (clk_en_a != clk_en_a_old) {
                        mci_writel(host, CLKENA, clk_en_a);
                        mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
                                     SDMMC_CMD_PRV_DAT_WAIT, 0);
                }
        }
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        unsigned long irqflags;
        u32 int_mask;

        spin_lock_irqsave(&host->irq_lock, irqflags);

        /* Enable/disable Slot Specific SDIO interrupt */
        int_mask = mci_readl(host, INTMASK);
        if (enb)
                int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
        else
                int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
        mci_writel(host, INTMASK, int_mask);

        spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        const struct dw_mci_drv_data *drv_data = host->drv_data;
        int err = -ENOSYS;

        if (drv_data && drv_data->execute_tuning)
                err = drv_data->execute_tuning(slot);
        return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
        const struct dw_mci_drv_data *drv_data = host->drv_data;

        if (drv_data && drv_data->prepare_hs400_tuning)
                return drv_data->prepare_hs400_tuning(host, ios);

        return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
        .request                = dw_mci_request,
        .pre_req                = dw_mci_pre_req,
        .post_req               = dw_mci_post_req,
        .set_ios                = dw_mci_set_ios,
        .get_ro                 = dw_mci_get_ro,
        .get_cd                 = dw_mci_get_cd,
        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
        .execute_tuning         = dw_mci_execute_tuning,
        .card_busy              = dw_mci_card_busy,
        .start_signal_voltage_switch = dw_mci_switch_voltage,
        .init_card              = dw_mci_init_card,
        .prepare_hs400_tuning   = dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
        __releases(&host->lock)
        __acquires(&host->lock)
{
        struct dw_mci_slot *slot;
        struct mmc_host *prev_mmc = host->cur_slot->mmc;

        WARN_ON(host->cmd || host->data);

        host->cur_slot->mrq = NULL;
        host->mrq = NULL;
        if (!list_empty(&host->queue)) {
                slot = list_entry(host->queue.next,
                                  struct dw_mci_slot, queue_node);
                list_del(&slot->queue_node);
                dev_vdbg(host->dev, "list not empty: %s is next\n",
                         mmc_hostname(slot->mmc));
                host->state = STATE_SENDING_CMD;
                dw_mci_start_request(host, slot);
        } else {
                dev_vdbg(host->dev, "list empty\n");

                if (host->state == STATE_SENDING_CMD11)
                        host->state = STATE_WAITING_CMD11_DONE;
                else
                        host->state = STATE_IDLE;
        }

        spin_unlock(&host->lock);
        mmc_request_done(prev_mmc, mrq);
        spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
        u32 status = host->cmd_status;

        host->cmd_status = 0;

        /* Read the response from the card (up to 16 bytes) */
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        cmd->resp[3] = mci_readl(host, RESP0);
                        cmd->resp[2] = mci_readl(host, RESP1);
                        cmd->resp[1] = mci_readl(host, RESP2);
                        cmd->resp[0] = mci_readl(host, RESP3);
                } else {
                        cmd->resp[0] = mci_readl(host, RESP0);
                        cmd->resp[1] = 0;
                        cmd->resp[2] = 0;
                        cmd->resp[3] = 0;
                }
        }

        if (status & SDMMC_INT_RTO)
                cmd->error = -ETIMEDOUT;
        else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
                cmd->error = -EILSEQ;
        else if (status & SDMMC_INT_RESP_ERR)
                cmd->error = -EIO;
        else
                cmd->error = 0;

        if (cmd->error) {
                /* newer IP versions need a delay between retries */
                if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
                        mdelay(20);
        }

        return cmd->error;
}
1476
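/*
 * Map data-phase error bits to an errno.  Any error also triggers a
 * full host reset, since data may still be sitting in the FIFO; only
 * a clean transfer reports bytes_xfered = blocks * blksz.
 */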
1477 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1478 {
1479         u32 status = host->data_status;
1480
1481         if (status & DW_MCI_DATA_ERROR_FLAGS) {
1482                 if (status & SDMMC_INT_DRTO) {
1483                         data->error = -ETIMEDOUT;
1484                 } else if (status & SDMMC_INT_DCRC) {
1485                         data->error = -EILSEQ;
1486                 } else if (status & SDMMC_INT_EBE) {
1487                         if (host->dir_status ==
1488                                 DW_MCI_SEND_STATUS) {
1489                                 /*
1490                                  * No data CRC status was returned.
1491                                  * The number of bytes transferred
1492                                  * will be exaggerated in PIO mode.
1493                                  */
1494                                 data->bytes_xfered = 0;
1495                                 data->error = -ETIMEDOUT;
1496                         } else if (host->dir_status ==
1497                                         DW_MCI_RECV_STATUS) {
1498                                 data->error = -EIO;
1499                         }
1500                 } else {
1501                         /* includes SDMMC_INT_SBE (start bit error) */
1502                         data->error = -EIO;
1503                 }
1504
1505                 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1506
1507                 /*
1508                  * After an error, there may be data lingering
1509                  * in the FIFO
1510                  */
1511                 dw_mci_reset(host);
1512         } else {
1513                 data->bytes_xfered = data->blocks * data->blksz;
1514                 data->error = 0;
1515         }
1516
1517         return data->error;
1518 }
1519
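/*
 * Bottom-half state machine.  The IRQ handler only latches status and
 * sets pending_events; this tasklet walks a request through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP, looping
 * until the state stops changing so that already-pending events are
 * consumed in a single pass.
 */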
1520 static void dw_mci_tasklet_func(unsigned long priv)
1521 {
1522         struct dw_mci *host = (struct dw_mci *)priv;
1523         struct mmc_data *data;
1524         struct mmc_command *cmd;
1525         struct mmc_request *mrq;
1526         enum dw_mci_state state;
1527         enum dw_mci_state prev_state;
1528         unsigned int err;
1529
1530         spin_lock(&host->lock);
1531
1532         state = host->state;
1533         data = host->data;
1534         mrq = host->mrq;
1535
1536         do {
1537                 prev_state = state;
1538
1539                 switch (state) {
1540                 case STATE_IDLE:
1541                 case STATE_WAITING_CMD11_DONE:
1542                         break;
1543
1544                 case STATE_SENDING_CMD11:
1545                 case STATE_SENDING_CMD:
1546                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1547                                                 &host->pending_events))
1548                                 break;
1549
1550                         cmd = host->cmd;
1551                         host->cmd = NULL;
1552                         set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1553                         err = dw_mci_command_complete(host, cmd);
1554                         if (cmd == mrq->sbc && !err) {
1555                                 prev_state = state = STATE_SENDING_CMD;
1556                                 __dw_mci_start_request(host, host->cur_slot,
1557                                                        mrq->cmd);
1558                                 goto unlock;
1559                         }
1560
1561                         if (cmd->data && err) {
1562                                 dw_mci_stop_dma(host);
1563                                 send_stop_abort(host, data);
1564                                 state = STATE_SENDING_STOP;
1565                                 break;
1566                         }
1567
1568                         if (!cmd->data || err) {
1569                                 dw_mci_request_end(host, mrq);
1570                                 goto unlock;
1571                         }
1572
1573                         prev_state = state = STATE_SENDING_DATA;
1574                         /* fall through */
1575
1576                 case STATE_SENDING_DATA:
1577                         /*
1578                          * We could get a data error and never a transfer
1579                          * complete so we'd better check for it here.
1580                          *
1581                          * Note that we don't really care if we also got a
1582                          * transfer complete; stopping the DMA and sending an
1583                          * abort won't hurt.
1584                          */
1585                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1586                                                &host->pending_events)) {
1587                                 dw_mci_stop_dma(host);
1588                                 if (data->stop ||
1589                                     !(host->data_status & (SDMMC_INT_DRTO |
1590                                                            SDMMC_INT_EBE)))
1591                                         send_stop_abort(host, data);
1592                                 state = STATE_DATA_ERROR;
1593                                 break;
1594                         }
1595
1596                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1597                                                 &host->pending_events))
1598                                 break;
1599
1600                         set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1601
1602                         /*
1603                          * Handle an EVENT_DATA_ERROR that might have shown up
1604                          * before the transfer completed.  This might not have
1605                          * been caught by the check above because the interrupt
1606                          * could have gone off between the previous check and
1607                          * the check for transfer complete.
1608                          *
1609                          * Technically this ought not be needed assuming we
1610                          * get a DATA_COMPLETE eventually (we'll notice the
1611                          * error and end the request), but it shouldn't hurt.
1612                          *
1613                          * This has the advantage of sending the stop command.
1614                          */
1615                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1616                                                &host->pending_events)) {
1617                                 dw_mci_stop_dma(host);
1618                                 if (data->stop ||
1619                                     !(host->data_status & (SDMMC_INT_DRTO |
1620                                                            SDMMC_INT_EBE)))
1621                                         send_stop_abort(host, data);
1622                                 state = STATE_DATA_ERROR;
1623                                 break;
1624                         }
1625                         prev_state = state = STATE_DATA_BUSY;
1626
1627                         /* fall through */
1628
1629                 case STATE_DATA_BUSY:
1630                         if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1631                                                 &host->pending_events))
1632                                 break;
1633
1634                         host->data = NULL;
1635                         set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1636                         err = dw_mci_data_complete(host, data);
1637
1638                         if (!err) {
1639                                 if (!data->stop || mrq->sbc) {
1640                                         if (mrq->sbc && data->stop)
1641                                                 data->stop->error = 0;
1642                                         dw_mci_request_end(host, mrq);
1643                                         goto unlock;
1644                                 }
1645
1646                                 /* stop command for open-ended transfer */
1647                                 if (data->stop)
1648                                         send_stop_abort(host, data);
1649                         } else {
1650                                 /*
1651                                  * If we don't have a command complete now we'll
1652                                  * never get one since we just reset everything;
1653                                  * better end the request.
1654                                  *
1655                                  * If we do have a command complete we'll fall
1656                                  * through to the SENDING_STOP command and
1657                                  * everything will be peachy keen.
1658                                  */
1659                                 if (!test_bit(EVENT_CMD_COMPLETE,
1660                                               &host->pending_events)) {
1661                                         host->cmd = NULL;
1662                                         dw_mci_request_end(host, mrq);
1663                                         goto unlock;
1664                                 }
1665                         }
1666
1667                         /*
1668                          * If err is non-zero, a stop/abort command
1669                          * has already been issued.
1670                          */
1671                         prev_state = state = STATE_SENDING_STOP;
1672
1673                         /* fall through */
1674
1675                 case STATE_SENDING_STOP:
1676                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1677                                                 &host->pending_events))
1678                                 break;
1679
1680                         /* CMD error in data command */
1681                         if (mrq->cmd->error && mrq->data)
1682                                 dw_mci_reset(host);
1683
1684                         host->cmd = NULL;
1685                         host->data = NULL;
1686
1687                         if (mrq->stop)
1688                                 dw_mci_command_complete(host, mrq->stop);
1689                         else
1690                                 host->cmd_status = 0;
1691
1692                         dw_mci_request_end(host, mrq);
1693                         goto unlock;
1694
1695                 case STATE_DATA_ERROR:
1696                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1697                                                 &host->pending_events))
1698                                 break;
1699
1700                         state = STATE_DATA_BUSY;
1701                         break;
1702                 }
1703         } while (state != prev_state);
1704
1705         host->state = state;
1706 unlock:
1707         spin_unlock(&host->lock);
1708
1709 }
1710
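/*
 * PIO helpers: host->part_buf stages bytes that do not fill a whole
 * FIFO word.  Illustration (not driver code): pushing 5 bytes through
 * a 16-bit FIFO writes two u16 words and leaves one byte behind with
 * part_buf_count = 1; a later push, or the end-of-transfer check in
 * dw_mci_push_data16(), flushes that final byte.
 */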
1711 /* push final bytes to part_buf, only use during push */
1712 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1713 {
1714         memcpy((void *)&host->part_buf, buf, cnt);
1715         host->part_buf_count = cnt;
1716 }
1717
1718 /* append bytes to part_buf, only use during push */
1719 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1720 {
1721         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1722         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1723         host->part_buf_count += cnt;
1724         return cnt;
1725 }
1726
1727 /* pull first bytes from part_buf, only use during pull */
1728 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1729 {
1730         cnt = min(cnt, (int)host->part_buf_count);
1731         if (cnt) {
1732                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1733                        cnt);
1734                 host->part_buf_count -= cnt;
1735                 host->part_buf_start += cnt;
1736         }
1737         return cnt;
1738 }
1739
1740 /* pull final bytes from the part_buf, assuming it's just been filled */
1741 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1742 {
1743         memcpy(buf, &host->part_buf, cnt);
1744         host->part_buf_start = cnt;
1745         host->part_buf_count = (1 << host->data_shift) - cnt;
1746 }
1747
1748 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1749 {
1750         struct mmc_data *data = host->data;
1751         int init_cnt = cnt;
1752
1753         /* try and push anything in the part_buf */
1754         if (unlikely(host->part_buf_count)) {
1755                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1756                 buf += len;
1757                 cnt -= len;
1758                 if (host->part_buf_count == 2) {
1759                         mci_writew(host, DATA(host->data_offset),
1760                                         host->part_buf16);
1761                         host->part_buf_count = 0;
1762                 }
1763         }
1764 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1765         if (unlikely((unsigned long)buf & 0x1)) {
1766                 while (cnt >= 2) {
1767                         u16 aligned_buf[64];
1768                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1769                         int items = len >> 1;
1770                         int i;
1771                         /* memcpy from input buffer into aligned buffer */
1772                         memcpy(aligned_buf, buf, len);
1773                         buf += len;
1774                         cnt -= len;
1775                         /* push data from aligned buffer into fifo */
1776                         for (i = 0; i < items; ++i)
1777                                 mci_writew(host, DATA(host->data_offset),
1778                                                 aligned_buf[i]);
1779                 }
1780         } else
1781 #endif
1782         {
1783                 u16 *pdata = buf;
1784                 for (; cnt >= 2; cnt -= 2)
1785                         mci_writew(host, DATA(host->data_offset), *pdata++);
1786                 buf = pdata;
1787         }
1788         /* put anything remaining in the part_buf */
1789         if (cnt) {
1790                 dw_mci_set_part_bytes(host, buf, cnt);
1791                 /* Push data if we have reached the expected data length */
1792                 if ((data->bytes_xfered + init_cnt) ==
1793                     (data->blksz * data->blocks))
1794                         mci_writew(host, DATA(host->data_offset),
1795                                    host->part_buf16);
1796         }
1797 }
1798
1799 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1800 {
1801 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1802         if (unlikely((unsigned long)buf & 0x1)) {
1803                 while (cnt >= 2) {
1804                         /* pull data from fifo into aligned buffer */
1805                         u16 aligned_buf[64];
1806                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1807                         int items = len >> 1;
1808                         int i;
1809                         for (i = 0; i < items; ++i)
1810                                 aligned_buf[i] = mci_readw(host,
1811                                                 DATA(host->data_offset));
1812                         /* memcpy from aligned buffer into output buffer */
1813                         memcpy(buf, aligned_buf, len);
1814                         buf += len;
1815                         cnt -= len;
1816                 }
1817         } else
1818 #endif
1819         {
1820                 u16 *pdata = buf;
1821                 for (; cnt >= 2; cnt -= 2)
1822                         *pdata++ = mci_readw(host, DATA(host->data_offset));
1823                 buf = pdata;
1824         }
1825         if (cnt) {
1826                 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1827                 dw_mci_pull_final_bytes(host, buf, cnt);
1828         }
1829 }
1830
1831 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1832 {
1833         struct mmc_data *data = host->data;
1834         int init_cnt = cnt;
1835
1836         /* try and push anything in the part_buf */
1837         if (unlikely(host->part_buf_count)) {
1838                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1839                 buf += len;
1840                 cnt -= len;
1841                 if (host->part_buf_count == 4) {
1842                         mci_writel(host, DATA(host->data_offset),
1843                                         host->part_buf32);
1844                         host->part_buf_count = 0;
1845                 }
1846         }
1847 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1848         if (unlikely((unsigned long)buf & 0x3)) {
1849                 while (cnt >= 4) {
1850                         u32 aligned_buf[32];
1851                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1852                         int items = len >> 2;
1853                         int i;
1854                         /* memcpy from input buffer into aligned buffer */
1855                         memcpy(aligned_buf, buf, len);
1856                         buf += len;
1857                         cnt -= len;
1858                         /* push data from aligned buffer into fifo */
1859                         for (i = 0; i < items; ++i)
1860                                 mci_writel(host, DATA(host->data_offset),
1861                                                 aligned_buf[i]);
1862                 }
1863         } else
1864 #endif
1865         {
1866                 u32 *pdata = buf;
1867                 for (; cnt >= 4; cnt -= 4)
1868                         mci_writel(host, DATA(host->data_offset), *pdata++);
1869                 buf = pdata;
1870         }
1871         /* put anything remaining in the part_buf */
1872         if (cnt) {
1873                 dw_mci_set_part_bytes(host, buf, cnt);
1874                 /* Push data if we have reached the expected data length */
1875                 if ((data->bytes_xfered + init_cnt) ==
1876                     (data->blksz * data->blocks))
1877                         mci_writel(host, DATA(host->data_offset),
1878                                    host->part_buf32);
1879         }
1880 }
1881
1882 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1883 {
1884 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1885         if (unlikely((unsigned long)buf & 0x3)) {
1886                 while (cnt >= 4) {
1887                         /* pull data from fifo into aligned buffer */
1888                         u32 aligned_buf[32];
1889                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1890                         int items = len >> 2;
1891                         int i;
1892                         for (i = 0; i < items; ++i)
1893                                 aligned_buf[i] = mci_readl(host,
1894                                                 DATA(host->data_offset));
1895                         /* memcpy from aligned buffer into output buffer */
1896                         memcpy(buf, aligned_buf, len);
1897                         buf += len;
1898                         cnt -= len;
1899                 }
1900         } else
1901 #endif
1902         {
1903                 u32 *pdata = buf;
1904                 for (; cnt >= 4; cnt -= 4)
1905                         *pdata++ = mci_readl(host, DATA(host->data_offset));
1906                 buf = pdata;
1907         }
1908         if (cnt) {
1909                 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1910                 dw_mci_pull_final_bytes(host, buf, cnt);
1911         }
1912 }
1913
1914 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1915 {
1916         struct mmc_data *data = host->data;
1917         int init_cnt = cnt;
1918
1919         /* try and push anything in the part_buf */
1920         if (unlikely(host->part_buf_count)) {
1921                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1922                 buf += len;
1923                 cnt -= len;
1924
1925                 if (host->part_buf_count == 8) {
1926                         mci_writeq(host, DATA(host->data_offset),
1927                                         host->part_buf);
1928                         host->part_buf_count = 0;
1929                 }
1930         }
1931 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1932         if (unlikely((unsigned long)buf & 0x7)) {
1933                 while (cnt >= 8) {
1934                         u64 aligned_buf[16];
1935                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1936                         int items = len >> 3;
1937                         int i;
1938                         /* memcpy from input buffer into aligned buffer */
1939                         memcpy(aligned_buf, buf, len);
1940                         buf += len;
1941                         cnt -= len;
1942                         /* push data from aligned buffer into fifo */
1943                         for (i = 0; i < items; ++i)
1944                                 mci_writeq(host, DATA(host->data_offset),
1945                                                 aligned_buf[i]);
1946                 }
1947         } else
1948 #endif
1949         {
1950                 u64 *pdata = buf;
1951                 for (; cnt >= 8; cnt -= 8)
1952                         mci_writeq(host, DATA(host->data_offset), *pdata++);
1953                 buf = pdata;
1954         }
1955         /* put anything remaining in the part_buf */
1956         if (cnt) {
1957                 dw_mci_set_part_bytes(host, buf, cnt);
1958                 /* Push data if we have reached the expected data length */
1959                 if ((data->bytes_xfered + init_cnt) ==
1960                     (data->blksz * data->blocks))
1961                         mci_writeq(host, DATA(host->data_offset),
1962                                    host->part_buf);
1963         }
1964 }
1965
1966 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1967 {
1968 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1969         if (unlikely((unsigned long)buf & 0x7)) {
1970                 while (cnt >= 8) {
1971                         /* pull data from fifo into aligned buffer */
1972                         u64 aligned_buf[16];
1973                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1974                         int items = len >> 3;
1975                         int i;
1976                         for (i = 0; i < items; ++i)
1977                                 aligned_buf[i] = mci_readq(host,
1978                                                 DATA(host->data_offset));
1979                         /* memcpy from aligned buffer into output buffer */
1980                         memcpy(buf, aligned_buf, len);
1981                         buf += len;
1982                         cnt -= len;
1983                 }
1984         } else
1985 #endif
1986         {
1987                 u64 *pdata = buf;
1988                 for (; cnt >= 8; cnt -= 8)
1989                         *pdata++ = mci_readq(host, DATA(host->data_offset));
1990                 buf = pdata;
1991         }
1992         if (cnt) {
1993                 host->part_buf = mci_readq(host, DATA(host->data_offset));
1994                 dw_mci_pull_final_bytes(host, buf, cnt);
1995         }
1996 }
1997
1998 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1999 {
2000         int len;
2001
2002         /* get remaining partial bytes */
2003         len = dw_mci_pull_part_bytes(host, buf, cnt);
2004         if (unlikely(len == cnt))
2005                 return;
2006         buf += len;
2007         cnt -= len;
2008
2009         /* get the rest of the data */
2010         host->pull_data(host, buf, cnt);
2011 }
2012
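/*
 * Drain the receive FIFO into the scatterlist.  fcnt is the number of
 * readable bytes: the FIFO item count from STATUS shifted up by
 * data_shift, plus whatever is staged in part_buf.  For example, with
 * a 32-bit FIFO (data_shift = 2) and FCNT = 3, up to (3 << 2) = 12
 * bytes plus the staged bytes can be pulled.
 */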
2013 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2014 {
2015         struct sg_mapping_iter *sg_miter = &host->sg_miter;
2016         void *buf;
2017         unsigned int offset;
2018         struct mmc_data *data = host->data;
2019         int shift = host->data_shift;
2020         u32 status;
2021         unsigned int len;
2022         unsigned int remain, fcnt;
2023
2024         do {
2025                 if (!sg_miter_next(sg_miter))
2026                         goto done;
2027
2028                 host->sg = sg_miter->piter.sg;
2029                 buf = sg_miter->addr;
2030                 remain = sg_miter->length;
2031                 offset = 0;
2032
2033                 do {
2034                         fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2035                                         << shift) + host->part_buf_count;
2036                         len = min(remain, fcnt);
2037                         if (!len)
2038                                 break;
2039                         dw_mci_pull_data(host, (void *)(buf + offset), len);
2040                         data->bytes_xfered += len;
2041                         offset += len;
2042                         remain -= len;
2043                 } while (remain);
2044
2045                 sg_miter->consumed = offset;
2046                 status = mci_readl(host, MINTSTS);
2047                 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2048         /* if RXDR is still ready, read again */
2049         } while ((status & SDMMC_INT_RXDR) ||
2050                  (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2051
2052         if (!remain) {
2053                 if (!sg_miter_next(sg_miter))
2054                         goto done;
2055                 sg_miter->consumed = 0;
2056         }
2057         sg_miter_stop(sg_miter);
2058         return;
2059
2060 done:
2061         sg_miter_stop(sg_miter);
2062         host->sg = NULL;
2063         smp_wmb();
2064         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2065 }
2066
2067 static void dw_mci_write_data_pio(struct dw_mci *host)
2068 {
2069         struct sg_mapping_iter *sg_miter = &host->sg_miter;
2070         void *buf;
2071         unsigned int offset;
2072         struct mmc_data *data = host->data;
2073         int shift = host->data_shift;
2074         u32 status;
2075         unsigned int len;
2076         unsigned int fifo_depth = host->fifo_depth;
2077         unsigned int remain, fcnt;
2078
2079         do {
2080                 if (!sg_miter_next(sg_miter))
2081                         goto done;
2082
2083                 host->sg = sg_miter->piter.sg;
2084                 buf = sg_miter->addr;
2085                 remain = sg_miter->length;
2086                 offset = 0;
2087
2088                 do {
2089                         fcnt = ((fifo_depth -
2090                                  SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2091                                         << shift) - host->part_buf_count;
2092                         len = min(remain, fcnt);
2093                         if (!len)
2094                                 break;
2095                         host->push_data(host, (void *)(buf + offset), len);
2096                         data->bytes_xfered += len;
2097                         offset += len;
2098                         remain -= len;
2099                 } while (remain);
2100
2101                 sg_miter->consumed = offset;
2102                 status = mci_readl(host, MINTSTS);
2103                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2104         } while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
2105
2106         if (!remain) {
2107                 if (!sg_miter_next(sg_miter))
2108                         goto done;
2109                 sg_miter->consumed = 0;
2110         }
2111         sg_miter_stop(sg_miter);
2112         return;
2113
2114 done:
2115         sg_miter_stop(sg_miter);
2116         host->sg = NULL;
2117         smp_wmb();
2118         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2119 }
2120
2121 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2122 {
2123         if (!host->cmd_status)
2124                 host->cmd_status = status;
2125
2126         smp_wmb();
2127
2128         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2129         tasklet_schedule(&host->tasklet);
2130 }
2131
2132 static void dw_mci_handle_cd(struct dw_mci *host)
2133 {
2134         int i;
2135
2136         for (i = 0; i < host->num_slots; i++) {
2137                 struct dw_mci_slot *slot = host->slot[i];
2138
2139                 if (!slot)
2140                         continue;
2141
2142                 if (slot->mmc->ops->card_event)
2143                         slot->mmc->ops->card_event(slot->mmc);
2144                 mmc_detect_change(slot->mmc,
2145                         msecs_to_jiffies(host->pdata->detect_delay_ms));
2146         }
2147 }
2148
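/*
 * Top-half interrupt handler.  Each recognised source is acked by
 * writing its bit back to RINTSTS; heavier work (the state machine)
 * is deferred to the tasklet, while PIO data is moved here so the
 * FIFO cannot overflow or underrun in the meantime.
 */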
2149 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2150 {
2151         struct dw_mci *host = dev_id;
2152         u32 pending;
2153         int i;
2154
2155         pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2156
2157         /*
2158          * DTO fix - version 2.10a and below, and only if internal DMA
2159          * is configured.
2160          */
2161         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2162                 if (!pending &&
2163                     ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2164                         pending |= SDMMC_INT_DATA_OVER;
2165         }
2166
2167         if (pending) {
2168                 /* Check volt switch first, since it can look like an error */
2169                 if ((host->state == STATE_SENDING_CMD11) &&
2170                     (pending & SDMMC_INT_VOLT_SWITCH)) {
2171                         del_timer(&host->cmd11_timer);
2172
2173                         mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2174                         pending &= ~SDMMC_INT_VOLT_SWITCH;
2175                         dw_mci_cmd_interrupt(host, pending);
2176                 }
2177
2178                 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2179                         mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2180                         host->cmd_status = pending;
2181                         smp_wmb();
2182                         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2183                 }
2184
2185                 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2186                         /* if there is an error, report DATA_ERROR */
2187                         mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2188                         host->data_status = pending;
2189                         smp_wmb();
2190                         set_bit(EVENT_DATA_ERROR, &host->pending_events);
2191                         tasklet_schedule(&host->tasklet);
2192                 }
2193
2194                 if (pending & SDMMC_INT_DATA_OVER) {
2195                         mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2196                         if (!host->data_status)
2197                                 host->data_status = pending;
2198                         smp_wmb();
2199                         if (host->dir_status == DW_MCI_RECV_STATUS) {
2200                                 if (host->sg != NULL)
2201                                         dw_mci_read_data_pio(host, true);
2202                         }
2203                         set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2204                         tasklet_schedule(&host->tasklet);
2205                 }
2206
2207                 if (pending & SDMMC_INT_RXDR) {
2208                         mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2209                         if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2210                                 dw_mci_read_data_pio(host, false);
2211                 }
2212
2213                 if (pending & SDMMC_INT_TXDR) {
2214                         mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2215                         if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2216                                 dw_mci_write_data_pio(host);
2217                 }
2218
2219                 if (pending & SDMMC_INT_CMD_DONE) {
2220                         mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2221                         dw_mci_cmd_interrupt(host, pending);
2222                 }
2223
2224                 if (pending & SDMMC_INT_CD) {
2225                         mci_writel(host, RINTSTS, SDMMC_INT_CD);
2226                         dw_mci_handle_cd(host);
2227                 }
2228
2229                 /* Handle SDIO Interrupts */
2230                 for (i = 0; i < host->num_slots; i++) {
2231                         struct dw_mci_slot *slot = host->slot[i];
2232
2233                         if (!slot)
2234                                 continue;
2235
2236                         if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2237                                 mci_writel(host, RINTSTS,
2238                                            SDMMC_INT_SDIO(slot->sdio_id));
2239                                 mmc_signal_sdio_irq(slot->mmc);
2240                         }
2241                 }
2242
2243         }
2244
2245 #ifdef CONFIG_MMC_DW_IDMAC
2246         /* Handle DMA interrupts */
2247         if (host->dma_64bit_address == 1) {
2248                 pending = mci_readl(host, IDSTS64);
2249                 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2250                         mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2251                                                         SDMMC_IDMAC_INT_RI);
2252                         mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2253                         host->dma_ops->complete(host);
2254                 }
2255         } else {
2256                 pending = mci_readl(host, IDSTS);
2257                 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2258                         mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2259                                                         SDMMC_IDMAC_INT_RI);
2260                         mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2261                         host->dma_ops->complete(host);
2262                 }
2263         }
2264 #endif
2265
2266         return IRQ_HANDLED;
2267 }
2268
2269 #ifdef CONFIG_OF
2270 /* given a slot id, find out the device node representing that slot */
2271 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2272 {
2273         struct device_node *np;
2274         const __be32 *addr;
2275         int len;
2276
2277         if (!dev || !dev->of_node)
2278                 return NULL;
2279
2280         for_each_child_of_node(dev->of_node, np) {
2281                 addr = of_get_property(np, "reg", &len);
2282                 if (!addr || (len < sizeof(int)))
2283                         continue;
2284                 if (be32_to_cpup(addr) == slot)
2285                         return np;
2286         }
2287         return NULL;
2288 }
2289
2290 static struct dw_mci_of_slot_quirks {
2291         char *quirk;
2292         int id;
2293 } of_slot_quirks[] = {
2294         {
2295                 .quirk  = "disable-wp",
2296                 .id     = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2297         },
2298 };
2299
2300 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2301 {
2302         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2303         int quirks = 0;
2304         int idx;
2305
2306         /* get quirks */
2307         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2308                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2309                         dev_warn(dev, "Slot quirk %s is deprecated\n",
2310                                         of_slot_quirks[idx].quirk);
2311                         quirks |= of_slot_quirks[idx].id;
2312                 }
2313
2314         return quirks;
2315 }
2316 #else /* CONFIG_OF */
2317 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2318 {
2319         return 0;
2320 }
2321 #endif /* CONFIG_OF */
2322
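/*
 * Allocate and register one mmc_host per slot: caps come from
 * platform data, per-controller drv_data and mmc_of_parse(); block
 * limits either come from platform data or fall back to defaults
 * suitable for IDMAC or PIO operation.
 */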
2323 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2324 {
2325         struct mmc_host *mmc;
2326         struct dw_mci_slot *slot;
2327         const struct dw_mci_drv_data *drv_data = host->drv_data;
2328         int ctrl_id, ret;
2329         u32 freq[2];
2330
2331         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2332         if (!mmc)
2333                 return -ENOMEM;
2334
2335         slot = mmc_priv(mmc);
2336         slot->id = id;
2337         slot->sdio_id = host->sdio_id0 + id;
2338         slot->mmc = mmc;
2339         slot->host = host;
2340         host->slot[id] = slot;
2341
2342         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2343
2344         mmc->ops = &dw_mci_ops;
2345         if (of_property_read_u32_array(host->dev->of_node,
2346                                        "clock-freq-min-max", freq, 2)) {
2347                 mmc->f_min = DW_MCI_FREQ_MIN;
2348                 mmc->f_max = DW_MCI_FREQ_MAX;
2349         } else {
2350                 mmc->f_min = freq[0];
2351                 mmc->f_max = freq[1];
2352         }
2353
2354         /* if there are external regulators, get them */
2355         ret = mmc_regulator_get_supply(mmc);
2356         if (ret == -EPROBE_DEFER)
2357                 goto err_host_allocated;
2358
2359         if (!mmc->ocr_avail)
2360                 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2361
2362         if (host->pdata->caps)
2363                 mmc->caps = host->pdata->caps;
2364
2365         if (host->pdata->pm_caps)
2366                 mmc->pm_caps = host->pdata->pm_caps;
2367
2368         if (host->dev->of_node) {
2369                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2370                 if (ctrl_id < 0)
2371                         ctrl_id = 0;
2372         } else {
2373                 ctrl_id = to_platform_device(host->dev)->id;
2374         }
2375         if (drv_data && drv_data->caps)
2376                 mmc->caps |= drv_data->caps[ctrl_id];
2377
2378         if (host->pdata->caps2)
2379                 mmc->caps2 = host->pdata->caps2;
2380
2381         ret = mmc_of_parse(mmc);
2382         if (ret)
2383                 goto err_host_allocated;
2384
2385         if (host->pdata->blk_settings) {
2386                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2387                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2388                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2389                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2390                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2391         } else {
2392                 /* Useful defaults if platform data is unset. */
2393 #ifdef CONFIG_MMC_DW_IDMAC
2394                 mmc->max_segs = host->ring_size;
2395                 mmc->max_blk_size = 65536;
2396                 mmc->max_seg_size = 0x1000;
2397                 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2398                 mmc->max_blk_count = mmc->max_req_size / 512;
2399 #else
2400                 mmc->max_segs = 64;
2401                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2402                 mmc->max_blk_count = 512;
2403                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2404                 mmc->max_seg_size = mmc->max_req_size;
2405 #endif /* CONFIG_MMC_DW_IDMAC */
2406         }
2407
2408         if (dw_mci_get_cd(mmc))
2409                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2410         else
2411                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2412
2413         ret = mmc_add_host(mmc);
2414         if (ret)
2415                 goto err_host_allocated;
2416
2417 #if defined(CONFIG_DEBUG_FS)
2418         dw_mci_init_debugfs(slot);
2419 #endif
2420
2421         return 0;
2422
2423 err_host_allocated:
2424         mmc_free_host(mmc);
2425         return ret;
2426 }
2427
2428 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2429 {
2430         /* Debugfs stuff is cleaned up by mmc core */
2431         mmc_remove_host(slot->mmc);
2432         slot->host->slot[id] = NULL;
2433         mmc_free_host(slot->mmc);
2434 }
2435
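/*
 * Probe DMA support: the ADDR_CONFIG bit (HCON bit 27) selects
 * between the 32-bit and 64-bit IDMAC descriptor layouts, and a
 * PAGE_SIZE coherent buffer is reserved for the descriptor ring.
 * Any failure here falls back to PIO mode rather than failing probe.
 */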
2436 static void dw_mci_init_dma(struct dw_mci *host)
2437 {
2438         int addr_config;
2439         /* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
2440         addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
2441
2442         if (addr_config == 1) {
2443                 /* host supports IDMAC in 64-bit address mode */
2444                 host->dma_64bit_address = 1;
2445                 dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
2446                 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
2447                         dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
2448         } else {
2449                 /* host supports IDMAC in 32-bit address mode */
2450                 host->dma_64bit_address = 0;
2451                 dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
2452         }
2453
2454         /* Alloc memory for sg translation */
2455         host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2456                                           &host->sg_dma, GFP_KERNEL);
2457         if (!host->sg_cpu) {
2458                 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2459                         __func__);
2460                 goto no_dma;
2461         }
2462
2463         /* Determine which DMA interface to use */
2464 #ifdef CONFIG_MMC_DW_IDMAC
2465         host->dma_ops = &dw_mci_idmac_ops;
2466         dev_info(host->dev, "Using internal DMA controller.\n");
2467 #endif
2468
2469         if (!host->dma_ops)
2470                 goto no_dma;
2471
2472         if (host->dma_ops->init && host->dma_ops->start &&
2473             host->dma_ops->stop && host->dma_ops->cleanup) {
2474                 if (host->dma_ops->init(host)) {
2475                         dev_err(host->dev,
2476                                 "%s: Unable to initialize DMA Controller.\n", __func__);
2477                         goto no_dma;
2478                 }
2479         } else {
2480                 dev_err(host->dev, "DMA initialization not found.\n");
2481                 goto no_dma;
2482         }
2483
2484         host->use_dma = 1;
2485         return;
2486
2487 no_dma:
2488         dev_info(host->dev, "Using PIO mode.\n");
2489         host->use_dma = 0;
2490         return;
2491 }
2492
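/*
 * Set the requested reset bits in CTRL and poll (for up to 500ms)
 * until the hardware clears them.  Illustrative use, assuming a
 * caller that treats failure as fatal:
 *
 *	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
 *		return -EIO;
 */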
2493 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2494 {
2495         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2496         u32 ctrl;
2497
2498         ctrl = mci_readl(host, CTRL);
2499         ctrl |= reset;
2500         mci_writel(host, CTRL, ctrl);
2501
2502         /* wait till resets clear */
2503         do {
2504                 ctrl = mci_readl(host, CTRL);
2505                 if (!(ctrl & reset))
2506                         return true;
2507         } while (time_before(jiffies, timeout));
2508
2509         dev_err(host->dev,
2510                 "Timeout resetting block (ctrl reset %#x)\n",
2511                 ctrl & reset);
2512
2513         return false;
2514 }
2515
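/*
 * Full controller recovery after a data error: reset the CIU and FIFO
 * (and DMA, when in use), clear stale interrupts, wait for dma_req to
 * drop, reset the FIFO once more for DMA, then reprogram the IDMAC
 * and finally tell the CIU to reload its clock registers.
 */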
2516 static bool dw_mci_reset(struct dw_mci *host)
2517 {
2518         u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
2519         bool ret = false;
2520
2521         /*
2522          * Resetting generates a block interrupt, so stop the
2523          * scatter-gather miter and clear the pointer beforehand.
2524          */
2525         if (host->sg) {
2526                 sg_miter_stop(&host->sg_miter);
2527                 host->sg = NULL;
2528         }
2529
2530         if (host->use_dma)
2531                 flags |= SDMMC_CTRL_DMA_RESET;
2532
2533         if (dw_mci_ctrl_reset(host, flags)) {
2534                 /*
2535                  * In all cases we clear the RAWINTS register to clear any
2536                  * interrupts.
2537                  */
2538                 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2539
2540                 /* if using dma we wait for dma_req to clear */
2541                 if (host->use_dma) {
2542                         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2543                         u32 status;
2544                         do {
2545                                 status = mci_readl(host, STATUS);
2546                                 if (!(status & SDMMC_STATUS_DMA_REQ))
2547                                         break;
2548                                 cpu_relax();
2549                         } while (time_before(jiffies, timeout));
2550
2551                         if (status & SDMMC_STATUS_DMA_REQ) {
2552                                 dev_err(host->dev,
2553                                         "%s: Timeout waiting for dma_req to clear during reset\n",
2554                                         __func__);
2555                                 goto ciu_out;
2556                         }
2557
2558                         /* when using DMA, reset the FIFO once more */
2559                         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
2560                                 goto ciu_out;
2561                 }
2562         } else {
2563                 /* if the controller reset bit did clear, then set clock regs */
2564                 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
2565                         dev_err(host->dev,
2566                                 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
2567                                 __func__);
2568                         goto ciu_out;
2569                 }
2570         }
2571
2572 #if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
2573         /* It is also recommended that we reset and reprogram idmac */
2574         dw_mci_idmac_reset(host);
2575 #endif
2576
2577         ret = true;
2578
2579 ciu_out:
2580         /* After a CTRL reset we need to have CIU set clock registers */
2581         mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
2582
2583         return ret;
2584 }
2585
2586 static void dw_mci_cmd11_timer(unsigned long arg)
2587 {
2588         struct dw_mci *host = (struct dw_mci *)arg;
2589
2590         if (host->state != STATE_SENDING_CMD11)
2591                 dev_info(host->dev, "Unexpected CMD11 timeout\n");
2592
2593         host->cmd_status = SDMMC_INT_RTO;
2594         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2595         tasklet_schedule(&host->tasklet);
2596 }
2597
2598 #ifdef CONFIG_OF
2599 static struct dw_mci_of_quirks {
2600         char *quirk;
2601         int id;
2602 } of_quirks[] = {
2603         {
2604                 .quirk  = "broken-cd",
2605                 .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2606         }, {
2607                 .quirk  = "disable-wp",
2608                 .id     = DW_MCI_QUIRK_NO_WRITE_PROTECT,
2609         },
2610 };
2611
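/*
 * Build a dw_mci_board from the device tree: slot count, quirks, FIFO
 * depth, card-detect delay and bus clock are all optional properties
 * with sane fallbacks; vendor drv_data may parse additional
 * properties of its own.
 */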
2612 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2613 {
2614         struct dw_mci_board *pdata;
2615         struct device *dev = host->dev;
2616         struct device_node *np = dev->of_node;
2617         const struct dw_mci_drv_data *drv_data = host->drv_data;
2618         int idx, ret;
2619         u32 clock_frequency;
2620
2621         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2622         if (!pdata)
2623                 return ERR_PTR(-ENOMEM);
2624
2625         /* find out number of slots supported */
2626         if (of_property_read_u32(dev->of_node, "num-slots",
2627                                 &pdata->num_slots)) {
2628                 dev_info(dev,
2629                          "num-slots property not found, assuming 1 slot is available\n");
2630                 pdata->num_slots = 1;
2631         }
2632
2633         /* get quirks */
2634         for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2635                 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2636                         pdata->quirks |= of_quirks[idx].id;
2637
2638         if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2639                 dev_info(dev,
2640                          "fifo-depth property not found, using value of FIFOTH register as default\n");
2641
2642         of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2643
2644         if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2645                 pdata->bus_hz = clock_frequency;
2646
2647         if (drv_data && drv_data->parse_dt) {
2648                 ret = drv_data->parse_dt(host);
2649                 if (ret)
2650                         return ERR_PTR(ret);
2651         }
2652
2653         if (of_find_property(np, "supports-highspeed", NULL))
2654                 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2655
2656         return pdata;
2657 }
2658
2659 #else /* CONFIG_OF */
2660 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2661 {
2662         return ERR_PTR(-EINVAL);
2663 }
2664 #endif /* CONFIG_OF */
2665
2666 static void dw_mci_enable_cd(struct dw_mci *host)
2667 {
2668         struct dw_mci_board *brd = host->pdata;
2669         unsigned long irqflags;
2670         u32 temp;
2671         int i;
2672
2673         /* No need for CD if broken card detection */
2674         if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
2675                 return;
2676
2677         /* No need for CD if all slots have a non-error GPIO */
2678         for (i = 0; i < host->num_slots; i++) {
2679                 struct dw_mci_slot *slot = host->slot[i];
2680
2681                 if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
2682                         break;
2683         }
2684         if (i == host->num_slots)
2685                 return;
2686
2687         spin_lock_irqsave(&host->irq_lock, irqflags);
2688         temp = mci_readl(host, INTMASK);
2689         temp |= SDMMC_INT_CD;
2690         mci_writel(host, INTMASK, temp);
2691         spin_unlock_irqrestore(&host->irq_lock, irqflags);
2692 }
2693
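/*
 * Controller probe, roughly: platform data (or DT) -> biu/ciu clocks
 * -> vendor init -> host data width from HCON -> block reset -> DMA
 * setup -> FIFO thresholds -> version-dependent DATA offset -> IRQ ->
 * per-slot registration -> card-detect enable.
 */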
2694 int dw_mci_probe(struct dw_mci *host)
2695 {
2696         const struct dw_mci_drv_data *drv_data = host->drv_data;
2697         int width, i, ret = 0;
2698         u32 fifo_size;
2699         int init_slots = 0;
2700
2701         if (!host->pdata) {
2702                 host->pdata = dw_mci_parse_dt(host);
2703                 if (IS_ERR(host->pdata)) {
2704                         dev_err(host->dev, "platform data not available\n");
2705                         return -EINVAL;
2706                 }
2707         }
2708
2709         if (host->pdata->num_slots > 1) {
2710                 dev_err(host->dev,
2711                         "num_slots > 1 is not supported\n");
2712                 return -ENODEV;
2713         }
2714
2715         host->biu_clk = devm_clk_get(host->dev, "biu");
2716         if (IS_ERR(host->biu_clk)) {
2717                 dev_dbg(host->dev, "biu clock not available\n");
2718         } else {
2719                 ret = clk_prepare_enable(host->biu_clk);
2720                 if (ret) {
2721                         dev_err(host->dev, "failed to enable biu clock\n");
2722                         return ret;
2723                 }
2724         }
2725
2726         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2727         if (IS_ERR(host->ciu_clk)) {
2728                 dev_dbg(host->dev, "ciu clock not available\n");
2729                 host->bus_hz = host->pdata->bus_hz;
2730         } else {
2731                 ret = clk_prepare_enable(host->ciu_clk);
2732                 if (ret) {
2733                         dev_err(host->dev, "failed to enable ciu clock\n");
2734                         goto err_clk_biu;
2735                 }
2736
2737                 if (host->pdata->bus_hz) {
2738                         ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2739                         if (ret)
2740                                 dev_warn(host->dev,
2741                                          "Unable to set bus rate to %uHz\n",
2742                                          host->pdata->bus_hz);
2743                 }
2744                 host->bus_hz = clk_get_rate(host->ciu_clk);
2745         }
2746
2747         if (!host->bus_hz) {
2748                 dev_err(host->dev,
2749                         "Platform data must supply bus speed\n");
2750                 ret = -ENODEV;
2751                 goto err_clk_ciu;
2752         }
2753
2754         if (drv_data && drv_data->init) {
2755                 ret = drv_data->init(host);
2756                 if (ret) {
2757                         dev_err(host->dev,
2758                                 "implementation specific init failed\n");
2759                         goto err_clk_ciu;
2760                 }
2761         }
2762
2763         if (drv_data && drv_data->setup_clock) {
2764                 ret = drv_data->setup_clock(host);
2765                 if (ret) {
2766                         dev_err(host->dev,
2767                                 "implementation specific clock setup failed\n");
2768                         goto err_clk_ciu;
2769                 }
2770         }
2771
2772         setup_timer(&host->cmd11_timer,
2773                     dw_mci_cmd11_timer, (unsigned long)host);
2774
2775         host->quirks = host->pdata->quirks;
2776
2777         spin_lock_init(&host->lock);
2778         spin_lock_init(&host->irq_lock);
2779         INIT_LIST_HEAD(&host->queue);
2780
2781         /*
2782          * Get the host data width - this assumes that HCON has been set with
2783          * the correct values.
2784          */
2785         i = (mci_readl(host, HCON) >> 7) & 0x7;
2786         if (!i) {
2787                 host->push_data = dw_mci_push_data16;
2788                 host->pull_data = dw_mci_pull_data16;
2789                 width = 16;
2790                 host->data_shift = 1;
2791         } else if (i == 2) {
2792                 host->push_data = dw_mci_push_data64;
2793                 host->pull_data = dw_mci_pull_data64;
2794                 width = 64;
2795                 host->data_shift = 3;
2796         } else {
2797                 /* Check for a reserved value, and warn if it is */
2798                 WARN((i != 1),
2799                      "HCON reports a reserved host data width!\n"
2800                      "Defaulting to 32-bit access.\n");
2801                 host->push_data = dw_mci_push_data32;
2802                 host->pull_data = dw_mci_pull_data32;
2803                 width = 32;
2804                 host->data_shift = 2;
2805         }
2806
2807         /* Reset all blocks */
2808         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
2809                 return -ENODEV;
2810
2811         host->dma_ops = host->pdata->dma_ops;
2812         dw_mci_init_dma(host);
2813
2814         /* Clear the interrupts for the host controller */
2815         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2816         mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2817
2818         /* Put in max timeout */
2819         mci_writel(host, TMOUT, 0xFFFFFFFF);
2820
2821         /*
2822          * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
2823          * TX mark = fifo_size / 2, DMA size = 8
2824          */
2825         if (!host->pdata->fifo_depth) {
2826                 /*
2827                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2828                  * have been overwritten by the bootloader, just like we're
2829                  * about to do, so if you know the value for your hardware, you
2830                  * should put it in the platform data.
2831                  */
2832                 fifo_size = mci_readl(host, FIFOTH);
2833                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2834         } else {
2835                 fifo_size = host->pdata->fifo_depth;
2836         }
2837         host->fifo_depth = fifo_size;
2838         host->fifoth_val =
2839                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2840         mci_writel(host, FIFOTH, host->fifoth_val);
2841
2842         /* disable clock to CIU */
2843         mci_writel(host, CLKENA, 0);
2844         mci_writel(host, CLKSRC, 0);
2845
        /*
         * The DATA register offset changed in the 2.40a release of the
         * IP, so check the version ID and pick the matching offset.
         */
2850         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2851         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2852
2853         if (host->verid < DW_MMC_240A)
2854                 host->data_offset = DATA_OFFSET;
2855         else
2856                 host->data_offset = DATA_240A_OFFSET;
2857
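        /*
         * The IRQ handler only collects and acks status; the heavy
         * state-machine work runs in this tasklet.
         */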
2858         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2859         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2860                                host->irq_flags, "dw-mci", host);
2861         if (ret)
2862                 goto err_dmaunmap;
2863
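        /* HCON[5:1] holds the number of card slots minus one */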
2864         if (host->pdata->num_slots)
2865                 host->num_slots = host->pdata->num_slots;
2866         else
2867                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2868
        /*
         * Enable interrupts for command done, data over, TX/RX data
         * requests, and errors (response/data timeouts, CRC errors, etc.).
         */
2873         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2874         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2875                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2876                    DW_MCI_ERROR_FLAGS);
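        /* CTRL.INT_ENABLE is the global gate; INTMASK still selects sources */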
2877         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2878
        dev_info(host->dev,
                 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
                 host->irq, width, fifo_size);
2883
2884         /* We need at least one slot to succeed */
2885         for (i = 0; i < host->num_slots; i++) {
2886                 ret = dw_mci_init_slot(host, i);
2887                 if (ret)
2888                         dev_dbg(host->dev, "slot %d init failed\n", i);
2889                 else
2890                         init_slots++;
2891         }
2892
2893         /* Now that slots are all setup, we can enable card detect */
2894         dw_mci_enable_cd(host);
2895
2896         if (init_slots) {
2897                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2898         } else {
                dev_dbg(host->dev,
                        "attempted to initialize %d slots, but failed on all\n",
                        host->num_slots);
2901                 goto err_dmaunmap;
2902         }
2903
2904         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2905                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2906
2907         return 0;
2908
2909 err_dmaunmap:
2910         if (host->use_dma && host->dma_ops->exit)
2911                 host->dma_ops->exit(host);
2912
2913 err_clk_ciu:
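        /* Both clocks are optional, so only unprepare the ones we actually got */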
2914         if (!IS_ERR(host->ciu_clk))
2915                 clk_disable_unprepare(host->ciu_clk);
2916
2917 err_clk_biu:
2918         if (!IS_ERR(host->biu_clk))
2919                 clk_disable_unprepare(host->biu_clk);
2920
2921         return ret;
2922 }
2923 EXPORT_SYMBOL(dw_mci_probe);
2924
2925 void dw_mci_remove(struct dw_mci *host)
2926 {
2927         int i;
2928
2929         mci_writel(host, RINTSTS, 0xFFFFFFFF);
        mci_writel(host, INTMASK, 0); /* disable all MMC interrupts first */
2931
2932         for (i = 0; i < host->num_slots; i++) {
2933                 dev_dbg(host->dev, "remove slot %d\n", i);
2934                 if (host->slot[i])
2935                         dw_mci_cleanup_slot(host->slot[i], i);
2936         }
2937
2938         /* disable clock to CIU */
2939         mci_writel(host, CLKENA, 0);
2940         mci_writel(host, CLKSRC, 0);
2941
2942         if (host->use_dma && host->dma_ops->exit)
2943                 host->dma_ops->exit(host);
2944
2945         if (!IS_ERR(host->ciu_clk))
2946                 clk_disable_unprepare(host->ciu_clk);
2947
2948         if (!IS_ERR(host->biu_clk))
2949                 clk_disable_unprepare(host->biu_clk);
2950 }
2951 EXPORT_SYMBOL(dw_mci_remove);
2952
2955 #ifdef CONFIG_PM_SLEEP
2956 /*
2957  * TODO: we should probably disable the clock to the card in the suspend path.
2958  */
2959 int dw_mci_suspend(struct dw_mci *host)
2960 {
2961         return 0;
2962 }
2963 EXPORT_SYMBOL(dw_mci_suspend);
2964
2965 int dw_mci_resume(struct dw_mci *host)
2966 {
        int i;
2968
        if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
                return -ENODEV;
2973
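        /* The controller was just reset, so bring the DMA engine back up */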
2974         if (host->use_dma && host->dma_ops->init)
2975                 host->dma_ops->init(host);
2976
        /*
         * Restore the initial FIFOTH value and invalidate prev_blksz so
         * the FIFO threshold is recalculated for the first transfer.
         */
2981         mci_writel(host, FIFOTH, host->fifoth_val);
2982         host->prev_blksz = 0;
2983
2984         /* Put in max timeout */
2985         mci_writel(host, TMOUT, 0xFFFFFFFF);
2986
2987         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2988         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2989                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2990                    DW_MCI_ERROR_FLAGS);
2991         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2992
2993         for (i = 0; i < host->num_slots; i++) {
2994                 struct dw_mci_slot *slot = host->slot[i];
2995                 if (!slot)
2996                         continue;
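                /* Replay bus settings for slots that kept power during suspend */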
2997                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2998                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2999                         dw_mci_setup_bus(slot, true);
3000                 }
3001         }
3002
3003         /* Now that slots are all setup, we can enable card detect */
3004         dw_mci_enable_cd(host);
3005
3006         return 0;
3007 }
3008 EXPORT_SYMBOL(dw_mci_resume);
3009 #endif /* CONFIG_PM_SLEEP */
3010
3011 static int __init dw_mci_init(void)
3012 {
        pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
3014         return 0;
3015 }
3016
3017 static void __exit dw_mci_exit(void)
3018 {
3019 }
3020
3021 module_init(dw_mci_init);
3022 module_exit(dw_mci_exit);
3023
3024 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3025 MODULE_AUTHOR("NXP Semiconductor VietNam");
3026 MODULE_AUTHOR("Imagination Technologies Ltd");
3027 MODULE_LICENSE("GPL v2");