SDMMC: enable the 8-bit bus width and IDMAC
[firefly-linux-kernel-4.4.55.git] / drivers / mmc / host / rk_sdmmc.c
1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  *  (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  */
15
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/platform_device.h>
27 #include <linux/seq_file.h>
28 #include <linux/slab.h>
29 #include <linux/stat.h>
30 #include <linux/delay.h>
31 #include <linux/irq.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/mmc.h>
34 #include <linux/mmc/sdio.h>
35 #include <linux/mmc/rk_mmc.h>
36 #include <linux/bitops.h>
37 #include <linux/regulator/consumer.h>
38 #include <linux/workqueue.h>
39 #include <linux/of.h>
40 #include <linux/of_gpio.h>
41 #include <linux/mmc/slot-gpio.h>
42
43 #include "rk_sdmmc.h"
44 #include "rk_sdmmc_of.h"
45
46 /* Common flag combinations */
47 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
48                                  SDMMC_INT_HTO | SDMMC_INT_SBE  | \
49                                  SDMMC_INT_EBE)
50 #define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
51                                  SDMMC_INT_RESP_ERR)
52 #define DW_MCI_ERROR_FLAGS      (DW_MCI_DATA_ERROR_FLAGS | \
53                                  DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
54 #define DW_MCI_SEND_STATUS      1
55 #define DW_MCI_RECV_STATUS      2
56 #define DW_MCI_DMA_THRESHOLD    16
57
58 #define DW_MCI_FREQ_MAX 50000000        /* unit: Hz (original value: 200000000) */
59 #define DW_MCI_FREQ_MIN 300000          /* unit: Hz (original value: 400000) */
60
61 #define SDMMC_DATA_TIMEOUT_SD   500 /* spec maximum is 250ms; relaxed to tolerate marginal cards */
62 #define SDMMC_DATA_TIMEOUT_SDIO 250
63 #define SDMMC_DATA_TIMEOUT_EMMC 2500
64
65 #ifdef CONFIG_MMC_DW_IDMAC
66 #define IDMAC_INT_CLR           (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
67                                  SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
68                                  SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
69                                  SDMMC_IDMAC_INT_TI)
70
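/*
 * IDMAC descriptor, dual-buffer layout. This driver always sets the CH
 * (chained) bit, so des3 is used as the physical address of the next
 * descriptor in the ring rather than as a second data buffer.
 */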
71 struct idmac_desc {
72         u32             des0;   /* Control Descriptor */
73 #define IDMAC_DES0_DIC  BIT(1)
74 #define IDMAC_DES0_LD   BIT(2)
75 #define IDMAC_DES0_FD   BIT(3)
76 #define IDMAC_DES0_CH   BIT(4)
77 #define IDMAC_DES0_ER   BIT(5)
78 #define IDMAC_DES0_CES  BIT(30)
79 #define IDMAC_DES0_OWN  BIT(31)
80
81         u32             des1;   /* Buffer sizes */
82 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
83         ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
84
85         u32             des2;   /* buffer 1 physical address */
86
87         u32             des3;   /* buffer 2 physical address */
88 };
89 #endif /* CONFIG_MMC_DW_IDMAC */
90
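/*
 * Tuning block patterns defined by the SD (CMD19) and eMMC HS200 (CMD21)
 * specifications. The card returns these fixed patterns during tuning and
 * the received data is compared against them to select a sample phase.
 */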
91 static const u8 tuning_blk_pattern_4bit[] = {
92         0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
93         0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
94         0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
95         0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
96         0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
97         0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
98         0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
99         0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
100 };
101
102 static const u8 tuning_blk_pattern_8bit[] = {
103         0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
104         0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
105         0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
106         0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
107         0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
108         0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
109         0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
110         0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
111         0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
112         0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
113         0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
114         0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
115         0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
116         0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
117         0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
118         0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
119 };
120
121 /* Print all registers of the current host */
122 static int dw_mci_regs_printk(struct dw_mci *host)
123 {
124     struct sdmmc_reg *regs = dw_mci_regs;
125
126     while( regs->name != 0 ){
127         printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
128         regs++;
129     }
130     printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
131     return 0;
132 }
133
134
135 #if defined(CONFIG_DEBUG_FS)
136 static int dw_mci_req_show(struct seq_file *s, void *v)
137 {
138         struct dw_mci_slot *slot = s->private;
139         struct mmc_request *mrq;
140         struct mmc_command *cmd;
141         struct mmc_command *stop;
142         struct mmc_data *data;
143
144         /* Make sure we get a consistent snapshot */
145         spin_lock_bh(&slot->host->lock);
146         mrq = slot->mrq;
147
148         if (mrq) {
149                 cmd = mrq->cmd;
150                 data = mrq->data;
151                 stop = mrq->stop;
152
153                 if (cmd)
154                         seq_printf(s,
155                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
156                                    cmd->opcode, cmd->arg, cmd->flags,
157                                    cmd->resp[0], cmd->resp[1], cmd->resp[2],
158                                    cmd->resp[3], cmd->error);
159                 if (data)
160                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
161                                    data->bytes_xfered, data->blocks,
162                                    data->blksz, data->flags, data->error);
163                 if (stop)
164                         seq_printf(s,
165                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
166                                    stop->opcode, stop->arg, stop->flags,
167                                    stop->resp[0], stop->resp[1], stop->resp[2],
168                                    stop->resp[3], stop->error);
169         }
170
171         spin_unlock_bh(&slot->host->lock);
172
173         return 0;
174 }
175
176 static int dw_mci_req_open(struct inode *inode, struct file *file)
177 {
178         return single_open(file, dw_mci_req_show, inode->i_private);
179 }
180
181 static const struct file_operations dw_mci_req_fops = {
182         .owner          = THIS_MODULE,
183         .open           = dw_mci_req_open,
184         .read           = seq_read,
185         .llseek         = seq_lseek,
186         .release        = single_release,
187 };
188
189 static int dw_mci_regs_show(struct seq_file *s, void *v)
190 {
191         seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
192         seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
193         seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
194         seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
195         seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
196         seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
197
198         return 0;
199 }
200
201 static int dw_mci_regs_open(struct inode *inode, struct file *file)
202 {
203         return single_open(file, dw_mci_regs_show, inode->i_private);
204 }
205
206 static const struct file_operations dw_mci_regs_fops = {
207         .owner          = THIS_MODULE,
208         .open           = dw_mci_regs_open,
209         .read           = seq_read,
210         .llseek         = seq_lseek,
211         .release        = single_release,
212 };
213
214 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
215 {
216         struct mmc_host *mmc = slot->mmc;
217         struct dw_mci *host = slot->host;
218         struct dentry *root;
219         struct dentry *node;
220
221         root = mmc->debugfs_root;
222         if (!root)
223                 return;
224
225         node = debugfs_create_file("regs", S_IRUSR, root, host,
226                                    &dw_mci_regs_fops);
227         if (!node)
228                 goto err;
229
230         node = debugfs_create_file("req", S_IRUSR, root, slot,
231                                    &dw_mci_req_fops);
232         if (!node)
233                 goto err;
234
235         node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
236         if (!node)
237                 goto err;
238
239         node = debugfs_create_x32("pending_events", S_IRUSR, root,
240                                   (u32 *)&host->pending_events);
241         if (!node)
242                 goto err;
243
244         node = debugfs_create_x32("completed_events", S_IRUSR, root,
245                                   (u32 *)&host->completed_events);
246         if (!node)
247                 goto err;
248
249         return;
250
251 err:
252         dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
253 }
254 #endif /* defined(CONFIG_DEBUG_FS) */
255
256 static void dw_mci_set_timeout(struct dw_mci *host)
257 {
258         /* timeout (maximum) */
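        /*
         * TMOUT[7:0] is the response timeout and TMOUT[31:8] the data
         * timeout, both counted in card clock cycles.
         */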
259         mci_writel(host, TMOUT, 0xffffffff);
260 }
261
262 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
263 {
264         struct mmc_data *data;
265         struct dw_mci_slot *slot = mmc_priv(mmc);
266         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
267         u32 cmdr;
268         cmd->error = -EINPROGRESS;
269
270         cmdr = cmd->opcode;
271
272         if (cmdr == MMC_STOP_TRANSMISSION)
273                 cmdr |= SDMMC_CMD_STOP;
274         else
275                 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
276
277         if (cmd->flags & MMC_RSP_PRESENT) {
278                 /* We expect a response, so set this bit */
279                 cmdr |= SDMMC_CMD_RESP_EXP;
280                 if (cmd->flags & MMC_RSP_136)
281                         cmdr |= SDMMC_CMD_RESP_LONG;
282         }
283
284         if (cmd->flags & MMC_RSP_CRC)
285                 cmdr |= SDMMC_CMD_RESP_CRC;
286
287         data = cmd->data;
288         if (data) {
289                 cmdr |= SDMMC_CMD_DAT_EXP;
290                 if (data->flags & MMC_DATA_STREAM)
291                         cmdr |= SDMMC_CMD_STRM_MODE;
292                 if (data->flags & MMC_DATA_WRITE)
293                         cmdr |= SDMMC_CMD_DAT_WR;
294         }
295
296         if (drv_data && drv_data->prepare_command)
297                 drv_data->prepare_command(slot->host, &cmdr);
298
299         return cmdr;
300 }
301
302 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
303 {
304         struct mmc_command *stop;
305         u32 cmdr;
306
307         if (!cmd->data)
308                 return 0;
309
310         stop = &host->stop_abort;
311         cmdr = cmd->opcode;
312         memset(stop, 0, sizeof(struct mmc_command));
313
314         if (cmdr == MMC_READ_SINGLE_BLOCK ||
315             cmdr == MMC_READ_MULTIPLE_BLOCK ||
316             cmdr == MMC_WRITE_BLOCK ||
317             cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
318                 stop->opcode = MMC_STOP_TRANSMISSION;
319                 stop->arg = 0;
320                 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
321         } else if (cmdr == SD_IO_RW_EXTENDED) {
322                 stop->opcode = SD_IO_RW_DIRECT;
323                 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
324                              ((cmd->arg >> 28) & 0x7);
325                 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
326         } else {
327                 return 0;
328         }
329
330         cmdr = stop->opcode | SDMMC_CMD_STOP |
331                 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
332
333         return cmdr;
334 }
335
336 static void dw_mci_start_command(struct dw_mci *host,
337                                  struct mmc_command *cmd, u32 cmd_flags)
338 {
339         host->cmd = cmd;
340         dev_vdbg(host->dev,
341                  "start command: ARGR=0x%08x CMDR=0x%08x\n",
342                  cmd->arg, cmd_flags);
343
344         mci_writel(host, CMDARG, cmd->arg);
345         wmb();
346     MMC_DBG_INFO_FUNC(host->mmc,"%d..%s start cmd=%d, arg=0x%x[%s]",__LINE__, __FUNCTION__,cmd->opcode, cmd->arg,mmc_hostname(host->mmc));
347     //dw_mci_regs_printk(host);
348
349     if(host->mmc->hold_reg_flag)
350         cmd_flags |= SDMMC_CMD_USE_HOLD_REG; /* must be fixed to 1 on some SoCs, e.g. RK3188 */
351         
352         mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
353 }
354
355 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
356 {
357         dw_mci_start_command(host, data->stop, host->stop_cmdr);
358 }
359
360 /* DMA interface functions */
361 static void dw_mci_stop_dma(struct dw_mci *host)
362 {
363         if (host->using_dma) {
364                 host->dma_ops->stop(host);
365                 host->dma_ops->cleanup(host);
366         }
367
368         /* Data transfer was stopped by the interrupt handler */
369         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
370 }
371
372 static int dw_mci_get_dma_dir(struct mmc_data *data)
373 {
374         if (data->flags & MMC_DATA_WRITE)
375                 return DMA_TO_DEVICE;
376         else
377                 return DMA_FROM_DEVICE;
378 }
379
380 #ifdef CONFIG_MMC_DW_IDMAC
381 static void dw_mci_dma_cleanup(struct dw_mci *host)
382 {
383         struct mmc_data *data = host->data;
384
385         if (data)
386                 if (!data->host_cookie)
387                         dma_unmap_sg(host->dev,
388                                      data->sg,
389                                      data->sg_len,
390                                      dw_mci_get_dma_dir(data));
391 }
392
393 static void dw_mci_idmac_reset(struct dw_mci *host)
394 {
395         u32 bmod = mci_readl(host, BMOD);
396         /* Software reset of DMA */
397         bmod |= SDMMC_IDMAC_SWRESET;
398         mci_writel(host, BMOD, bmod);
399 }
400
401 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
402 {
403         u32 temp;
404
405         /* Disable and reset the IDMAC interface */
406         temp = mci_readl(host, CTRL);
407         temp &= ~SDMMC_CTRL_USE_IDMAC;
408         temp |= SDMMC_CTRL_DMA_RESET;
409         mci_writel(host, CTRL, temp);
410
411         /* Stop the IDMAC running */
412         temp = mci_readl(host, BMOD);
413         temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
414         temp |= SDMMC_IDMAC_SWRESET;
415         mci_writel(host, BMOD, temp);
416 }
417
418 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
419 {
420         struct mmc_data *data = host->data;
421
422         dev_vdbg(host->dev, "DMA complete\n");
423  //   MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
424  //       host->mrq->cmd->opcode,host->mrq->cmd->arg,data->blocks,data->blksz,mmc_hostname(host->mmc));
425
426         host->dma_ops->cleanup(host);
427
428         /*
429          * If the card was removed, data will be NULL. No point in trying to
430          * send the stop command or waiting for NBUSY in this case.
431          */
432         if (data) {
433                 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
434                 tasklet_schedule(&host->tasklet);
435         }
436 }
437
438 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
439                                     unsigned int sg_len)
440 {
441         int i;
442         struct idmac_desc *desc = host->sg_cpu;
443
444         for (i = 0; i < sg_len; i++, desc++) {
445                 unsigned int length = sg_dma_len(&data->sg[i]);
446                 u32 mem_addr = sg_dma_address(&data->sg[i]);
447
448                 /* Set the OWN bit and disable interrupts for this descriptor */
449                 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
450
451                 /* Buffer length */
452                 IDMAC_SET_BUFFER1_SIZE(desc, length);
453
454                 /* Physical address to DMA to/from */
455                 desc->des2 = mem_addr;
456         }
457
458         /* Set first descriptor */
459         desc = host->sg_cpu;
460         desc->des0 |= IDMAC_DES0_FD;
461
462         /* Set last descriptor */
463         desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
464         desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
465         desc->des0 |= IDMAC_DES0_LD;
466
467         wmb();
468 }
469
470 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
471 {
472         u32 temp;
473
474         dw_mci_translate_sglist(host, host->data, sg_len);
475
476         /* Select IDMAC interface */
477         temp = mci_readl(host, CTRL);
478         temp |= SDMMC_CTRL_USE_IDMAC;
479         mci_writel(host, CTRL, temp);
480
481         wmb();
482
483         /* Enable the IDMAC */
484         temp = mci_readl(host, BMOD);
485         temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
486         mci_writel(host, BMOD, temp);
487
488         /* Start it running */
489         mci_writel(host, PLDMND, 1);
490 }
491
492 static int dw_mci_idmac_init(struct dw_mci *host)
493 {
494         struct idmac_desc *p;
495         int i;
496
497         /* Number of descriptors in the ring buffer */
498         host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
499
500         /* Forward link the descriptor list */
501         for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
502                 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
503
504         /* Set the last descriptor as the end-of-ring descriptor */
505         p->des3 = host->sg_dma;
506         p->des0 = IDMAC_DES0_ER;
507
508         dw_mci_idmac_reset(host);
509
510         /* Mask out interrupts - get Tx & Rx complete only */
511         mci_writel(host, IDSTS, IDMAC_INT_CLR);
512         mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
513                    SDMMC_IDMAC_INT_TI);
514
515         /* Set the descriptor base address */
516         mci_writel(host, DBADDR, host->sg_dma);
517         return 0;
518 }
519
520 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
521         .init = dw_mci_idmac_init,
522         .start = dw_mci_idmac_start_dma,
523         .stop = dw_mci_idmac_stop_dma,
524         .complete = dw_mci_idmac_complete_dma,
525         .cleanup = dw_mci_dma_cleanup,
526 };
527 #endif /* CONFIG_MMC_DW_IDMAC */
528
529 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
530                                    struct mmc_data *data,
531                                    bool next)
532 {
533         struct scatterlist *sg;
534         unsigned int i, sg_len;
535
536         if (!next && data->host_cookie)
537                 return data->host_cookie;
538
539         /*
540          * We don't do DMA on "complex" transfers, i.e. with
541          * non-word-aligned buffers or lengths. Also, we don't bother
542          * with all the DMA setup overhead for short transfers.
543          */
544         if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
545                 return -EINVAL;
546
547         if (data->blksz & 3)
548                 return -EINVAL;
549
550         for_each_sg(data->sg, sg, data->sg_len, i) {
551                 if (sg->offset & 3 || sg->length & 3)
552                         return -EINVAL;
553         }
554
555         sg_len = dma_map_sg(host->dev,
556                             data->sg,
557                             data->sg_len,
558                             dw_mci_get_dma_dir(data));
559         if (sg_len == 0)
560                 return -EINVAL;
561
562         if (next)
563                 data->host_cookie = sg_len;
564
565         return sg_len;
566 }
567
568 static void dw_mci_pre_req(struct mmc_host *mmc,
569                            struct mmc_request *mrq,
570                            bool is_first_req)
571 {
572         struct dw_mci_slot *slot = mmc_priv(mmc);
573         struct mmc_data *data = mrq->data;
574
575         if (!slot->host->use_dma || !data)
576                 return;
577
578         if (data->host_cookie) {
579                 data->host_cookie = 0;
580                 return;
581         }
582
583         if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
584                 data->host_cookie = 0;
585 }
586
587 static void dw_mci_post_req(struct mmc_host *mmc,
588                             struct mmc_request *mrq,
589                             int err)
590 {
591         struct dw_mci_slot *slot = mmc_priv(mmc);
592         struct mmc_data *data = mrq->data;
593
594         if (!slot->host->use_dma || !data)
595                 return;
596
597         if (data->host_cookie)
598                 dma_unmap_sg(slot->host->dev,
599                              data->sg,
600                              data->sg_len,
601                              dw_mci_get_dma_dir(data));
602         data->host_cookie = 0;
603 }
604
605 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
606 {
607 #ifdef CONFIG_MMC_DW_IDMAC
608         unsigned int blksz = data->blksz;
609         const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
610         u32 fifo_width = 1 << host->data_shift;
611         u32 blksz_depth = blksz / fifo_width, fifoth_val;
612         u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
613         int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
614
615         tx_wmark = (host->fifo_depth) / 2;
616         tx_wmark_invers = host->fifo_depth - tx_wmark;
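        /*
         * FIFOTH packs the DMA burst size (MSIZE, encoded as an index into
         * mszs[]) together with the RX and TX watermarks. The loop below
         * picks the largest burst that evenly divides both the block depth
         * and the TX watermark complement, so DMA bursts line up with the
         * FIFO watermarks.
         */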
617
618         /*
619          * MSIZE is '1',
620          * if blksz is not a multiple of the FIFO width
621          */
622         if (blksz % fifo_width) {
623                 msize = 0;
624                 rx_wmark = 1;
625                 goto done;
626         }
627
628         do {
629                 if (!((blksz_depth % mszs[idx]) ||
630                      (tx_wmark_invers % mszs[idx]))) {
631                         msize = idx;
632                         rx_wmark = mszs[idx] - 1;
633                         break;
634                 }
635         } while (--idx > 0);
636         /*
637          * idx == 0 is never tried; if no burst size matches,
638          * the initial values (msize = 0, rx_wmark = 1) are used
639          */
640 done:
641         fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
642         mci_writel(host, FIFOTH, fifoth_val);
643 #endif
644 }
645
646 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
647 {
648         unsigned int blksz = data->blksz;
649         u32 blksz_depth, fifo_depth;
650         u16 thld_size;
651
652         WARN_ON(!(data->flags & MMC_DATA_READ));
653
654         if (host->timing != MMC_TIMING_MMC_HS200 &&
655             host->timing != MMC_TIMING_UHS_SDR104)
656                 goto disable;
657
658         blksz_depth = blksz / (1 << host->data_shift);
659         fifo_depth = host->fifo_depth;
660
661         if (blksz_depth > fifo_depth)
662                 goto disable;
663
664         /*
665          * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
666          * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
667          * Currently just choose blksz.
668          */
669         thld_size = blksz;
670         mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
671         return;
672
673 disable:
674         mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
675 }
676
677 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
678 {
679         int sg_len;
680         u32 temp;
681
682         host->using_dma = 0;
683
684         /* If we don't have a channel, we can't do DMA */
685         if (!host->use_dma)
686                 return -ENODEV;
687
688         sg_len = dw_mci_pre_dma_transfer(host, data, 0);
689         if (sg_len < 0) {
690                 host->dma_ops->stop(host);
691                 return sg_len;
692         }
693
694         host->using_dma = 1;
695
696         dev_vdbg(host->dev,
697                  "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
698                  (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
699                  sg_len);
700
701         /*
702          * Decide the MSIZE and RX/TX Watermark.
703          * If current block size is same with previous size,
704          * no need to update fifoth.
705          */
706         if (host->prev_blksz != data->blksz)
707                 dw_mci_adjust_fifoth(host, data);
708
709         /* Enable the DMA interface */
710         temp = mci_readl(host, CTRL);
711         temp |= SDMMC_CTRL_DMA_ENABLE;
712         mci_writel(host, CTRL, temp);
713
714         /* Disable RX/TX IRQs, let DMA handle it */
715         temp = mci_readl(host, INTMASK);
716         temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
717         mci_writel(host, INTMASK, temp);
718
719         host->dma_ops->start(host, sg_len);
720
721         return 0;
722 }
723
724 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
725 {
726         u32 temp;
727
728         data->error = -EINPROGRESS;
729
730         WARN_ON(host->data);
731         host->sg = NULL;
732         host->data = data;
733
734         if (data->flags & MMC_DATA_READ) {
735                 host->dir_status = DW_MCI_RECV_STATUS;
736                 dw_mci_ctrl_rd_thld(host, data);
737         } else {
738                 host->dir_status = DW_MCI_SEND_STATUS;
739         }
740         
741     MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
742          data->blocks, data->blksz, mmc_hostname(host->mmc));
743
744         if (dw_mci_submit_data_dma(host, data)) {
745                 int flags = SG_MITER_ATOMIC;
746                 if (host->data->flags & MMC_DATA_READ)
747                         flags |= SG_MITER_TO_SG;
748                 else
749                         flags |= SG_MITER_FROM_SG;
750
751                 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
752                 host->sg = data->sg;
753                 host->part_buf_start = 0;
754                 host->part_buf_count = 0;
755
756                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
757                 temp = mci_readl(host, INTMASK);
758                 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
759                 mci_writel(host, INTMASK, temp);
760
761                 temp = mci_readl(host, CTRL);
762                 temp &= ~SDMMC_CTRL_DMA_ENABLE;
763                 mci_writel(host, CTRL, temp);
764
765                 /*
766                  * Use the initial fifoth_val for PIO mode.
767                  * If the next issued data may be transferred by DMA mode,
768                  * prev_blksz should be invalidated.
769                  */
770                 mci_writel(host, FIFOTH, host->fifoth_val);
771                 host->prev_blksz = 0;
772         } else {
773                 /*
774                  * Keep the current block size.
775                  * It will be used to decide whether to update
776                  * fifoth register next time.
777                  */
778                 host->prev_blksz = data->blksz;
779         }
780 }
781
782 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
783 {
784         struct dw_mci *host = slot->host;
785         unsigned long timeout = jiffies + msecs_to_jiffies(500);
786         unsigned int cmd_status = 0;
787
788         mci_writel(host, CMDARG, arg);
789         wmb();
790         mci_writel(host, CMD, SDMMC_CMD_START | cmd);
791
792         while (time_before(jiffies, timeout)) {
793                 cmd_status = mci_readl(host, CMD);
794                 if (!(cmd_status & SDMMC_CMD_START))
795                         return;
796         }
797         dev_err(&slot->mmc->class_dev,
798                 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
799                 cmd, arg, cmd_status);
800 }
801
802 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
803 {
804         struct dw_mci *host = slot->host;
805         unsigned int clock = slot->clock;
806         u32 div;
807         u32 clk_en_a;
808
809         if (!clock) {
810                 mci_writel(host, CLKENA, 0);
811                 mci_send_cmd(slot,
812                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
813         } else if (clock != host->current_speed || force_clkinit) {
814                 div = host->bus_hz / clock;
815                 if (host->bus_hz % clock && host->bus_hz > clock)
816                         /*
817                          * move the + 1 after the divide to prevent
818                          * over-clocking the card.
819                          */
820                         div += 1;
821
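                /*
                 * CLKDIV holds half the divider: cclk_out = bus_hz /
                 * (2 * CLKDIV), with CLKDIV == 0 meaning the divider is
                 * bypassed and the card clock equals bus_hz.
                 */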
822                 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
823
824                 if ((clock << div) != slot->__clk_old || force_clkinit)
825                         dev_info(&slot->mmc->class_dev,
826                                  "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
827                                  slot->id, host->bus_hz, clock,
828                                  div ? ((host->bus_hz / div) >> 1) :
829                                  host->bus_hz, div);
830
831                 /* disable clock */
832                 mci_writel(host, CLKENA, 0);
833                 mci_writel(host, CLKSRC, 0);
834
835                 /* inform CIU */
836                 mci_send_cmd(slot,
837                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
838
839                 /* set clock to desired speed */
840                 mci_writel(host, CLKDIV, div);
841
842                 /* inform CIU */
843                 mci_send_cmd(slot,
844                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
845
846                 /* enable clock; only low power if no SDIO */
847                 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
848                 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
849                         clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
850                 mci_writel(host, CLKENA, clk_en_a);
851
852                 /* inform CIU */
853                 mci_send_cmd(slot,
854                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
855
856                 /* remember the clock/divider setting so changes are detected next time */
857                 slot->__clk_old = clock << div;
858         }
859
860         host->current_speed = clock;
861
862     if(slot->ctype != slot->pre_ctype)
863             MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]", \
864                 div ? ((host->bus_hz / div) >> 1):host->bus_hz, \
865                 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits", mmc_hostname(host->mmc));
866     slot->pre_ctype = slot->ctype;
867
868         /* Set the current slot bus width */
869         mci_writel(host, CTYPE, (slot->ctype << slot->id));
870
871 }
872
873 static void dw_mci_wait_unbusy(struct dw_mci *host)
874 {
875     unsigned int    timeout = SDMMC_DATA_TIMEOUT_SDIO;
876     unsigned long   time_loop;
877     unsigned int    status;
878
879     MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
880     
881     if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
882         timeout = SDMMC_DATA_TIMEOUT_EMMC;
883     else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
884         timeout = SDMMC_DATA_TIMEOUT_SD;
885         
886     time_loop = jiffies + msecs_to_jiffies(timeout);
887     do {
888         status = mci_readl(host, STATUS);
889         if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
890                 break;
891         //MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");   
892     } while (time_before(jiffies, time_loop));
893 }
894
895 static void __dw_mci_start_request(struct dw_mci *host,
896                                    struct dw_mci_slot *slot,
897                                    struct mmc_command *cmd)
898 {
899         struct mmc_request *mrq;
900         struct mmc_data *data;
901         u32 cmdflags;
902
903         mrq = slot->mrq;
904         if (host->pdata->select_slot)
905                 host->pdata->select_slot(slot->id);
906
907         host->cur_slot = slot;
908         host->mrq = mrq;
909 #if 0 /* added by xbw, 2014-03-12 */
910         /*clean FIFO if it is a new request*/
911     if(!(mrq->cmd->opcode & SDMMC_CMD_STOP)) {
912         MMC_DBG_INFO_FUNC("%d..%s: reset the ctrl.", __LINE__, __FUNCTION__);   
913         mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
914                                 SDMMC_CTRL_DMA_RESET));
915     }
916  #endif   
917     dw_mci_wait_unbusy(host);
918     
919         host->pending_events = 0;
920         host->completed_events = 0;
921         host->data_status = 0;
922
923         data = cmd->data;
924         if (data) {
925                 dw_mci_set_timeout(host);
926                 mci_writel(host, BYTCNT, data->blksz*data->blocks);
927                 mci_writel(host, BLKSIZ, data->blksz);
928         }
929
930         cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
931
932         /* this is the first command, send the initialization clock */
933         if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
934                 cmdflags |= SDMMC_CMD_INIT;
935
936         if (data) {
937                 dw_mci_submit_data(host, data);
938                 wmb();
939         }
940
941         dw_mci_start_command(host, cmd, cmdflags);
942
943         if (mrq->stop)
944                 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
945 }
946
947 static void dw_mci_start_request(struct dw_mci *host,
948                                  struct dw_mci_slot *slot)
949 {
950         struct mmc_request *mrq = slot->mrq;
951         struct mmc_command *cmd;
952         
953     MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
954         mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
955         
956         cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
957         __dw_mci_start_request(host, slot, cmd);
958 }
959
960 /* must be called with host->lock held */
961 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
962                                  struct mmc_request *mrq)
963 {
964         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
965                  host->state);
966
967         slot->mrq = mrq;
968
969         if (host->state == STATE_IDLE) {
970                 host->state = STATE_SENDING_CMD;
971                 dw_mci_start_request(host, slot);
972         } else {
973                 list_add_tail(&slot->queue_node, &host->queue);
974         }
975 }
976
977 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
978 {
979         struct dw_mci_slot *slot = mmc_priv(mmc);
980         struct dw_mci *host = slot->host;
981
982         WARN_ON(slot->mrq);
983
984         /*
985          * The check for card presence and queueing of the request must be
986          * atomic, otherwise the card could be removed in between and the
987          * request wouldn't fail until another card was inserted.
988          */
989         spin_lock_bh(&host->lock);
990
991         if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
992                 spin_unlock_bh(&host->lock);
993                 mrq->cmd->error = -ENOMEDIUM;
994                 
995                 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_request--request done, cmd=%d [%s]",mrq->cmd->opcode, mmc_hostname(host->mmc));
996             
997                 mmc_request_done(mmc, mrq);
998                 return;
999         }
1000     MMC_DBG_CMD_FUNC(host->mmc, "======>\n    pull a new request from the MMC framework into the dw_mci queue. cmd=%d(arg=0x%x)[%s]", \
1001         mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1002
1003         dw_mci_queue_request(host, slot, mrq);
1004
1005         spin_unlock_bh(&host->lock);
1006 }
1007
1008 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1009 {
1010         struct dw_mci_slot *slot = mmc_priv(mmc);
1011         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1012         u32 regs;
1013
1014         switch (ios->bus_width) {
1015         case MMC_BUS_WIDTH_4:
1016                 slot->ctype = SDMMC_CTYPE_4BIT;
1017                 break;  
1018         case MMC_BUS_WIDTH_8: 
1019                 slot->ctype = SDMMC_CTYPE_8BIT;
1020                 break;  
1021         default:
1022                 /* set default 1 bit mode */
1023                 slot->ctype = SDMMC_CTYPE_1BIT;
1024                 slot->pre_ctype = SDMMC_CTYPE_1BIT;
1025         }
1026
1027         regs = mci_readl(slot->host, UHS_REG);
1028
1029         /* DDR mode set */
1030         if (ios->timing == MMC_TIMING_UHS_DDR50)
1031                 regs |= ((0x1 << slot->id) << 16);
1032         else
1033                 regs &= ~((0x1 << slot->id) << 16);
1034
1035         mci_writel(slot->host, UHS_REG, regs);
1036         slot->host->timing = ios->timing;
1037
1038         /*
1039          * Use mirror of ios->clock to prevent race with mmc
1040          * core ios update when finding the minimum.
1041          */
1042         slot->clock = ios->clock;
1043
1044         if (drv_data && drv_data->set_ios)
1045                 drv_data->set_ios(slot->host, ios);
1046
1047         /* Slot specific timing and width adjustment */
1048         dw_mci_setup_bus(slot, false);
1049
1050         switch (ios->power_mode) {
1051         case MMC_POWER_UP:
1052                 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1053                 /* Power up slot */
1054                 if (slot->host->pdata->setpower)
1055                         slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1056                 regs = mci_readl(slot->host, PWREN);
1057                 regs |= (1 << slot->id);
1058                 mci_writel(slot->host, PWREN, regs);
1059                 break;
1060         case MMC_POWER_OFF:
1061                 /* Power down slot */
1062                 if (slot->host->pdata->setpower)
1063                         slot->host->pdata->setpower(slot->id, 0);
1064                 regs = mci_readl(slot->host, PWREN);
1065                 regs &= ~(1 << slot->id);
1066                 mci_writel(slot->host, PWREN, regs);
1067                 break;
1068         default:
1069                 break;
1070         }
1071 }
1072
1073 static int dw_mci_get_ro(struct mmc_host *mmc)
1074 {
1075         int read_only;
1076         struct dw_mci_slot *slot = mmc_priv(mmc);
1077         struct dw_mci_board *brd = slot->host->pdata;
1078
1079         /* Use platform get_ro function, else try on board write protect */
1080         if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1081                 read_only = 0;
1082         else if (brd->get_ro)
1083                 read_only = brd->get_ro(slot->id);
1084         else if (gpio_is_valid(slot->wp_gpio))
1085                 read_only = gpio_get_value(slot->wp_gpio);
1086         else
1087                 read_only =
1088                         mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1089
1090         dev_dbg(&mmc->class_dev, "card is %s\n",
1091                 read_only ? "read-only" : "read-write");
1092
1093         return read_only;
1094 }
1095
1096 static int dw_mci_get_cd(struct mmc_host *mmc)
1097 {
1098         int present;
1099         struct dw_mci_slot *slot = mmc_priv(mmc);
1100         struct dw_mci_board *brd = slot->host->pdata;
1101         struct dw_mci *host = slot->host;
1102         int gpio_cd = mmc_gpio_get_cd(mmc);
1103         
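    /*
     * Hosts restricted to SDIO have no removable card; report the SDIO
     * device as always present.
     */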
1104     if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
1105         spin_lock_bh(&host->lock);
1106         set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1107         spin_unlock_bh(&host->lock);
1108         
1109         return 1;
1110     }
1111
1112         /* Use platform get_cd function, else try onboard card detect */
1113         if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1114                 present = 1;
1115         else if (brd->get_cd)
1116                 present = !brd->get_cd(slot->id);
1117         else if (!IS_ERR_VALUE(gpio_cd))
1118                 present = gpio_cd;
1119         else
1120                 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1121                         == 0 ? 1 : 0;
1122
1123         spin_lock_bh(&host->lock);
1124         if (present) {
1125                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1126                 dev_dbg(&mmc->class_dev, "card is present\n");
1127         } else {
1128                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1129                 dev_dbg(&mmc->class_dev, "card is not present\n");
1130         }
1131         spin_unlock_bh(&host->lock);
1132
1133         return present;
1134 }
1135
1136 static void dw_mci_hw_reset(struct mmc_host *mmc)
1137 {
1138     struct dw_mci_slot *slot = mmc_priv(mmc);
1139
1140     /* 
1141      * According to eMMC spec 
1142      * tRstW >= 1us ;   RST_n pulse width
1143      * tRSCA >= 200us ; RST_n to Command time
1144      * tRSTH >= 1us ;   RST_n high period
1145      */
1146
1147     mci_writel(slot->host, RST_n, 0x1);
1148     dsb();
1149     udelay(10); /* 10us margin for poor-quality eMMC parts */
1150
1151     mci_writel(slot->host, RST_n, 0x0);
1152     dsb();
1153     usleep_range(300, 1000); /* at least 300us (> the 200us tRSCA minimum) */
1154     
1155 }
1156
1157 /*
1158  * Disable low power mode.
1159  *
1160  * Low power mode will stop the card clock when idle.  According to the
1161  * description of the CLKENA register we should disable low power mode
1162  * for SDIO cards if we need SDIO interrupts to work.
1163  *
1164  * This function is fast if low power mode is already disabled.
1165  */
1166 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1167 {
1168         struct dw_mci *host = slot->host;
1169         u32 clk_en_a;
1170         const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1171
1172         clk_en_a = mci_readl(host, CLKENA);
1173
1174         if (clk_en_a & clken_low_pwr) {
1175                 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1176                 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1177                              SDMMC_CMD_PRV_DAT_WAIT, 0);
1178         }
1179 }
1180
1181 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1182 {
1183         struct dw_mci_slot *slot = mmc_priv(mmc);
1184         struct dw_mci *host = slot->host;
1185         u32 int_mask;
1186
1187         /* Enable/disable Slot Specific SDIO interrupt */
1188         int_mask = mci_readl(host, INTMASK);
1189         if (enb) {
1190                 /*
1191                  * Turn off low power mode if it was enabled.  This is a bit of
1192                  * a heavy operation and we disable / enable IRQs a lot, so
1193                  * we'll leave low power mode disabled and it will get
1194                  * re-enabled again in dw_mci_setup_bus().
1195                  */
1196                 dw_mci_disable_low_power(slot);
1197
1198                 mci_writel(host, INTMASK,
1199                            (int_mask | SDMMC_INT_SDIO(slot->id)));
1200         } else {
1201                 mci_writel(host, INTMASK,
1202                            (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1203         }
1204 }
1205
1206 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1207 {
1208         struct dw_mci_slot *slot = mmc_priv(mmc);
1209         struct dw_mci *host = slot->host;
1210         const struct dw_mci_drv_data *drv_data = host->drv_data;
1211         struct dw_mci_tuning_data tuning_data;
1212         int err = -ENOSYS;
1213
1214         if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1215                 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1216                         tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1217                         tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1218                 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1219                         tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1220                         tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1221                 } else {
1222                         return -EINVAL;
1223                 }
1224         } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1225                 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1226                 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1227         } else {
1228                 dev_err(host->dev,
1229                         "Undefined command(%d) for tuning\n", opcode);
1230                 return -EINVAL;
1231         }
1232
1233         if (drv_data && drv_data->execute_tuning)
1234                 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1235         return err;
1236 }
1237
1238 static const struct mmc_host_ops dw_mci_ops = {
1239         .request                = dw_mci_request,
1240         .pre_req                = dw_mci_pre_req,
1241         .post_req               = dw_mci_post_req,
1242         .set_ios                = dw_mci_set_ios,
1243         .get_ro                 = dw_mci_get_ro,
1244         .get_cd                 = dw_mci_get_cd,
1245         .hw_reset       = dw_mci_hw_reset,
1246         .enable_sdio_irq        = dw_mci_enable_sdio_irq,
1247         .execute_tuning         = dw_mci_execute_tuning,
1248 };
1249
1250 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1251         __releases(&host->lock)
1252         __acquires(&host->lock)
1253 {
1254         if(DW_MCI_SEND_STATUS == host->dir_status){
1255             #if 0
1256             if( MMC_BUS_TEST_W != host->cmd->opcode){
1257                 if(host->data_status & SDMMC_INT_DCRC)
1258                     host->data->error = -EILSEQ;
1259                 else if(host->data_status & SDMMC_INT_EBE)
1260                     host->data->error = -ETIMEDOUT;
1261             } else {
1262                 dw_mci_wait_unbusy(host); 
1263             }
1264             #else
1265             dw_mci_wait_unbusy(host);
1266             #endif
1267             
1268         }
1269 }
1270
1271 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1272         __releases(&host->lock)
1273         __acquires(&host->lock)
1274 {
1275         struct dw_mci_slot *slot;
1276         struct mmc_host *prev_mmc = host->cur_slot->mmc;
1277
1278         WARN_ON(host->cmd || host->data);
1279         
1280     dw_mci_deal_data_end(host, mrq);
1281
1282         if(mrq->cmd)
1283        MMC_DBG_CMD_FUNC(host->mmc, " request end--request done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1284             mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1285         if(mrq->data)
1286        MMC_DBG_CMD_FUNC(host->mmc, " request end--request done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1287             mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1288
1289         host->cur_slot->mrq = NULL;
1290         host->mrq = NULL;
1291         if (!list_empty(&host->queue)) {
1292                 slot = list_entry(host->queue.next,
1293                                   struct dw_mci_slot, queue_node);
1294                 list_del(&slot->queue_node);
1295                 dev_vdbg(host->dev, "list not empty: %s is next\n",
1296                          mmc_hostname(slot->mmc));
1297                 host->state = STATE_SENDING_CMD;
1298                 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
1299                 dw_mci_start_request(host, slot);
1300         } else {
1301                 dev_vdbg(host->dev, "list empty\n");
1302                 host->state = STATE_IDLE;
1303         }
1304
1305         spin_unlock(&host->lock);
1306         mmc_request_done(prev_mmc, mrq);
1307         spin_lock(&host->lock);
1308 }
1309
1310 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1311 {
1312         u32 status = host->cmd_status;
1313
1314         host->cmd_status = 0;
1315
1316         /* Read the response from the card (up to 16 bytes) */
1317         if (cmd->flags & MMC_RSP_PRESENT) {
1318                 if (cmd->flags & MMC_RSP_136) {
1319                         cmd->resp[3] = mci_readl(host, RESP0);
1320                         cmd->resp[2] = mci_readl(host, RESP1);
1321                         cmd->resp[1] = mci_readl(host, RESP2);
1322                         cmd->resp[0] = mci_readl(host, RESP3);
1323                         
1324             MMC_DBG_INFO_FUNC(host->mmc," command complete [%s], \ncmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x", \
1325                     mmc_hostname(host->mmc), cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0]);
1326                 } else {
1327                         cmd->resp[0] = mci_readl(host, RESP0);
1328                         cmd->resp[1] = 0;
1329                         cmd->resp[2] = 0;
1330                         cmd->resp[3] = 0;                       
1331             MMC_DBG_INFO_FUNC(host->mmc, " command complete [%s], cmd=%d,resp[0]=0x%x",\
1332                     mmc_hostname(host->mmc),cmd->opcode, cmd->resp[0]);
1333                 }
1334         }
1335
1336         if (status & SDMMC_INT_RTO)
1337                 cmd->error = -ETIMEDOUT;
1338         else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1339                 cmd->error = -EILSEQ;
1340         else if (status & SDMMC_INT_RESP_ERR)
1341                 cmd->error = -EIO;
1342         else
1343                 cmd->error = 0;
1344     MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=0x%x [%s]",cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1345
1346         if (cmd->error) {
1347             if(MMC_SEND_STATUS != cmd->opcode)
1348             MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=0x%x [%s]",\
1349                 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1350                 
1351                 /* newer ip versions need a delay between retries */
1352                 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1353                         mdelay(20);
1354         }
1355
1356 }
1357
1358 static void dw_mci_tasklet_func(unsigned long priv)
1359 {
1360         struct dw_mci *host = (struct dw_mci *)priv;
1361     struct dw_mci_slot *slot = mmc_priv(host->mmc);
1362         struct mmc_data *data;
1363         struct mmc_command *cmd;
1364         enum dw_mci_state state;
1365         enum dw_mci_state prev_state;
1366         u32 status, ctrl;
1367
1368         spin_lock(&host->lock);
1369
1370         state = host->state;
1371         data = host->data;
1372
1373         do {
1374                 prev_state = state;
1375
1376                 switch (state) {
1377                 case STATE_IDLE:
1378                         break;
1379
1380                 case STATE_SENDING_CMD:
1381                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1382                                                 &host->pending_events))
1383                                 break;
1384
1385                         cmd = host->cmd;
1386                         host->cmd = NULL;
1387                         set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1388                         dw_mci_command_complete(host, cmd);
1389                         if (cmd == host->mrq->sbc && !cmd->error) {
1390                                 prev_state = state = STATE_SENDING_CMD;
1391                                 __dw_mci_start_request(host, host->cur_slot,
1392                                                        host->mrq->cmd);
1393                                 goto unlock;
1394                         }
1395                         
1396             if (cmd->data && cmd->error) {
1397                                 dw_mci_stop_dma(host);
1398                                 #if 1
1399                 if (data->stop) {
1400                     send_stop_cmd(host, data);
1401                     state = STATE_SENDING_STOP;
1402                     break;
1403                 } else {
1404                     host->data = NULL;
1405                 }
1406                                 #else
1407                                 send_stop_abort(host, data);
1408                                 state = STATE_SENDING_STOP;
1409                                 break;
1410                                 #endif
1411                         }
1412
1413
1414                         if (!host->mrq->data || cmd->error) {
1415                                 dw_mci_request_end(host, host->mrq);
1416                                 goto unlock;
1417                         }
1418
1419                         prev_state = state = STATE_SENDING_DATA;
1420                         /* fall through */
1421
1422                 case STATE_SENDING_DATA:
1423                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1424                                                &host->pending_events)) {
1425                                 dw_mci_stop_dma(host);
1426                                 #if 1
1427                                 if (data->stop)
1428                                         send_stop_cmd(host, data);
1429                                 #else
1430                                 send_stop_abort(host, data);
1431                                 #endif
1432                                 state = STATE_DATA_ERROR;
1433                                 break;
1434                         }
1435             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
1436                         prev_state,state, mmc_hostname(host->mmc));
1437
1438                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1439                                                 &host->pending_events))
1440                                 break;
1441             MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]:  STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
1442                         prev_state,state,mmc_hostname(host->mmc));
1443             
1444                         set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1445                         prev_state = state = STATE_DATA_BUSY;
1446                         /* fall through */
1447
1448                 case STATE_DATA_BUSY:
1449                         if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1450                                                 &host->pending_events))
1451                                 break;
1452                                 
1453                         dw_mci_deal_data_end(host, host->mrq);                  
1454             MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
1455                     prev_state,state,mmc_hostname(host->mmc));
1456
1457                         host->data = NULL;
1458                         set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1459                         status = host->data_status;
1460
1461                         if (status & DW_MCI_DATA_ERROR_FLAGS) {
1462                                 if ((SDMMC_CTYPE_1BIT != slot->ctype) &&
1463                                     (MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
1464                                         MMC_DBG_ERR_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",
1465                                                 prev_state, state, status, mmc_hostname(host->mmc));
1466                                 if (status & SDMMC_INT_DRTO) {
1467                                         data->error = -ETIMEDOUT;
1468                                 } else if (status & SDMMC_INT_DCRC) {
1469                                         data->error = -EILSEQ;
1470                                 } else if (status & SDMMC_INT_EBE &&
1471                                            host->dir_status ==
1472                                                         DW_MCI_SEND_STATUS) {
1473                                         /*
1474                                          * No data CRC status was returned.
1475                                          * The number of bytes transferred will
1476                                          * be exaggerated in PIO mode.
1477                                          */
1478                                         data->bytes_xfered = 0;
1479                                         data->error = -ETIMEDOUT;
1480                                 } else {
1481                                         dev_err(host->dev,
1482                                                 "data FIFO error "
1483                                                 "(status=%08x)\n",
1484                                                 status);
1485                                         data->error = -EIO;
1486                                 }
1487                                 /*
1488                                  * After an error, there may be data lingering
1489                                  * in the FIFO, so reset it - doing so
1490                                  * generates a block interrupt, hence setting
1491                                  * the scatter-gather pointer to NULL.
1492                                  */
1493                                 sg_miter_stop(&host->sg_miter);
1494                                 host->sg = NULL;
1495                                 ctrl = mci_readl(host, CTRL);
1496                                 ctrl |= SDMMC_CTRL_FIFO_RESET;
1497                                 mci_writel(host, CTRL, ctrl);
1498                         } else {
1499                                 data->bytes_xfered = data->blocks * data->blksz;
1500                                 data->error = 0;
1501                         }
1502
1503                         if (!data->stop) {
1504                                 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]",
1505                                         prev_state, state, mmc_hostname(host->mmc));
1506                                 dw_mci_request_end(host, host->mrq);
1507                                 goto unlock;
1508                         }
1509                         MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop. [%s]",
1510                                 prev_state, state, mmc_hostname(host->mmc));
1511
1512                         if (host->mrq->sbc && !data->error) {
1513                                 data->stop->error = 0;
1514                                 
1515                 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
1516                     prev_state,state,mmc_hostname(host->mmc));
1517
1518                                 dw_mci_request_end(host, host->mrq);
1519                                 goto unlock;
1520                         }
1521
1522                         prev_state = state = STATE_SENDING_STOP;
1523                         if (!data->error)
1524                             send_stop_cmd(host, data);
1525                         #if 0
1526                         if (data->stop && !data->error) {
1527                                 /* stop command for open-ended transfer*/
1528                                 
1529                                 send_stop_abort(host, data);
1530                         }
1531                         #endif
1532                         /* fall through */
1533             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
1534                 prev_state,state,mmc_hostname(host->mmc));
1535
1536                 case STATE_SENDING_STOP:
1537                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1538                                                 &host->pending_events))
1539                                 break;
1540             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
1541                 prev_state,state,mmc_hostname(host->mmc));
1542                         
1543              /* CMD error in data command */
1544                         if (host->mrq->cmd->error && host->mrq->data) {
1545                                 sg_miter_stop(&host->sg_miter);
1546                                 host->sg = NULL;
1547                                 ctrl = mci_readl(host, CTRL);
1548                                 ctrl |= SDMMC_CTRL_FIFO_RESET;
1549                                 mci_writel(host, CTRL, ctrl);
1550                         }
1551
1552                         host->cmd = NULL;
1553                         host->data = NULL;
1554                         #if 1
1555                         dw_mci_command_complete(host, host->mrq->stop);
1556                         #else
1557                         if (host->mrq->stop)
1558                                 dw_mci_command_complete(host, host->mrq->stop);
1559                         else
1560                                 host->cmd_status = 0;
1561                         #endif
1562             
1563                         dw_mci_request_end(host, host->mrq);
1564                         goto unlock;
1565
1566                 case STATE_DATA_ERROR:
1567                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1568                                                 &host->pending_events))
1569                                 break;
1570
1571                         state = STATE_DATA_BUSY;
1572                         break;
1573                 }
1574         } while (state != prev_state);
1575
1576         host->state = state;
1577 unlock:
1578         spin_unlock(&host->lock);
1579
1580 }
1581
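/*
 * PIO helpers. The data FIFO is only accessed in full host-bus-width
 * words (16/32/64 bit, see host->data_shift), so bytes that do not fill
 * a complete word are staged in host->part_buf between calls.
 */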
1582 /* push final bytes to part_buf, only use during push */
1583 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1584 {
1585         memcpy((void *)&host->part_buf, buf, cnt);
1586         host->part_buf_count = cnt;
1587 }
1588
1589 /* append bytes to part_buf, only use during push */
1590 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1591 {
1592         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1593         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1594         host->part_buf_count += cnt;
1595         return cnt;
1596 }
1597
1598 /* pull first bytes from part_buf, only use during pull */
1599 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1600 {
1601         cnt = min(cnt, (int)host->part_buf_count);
1602         if (cnt) {
1603                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1604                        cnt);
1605                 host->part_buf_count -= cnt;
1606                 host->part_buf_start += cnt;
1607         }
1608         return cnt;
1609 }
1610
1611 /* pull final bytes from the part_buf, assuming it's just been filled */
1612 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1613 {
1614         memcpy(buf, &host->part_buf, cnt);
1615         host->part_buf_start = cnt;
1616         host->part_buf_count = (1 << host->data_shift) - cnt;
1617 }
1618
1619 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1620 {
1621         struct mmc_data *data = host->data;
1622         int init_cnt = cnt;
1623
1624         /* try and push anything in the part_buf */
1625         if (unlikely(host->part_buf_count)) {
1626                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1627                 buf += len;
1628                 cnt -= len;
1629                 if (host->part_buf_count == 2) {
1630                         mci_writew(host, DATA(host->data_offset),
1631                                         host->part_buf16);
1632                         host->part_buf_count = 0;
1633                 }
1634         }
1635 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
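        /* arch cannot do efficient unaligned 16-bit accesses: bounce through an aligned stack buffer */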
1636         if (unlikely((unsigned long)buf & 0x1)) {
1637                 while (cnt >= 2) {
1638                         u16 aligned_buf[64];
1639                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1640                         int items = len >> 1;
1641                         int i;
1642                         /* memcpy from input buffer into aligned buffer */
1643                         memcpy(aligned_buf, buf, len);
1644                         buf += len;
1645                         cnt -= len;
1646                         /* push data from aligned buffer into fifo */
1647                         for (i = 0; i < items; ++i)
1648                                 mci_writew(host, DATA(host->data_offset),
1649                                                 aligned_buf[i]);
1650                 }
1651         } else
1652 #endif
1653         {
1654                 u16 *pdata = buf;
1655                 for (; cnt >= 2; cnt -= 2)
1656                         mci_writew(host, DATA(host->data_offset), *pdata++);
1657                 buf = pdata;
1658         }
1659         /* put anything remaining in the part_buf */
1660         if (cnt) {
1661                 dw_mci_set_part_bytes(host, buf, cnt);
1662                  /* Push data if we have reached the expected data length */
1663                 if ((data->bytes_xfered + init_cnt) ==
1664                     (data->blksz * data->blocks))
1665                         mci_writew(host, DATA(host->data_offset),
1666                                    host->part_buf16);
1667         }
1668 }
1669
1670 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1671 {
1672 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1673         if (unlikely((unsigned long)buf & 0x1)) {
1674                 while (cnt >= 2) {
1675                         /* pull data from fifo into aligned buffer */
1676                         u16 aligned_buf[64];
1677                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1678                         int items = len >> 1;
1679                         int i;
1680                         for (i = 0; i < items; ++i)
1681                                 aligned_buf[i] = mci_readw(host,
1682                                                 DATA(host->data_offset));
1683                         /* memcpy from aligned buffer into output buffer */
1684                         memcpy(buf, aligned_buf, len);
1685                         buf += len;
1686                         cnt -= len;
1687                 }
1688         } else
1689 #endif
1690         {
1691                 u16 *pdata = buf;
1692                 for (; cnt >= 2; cnt -= 2)
1693                         *pdata++ = mci_readw(host, DATA(host->data_offset));
1694                 buf = pdata;
1695         }
1696         if (cnt) {
1697                 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1698                 dw_mci_pull_final_bytes(host, buf, cnt);
1699         }
1700 }
1701
1702 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1703 {
1704         struct mmc_data *data = host->data;
1705         int init_cnt = cnt;
1706
1707         /* try and push anything in the part_buf */
1708         if (unlikely(host->part_buf_count)) {
1709                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1710                 buf += len;
1711                 cnt -= len;
1712                 if (host->part_buf_count == 4) {
1713                         mci_writel(host, DATA(host->data_offset),
1714                                         host->part_buf32);
1715                         host->part_buf_count = 0;
1716                 }
1717         }
1718 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1719         if (unlikely((unsigned long)buf & 0x3)) {
1720                 while (cnt >= 4) {
1721                         u32 aligned_buf[32];
1722                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1723                         int items = len >> 2;
1724                         int i;
1725                         /* memcpy from input buffer into aligned buffer */
1726                         memcpy(aligned_buf, buf, len);
1727                         buf += len;
1728                         cnt -= len;
1729                         /* push data from aligned buffer into fifo */
1730                         for (i = 0; i < items; ++i)
1731                                 mci_writel(host, DATA(host->data_offset),
1732                                                 aligned_buf[i]);
1733                 }
1734         } else
1735 #endif
1736         {
1737                 u32 *pdata = buf;
1738                 for (; cnt >= 4; cnt -= 4)
1739                         mci_writel(host, DATA(host->data_offset), *pdata++);
1740                 buf = pdata;
1741         }
1742         /* put anything remaining in the part_buf */
1743         if (cnt) {
1744                 dw_mci_set_part_bytes(host, buf, cnt);
1745                  /* Push data if we have reached the expected data length */
1746                 if ((data->bytes_xfered + init_cnt) ==
1747                     (data->blksz * data->blocks))
1748                         mci_writel(host, DATA(host->data_offset),
1749                                    host->part_buf32);
1750         }
1751 }
1752
1753 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1754 {
1755 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1756         if (unlikely((unsigned long)buf & 0x3)) {
1757                 while (cnt >= 4) {
1758                         /* pull data from fifo into aligned buffer */
1759                         u32 aligned_buf[32];
1760                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1761                         int items = len >> 2;
1762                         int i;
1763                         for (i = 0; i < items; ++i)
1764                                 aligned_buf[i] = mci_readl(host,
1765                                                 DATA(host->data_offset));
1766                         /* memcpy from aligned buffer into output buffer */
1767                         memcpy(buf, aligned_buf, len);
1768                         buf += len;
1769                         cnt -= len;
1770                 }
1771         } else
1772 #endif
1773         {
1774                 u32 *pdata = buf;
1775                 for (; cnt >= 4; cnt -= 4)
1776                         *pdata++ = mci_readl(host, DATA(host->data_offset));
1777                 buf = pdata;
1778         }
1779         if (cnt) {
1780                 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1781                 dw_mci_pull_final_bytes(host, buf, cnt);
1782         }
1783 }
1784
1785 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1786 {
1787         struct mmc_data *data = host->data;
1788         int init_cnt = cnt;
1789
1790         /* try and push anything in the part_buf */
1791         if (unlikely(host->part_buf_count)) {
1792                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1793                 buf += len;
1794                 cnt -= len;
1795
1796                 if (host->part_buf_count == 8) {
1797                         mci_writeq(host, DATA(host->data_offset),
1798                                         host->part_buf);
1799                         host->part_buf_count = 0;
1800                 }
1801         }
1802 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1803         if (unlikely((unsigned long)buf & 0x7)) {
1804                 while (cnt >= 8) {
1805                         u64 aligned_buf[16];
1806                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1807                         int items = len >> 3;
1808                         int i;
1809                         /* memcpy from input buffer into aligned buffer */
1810                         memcpy(aligned_buf, buf, len);
1811                         buf += len;
1812                         cnt -= len;
1813                         /* push data from aligned buffer into fifo */
1814                         for (i = 0; i < items; ++i)
1815                                 mci_writeq(host, DATA(host->data_offset),
1816                                                 aligned_buf[i]);
1817                 }
1818         } else
1819 #endif
1820         {
1821                 u64 *pdata = buf;
1822                 for (; cnt >= 8; cnt -= 8)
1823                         mci_writeq(host, DATA(host->data_offset), *pdata++);
1824                 buf = pdata;
1825         }
1826         /* put anything remaining in the part_buf */
1827         if (cnt) {
1828                 dw_mci_set_part_bytes(host, buf, cnt);
1829                 /* Push data if we have reached the expected data length */
1830                 if ((data->bytes_xfered + init_cnt) ==
1831                     (data->blksz * data->blocks))
1832                         mci_writeq(host, DATA(host->data_offset),
1833                                    host->part_buf);
1834         }
1835 }
1836
1837 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1838 {
1839 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1840         if (unlikely((unsigned long)buf & 0x7)) {
1841                 while (cnt >= 8) {
1842                         /* pull data from fifo into aligned buffer */
1843                         u64 aligned_buf[16];
1844                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1845                         int items = len >> 3;
1846                         int i;
1847                         for (i = 0; i < items; ++i)
1848                                 aligned_buf[i] = mci_readq(host,
1849                                                 DATA(host->data_offset));
1850                         /* memcpy from aligned buffer into output buffer */
1851                         memcpy(buf, aligned_buf, len);
1852                         buf += len;
1853                         cnt -= len;
1854                 }
1855         } else
1856 #endif
1857         {
1858                 u64 *pdata = buf;
1859                 for (; cnt >= 8; cnt -= 8)
1860                         *pdata++ = mci_readq(host, DATA(host->data_offset));
1861                 buf = pdata;
1862         }
1863         if (cnt) {
1864                 host->part_buf = mci_readq(host, DATA(host->data_offset));
1865                 dw_mci_pull_final_bytes(host, buf, cnt);
1866         }
1867 }
1868
1869 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1870 {
1871         int len;
1872
1873         /* get remaining partial bytes */
1874         len = dw_mci_pull_part_bytes(host, buf, cnt);
1875         if (unlikely(len == cnt))
1876                 return;
1877         buf += len;
1878         cnt -= len;
1879
1880         /* get the rest of the data */
1881         host->pull_data(host, buf, cnt);
1882 }
1883
1884 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1885 {
1886         struct sg_mapping_iter *sg_miter = &host->sg_miter;
1887         void *buf;
1888         unsigned int offset;
1889         struct mmc_data *data = host->data;
1890         int shift = host->data_shift;
1891         u32 status;
1892         unsigned int len;
1893         unsigned int remain, fcnt;
1894
1895         do {
1896                 if (!sg_miter_next(sg_miter))
1897                         goto done;
1898
1899                 host->sg = sg_miter->piter.sg;
1900                 buf = sg_miter->addr;
1901                 remain = sg_miter->length;
1902                 offset = 0;
1903
1904                 do {
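                        /* bytes available to read: FIFO word count scaled to bytes, plus any staged partial word */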
1905                         fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1906                                         << shift) + host->part_buf_count;
1907                         len = min(remain, fcnt);
1908                         if (!len)
1909                                 break;
1910                         dw_mci_pull_data(host, (void *)(buf + offset), len);
1911                         data->bytes_xfered += len;
1912                         offset += len;
1913                         remain -= len;
1914                 } while (remain);
1915
1916                 sg_miter->consumed = offset;
1917                 status = mci_readl(host, MINTSTS);
1918                 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1919         /* if the RXDR is ready read again */
1920         } while ((status & SDMMC_INT_RXDR) ||
1921                  (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1922
1923         if (!remain) {
1924                 if (!sg_miter_next(sg_miter))
1925                         goto done;
1926                 sg_miter->consumed = 0;
1927         }
1928         sg_miter_stop(sg_miter);
1929         return;
1930
1931 done:
1932         sg_miter_stop(sg_miter);
1933         host->sg = NULL;
1934         smp_wmb();
1935         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1936 }
1937
1938 static void dw_mci_write_data_pio(struct dw_mci *host)
1939 {
1940         struct sg_mapping_iter *sg_miter = &host->sg_miter;
1941         void *buf;
1942         unsigned int offset;
1943         struct mmc_data *data = host->data;
1944         int shift = host->data_shift;
1945         u32 status;
1946         unsigned int len;
1947         unsigned int fifo_depth = host->fifo_depth;
1948         unsigned int remain, fcnt;
1949
1950         do {
1951                 if (!sg_miter_next(sg_miter))
1952                         goto done;
1953
1954                 host->sg = sg_miter->piter.sg;
1955                 buf = sg_miter->addr;
1956                 remain = sg_miter->length;
1957                 offset = 0;
1958
1959                 do {
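                        /* free FIFO space in bytes, minus bytes already staged in part_buf */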
1960                         fcnt = ((fifo_depth -
1961                                  SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1962                                         << shift) - host->part_buf_count;
1963                         len = min(remain, fcnt);
1964                         if (!len)
1965                                 break;
1966                         host->push_data(host, (void *)(buf + offset), len);
1967                         data->bytes_xfered += len;
1968                         offset += len;
1969                         remain -= len;
1970                 } while (remain);
1971
1972                 sg_miter->consumed = offset;
1973                 status = mci_readl(host, MINTSTS);
1974                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1975         } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1976
1977         if (!remain) {
1978                 if (!sg_miter_next(sg_miter))
1979                         goto done;
1980                 sg_miter->consumed = 0;
1981         }
1982         sg_miter_stop(sg_miter);
1983         return;
1984
1985 done:
1986         sg_miter_stop(sg_miter);
1987         host->sg = NULL;
1988         smp_wmb();
1989         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1990 }
1991
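/* latch the command-done status and let the tasklet finish the command */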
1992 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1993 {
1994         if (!host->cmd_status)
1995                 host->cmd_status = status;
1996
1997         smp_wmb();
1998
1999         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2000         tasklet_schedule(&host->tasklet);
2001 }
2002
2003 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2004 {
2005         struct dw_mci *host = dev_id;
2006         u32 pending;
2007         int i;
2008
2009         pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2010
2011         /*
2012          * DTO fix - version 2.10a and below, and only if internal DMA
2013          * is configured.
2014          */
2015         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2016                 if (!pending &&
2017                     ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2018                         pending |= SDMMC_INT_DATA_OVER;
2019         }
2020
2021         if (pending) {
2022                 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2023                         mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2024                         host->cmd_status = pending;
2025                         smp_wmb();
2026                         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2027                 }
2028
2029                 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2030                         /* if there is an error report DATA_ERROR */
2031                         mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2032                         host->data_status = pending;
2033                         smp_wmb();
2034                         set_bit(EVENT_DATA_ERROR, &host->pending_events);
2035                         tasklet_schedule(&host->tasklet);
2036                 }
2037
2038                 if (pending & SDMMC_INT_DATA_OVER) {
2039                         mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2040                         MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2041                         if (!host->data_status)
2042                                 host->data_status = pending;
2043                         smp_wmb();
2044                         if (host->dir_status == DW_MCI_RECV_STATUS) {
2045                                 if (host->sg != NULL)
2046                                         dw_mci_read_data_pio(host, true);
2047                         }
2048                         set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2049                         tasklet_schedule(&host->tasklet);
2050                 }
2051
2052                 if (pending & SDMMC_INT_RXDR) {
2053                         mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2054                         if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2055                                 dw_mci_read_data_pio(host, false);
2056                 }
2057
2058                 if (pending & SDMMC_INT_TXDR) {
2059                         mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2060                         if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2061                                 dw_mci_write_data_pio(host);
2062                 }
2063
2064                 if (pending & SDMMC_INT_CMD_DONE) {
2065                 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2066                         mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2067                         dw_mci_cmd_interrupt(host, pending);
2068                 }
2069
2070                 if (pending & SDMMC_INT_CD) {
2071                         mci_writel(host, RINTSTS, SDMMC_INT_CD);
2072                         queue_work(host->card_workqueue, &host->card_work);
2073                 }
2074
2075                 /* Handle SDIO Interrupts */
2076                 for (i = 0; i < host->num_slots; i++) {
2077                         struct dw_mci_slot *slot = host->slot[i];
2078                         if (pending & SDMMC_INT_SDIO(i)) {
2079                                 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
2080                                 mmc_signal_sdio_irq(slot->mmc);
2081                         }
2082                 }
2083
2084         }
2085
2086 #ifdef CONFIG_MMC_DW_IDMAC
2087         /* Handle DMA interrupts */
2088         pending = mci_readl(host, IDSTS);
2089         if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2090                 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2091                 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2092                 host->dma_ops->complete(host);
2093         }
2094 #endif
2095
2096         return IRQ_HANDLED;
2097 }
2098
2099 static void dw_mci_work_routine_card(struct work_struct *work)
2100 {
2101         struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2102         int i;
2103         
2104         for (i = 0; i < host->num_slots; i++) {
2105                 struct dw_mci_slot *slot = host->slot[i];
2106                 struct mmc_host *mmc = slot->mmc;
2107                 struct mmc_request *mrq;
2108                 int present;
2109                 u32 ctrl;
2110
2111                 present = dw_mci_get_cd(mmc);
2112                 while (present != slot->last_detect_state) {
2113                         dev_dbg(&slot->mmc->class_dev, "card %s\n",
2114                                 present ? "inserted" : "removed");
2115             MMC_DBG_BOOT_FUNC(mmc, "card %s,  devname=%s \n",
2116                                 present ? "inserted" : "removed", mmc_hostname(mmc));
2117
2118                         spin_lock_bh(&host->lock);
2119
2120                         /* Card change detected */
2121                         slot->last_detect_state = present;
2122
2123                         /* Clean up queue if present */
2124                         mrq = slot->mrq;
2125                         if (mrq) {
2126                                 if (mrq == host->mrq) {
2127                                         host->data = NULL;
2128                                         host->cmd = NULL;
2129
2130                                         switch (host->state) {
2131                                         case STATE_IDLE:
2132                                                 break;
2133                                         case STATE_SENDING_CMD:
2134                                                 mrq->cmd->error = -ENOMEDIUM;
2135                                                 if (!mrq->data)
2136                                                         break;
2137                                                 /* fall through */
2138                                         case STATE_SENDING_DATA:
2139                                                 mrq->data->error = -ENOMEDIUM;
2140                                                 dw_mci_stop_dma(host);
2141                                                 break;
2142                                         case STATE_DATA_BUSY:
2143                                         case STATE_DATA_ERROR:
2144                                                 if (mrq->data->error == -EINPROGRESS)
2145                                                         mrq->data->error = -ENOMEDIUM;
2146                                                 if (!mrq->stop)
2147                                                         break;
2148                                                 /* fall through */
2149                                         case STATE_SENDING_STOP:
2150                                                 mrq->stop->error = -ENOMEDIUM;
2151                                                 break;
2152                                         }
2153
2154                                         dw_mci_request_end(host, mrq);
2155                                 } else {
2156                                         list_del(&slot->queue_node);
2157                                         mrq->cmd->error = -ENOMEDIUM;
2158                                         if (mrq->data)
2159                                                 mrq->data->error = -ENOMEDIUM;
2160                                         if (mrq->stop)
2161                                                 mrq->stop->error = -ENOMEDIUM;
2162
2163                                         MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--request done, cmd=%d [%s]", mrq->cmd->opcode, mmc_hostname(mmc));
2164
2165                                         spin_unlock(&host->lock);
2166                                         mmc_request_done(slot->mmc, mrq);
2167                                         spin_lock(&host->lock);
2168                                 }
2169                         }
2170
2171                         /* Power down slot */
2172                         if (present == 0) {
2173
2174                                 /*
2175                                  * Clear down the FIFO - doing so generates a
2176                                  * block interrupt, hence setting the
2177                                  * scatter-gather pointer to NULL.
2178                                  */
2179                                 sg_miter_stop(&host->sg_miter);
2180                                 host->sg = NULL;
2181
2182                                 ctrl = mci_readl(host, CTRL);
2183                                 ctrl |= SDMMC_CTRL_FIFO_RESET;
2184                                 mci_writel(host, CTRL, ctrl);
2185
2186 #ifdef CONFIG_MMC_DW_IDMAC
2187                                 dw_mci_idmac_reset(host);
2188 #endif
2189
2190                         }
2191
2192                         spin_unlock_bh(&host->lock);
2193
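                        /* re-read card state; loop again if it changed while this event was being handled */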
2194                         present = dw_mci_get_cd(mmc);
2195                 }
2196
2197                 mmc_detect_change(slot->mmc,
2198                         msecs_to_jiffies(host->pdata->detect_delay_ms));
2199         }
2200 }
2201
2202 #ifdef CONFIG_OF
2203 /* given a slot id, find out the device node representing that slot */
2204 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2205 {
2206         struct device_node *np;
2207         const __be32 *addr;
2208         int len;
2209
2210         if (!dev || !dev->of_node)
2211                 return NULL;
2212
2213         for_each_child_of_node(dev->of_node, np) {
2214                 addr = of_get_property(np, "reg", &len);
2215                 if (!addr || (len < sizeof(int)))
2216                         continue;
2217                 if (be32_to_cpup(addr) == slot)
2218                         return np;
2219         }
2220         return NULL;
2221 }
2222
2223 static struct dw_mci_of_slot_quirks {
2224         char *quirk;
2225         int id;
2226 } of_slot_quirks[] = {
2227         {
2228                 .quirk  = "disable-wp",
2229                 .id     = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2230         },
2231 };
2232
2233 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2234 {
2235         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2236         int quirks = 0;
2237         int idx;
2238
2239         /* get quirks */
2240         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2241                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2242                         quirks |= of_slot_quirks[idx].id;
2243
2244         return quirks;
2245 }
2246
2247 /* find out bus-width for a given slot */
2248 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2249 {
2250         struct device_node *np = dev->of_node; /* was: dw_mci_of_find_slot_node(dev, slot) */
2251         u32 bus_wd = 1;
2252
2253         if (!np)
2254                 return 1;
2255
2256         if (of_property_read_u32(np, "bus-width", &bus_wd))
2257                 dev_err(dev, "bus-width property not found, assuming width"
2258                                " as 1\n");
2259         return bus_wd;
2260 }
2261
2262
2263 /* find the pwr-en gpio for a given slot; or -1 if none specified */
2264 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
2265 {
2266         struct device_node *np = dev->of_node; /* was: dw_mci_of_find_slot_node(dev, slot) */
2267         int gpio;
2268
2269         if (!np)
2270                 return -EINVAL;
2271
2272         gpio = of_get_named_gpio(np, "pwr-gpios", 0);
2273
2274         /* Having a missing entry is valid; return silently */
2275         if (!gpio_is_valid(gpio))
2276                 return -EINVAL;
2277
2278         if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
2279                 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2280                 return -EINVAL;
2281         }
2282
2283         gpio_direction_output(gpio, 0); /* default the pwr-en line to output low */
2284
2285         return gpio;
2286 }
2287
2288
2289 /* find the write protect gpio for a given slot; or -1 if none specified */
2290 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2291 {
2292         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2293         int gpio;
2294
2295         if (!np)
2296                 return -EINVAL;
2297
2298         gpio = of_get_named_gpio(np, "wp-gpios", 0);
2299
2300         /* Having a missing entry is valid; return silently */
2301         if (!gpio_is_valid(gpio))
2302                 return -EINVAL;
2303
2304         if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2305                 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2306                 return -EINVAL;
2307         }
2308
2309         return gpio;
2310 }
2311
2312 /* find the cd gpio for a given slot */
2313 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2314                                         struct mmc_host *mmc)
2315 {
2316         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2317         int gpio;
2318
2319         if (!np)
2320                 return;
2321
2322         gpio = of_get_named_gpio(np, "cd-gpios", 0);
2323
2324         /* Having a missing entry is valid; return silently */
2325         if (!gpio_is_valid(gpio))
2326                 return;
2327
2328         if (mmc_gpio_request_cd(mmc, gpio, 0))
2329                 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2330 }
2331 #else /* CONFIG_OF */
2332 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2333 {
2334         return 0;
2335 }
2336 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2337 {
2338         return 1;
2339 }
2340 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2341 {
2342         return NULL;
2343 }
2344 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2345 {
2346         return -EINVAL;
2347 }
2348 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2349                                         struct mmc_host *mmc)
2350 {
2351         return;
2352 }
2353 #endif /* CONFIG_OF */
2354
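/* allocate and register the mmc_host for one slot, applying platform data and DT capabilities */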
2355 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2356 {
2357         struct mmc_host *mmc;
2358         struct dw_mci_slot *slot;
2359         const struct dw_mci_drv_data *drv_data = host->drv_data;
2360         int ctrl_id, ret;
2361         u32 freq[2];
2362         u8 bus_width;
2363
2364         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2365         if (!mmc)
2366                 return -ENOMEM;
2367
2368         slot = mmc_priv(mmc);
2369         slot->id = id;
2370         slot->mmc = mmc;
2371         slot->host = host;
2372         host->slot[id] = slot;
2373         host->mmc = mmc;
2374
2375         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2376
2377         mmc->ops = &dw_mci_ops;
2378 #if 0
2379     mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
2380     mmc->f_max = host->bus_hz;
2381     printk("%d..%s: fmin=%d, fmax=%d, bus_hz=%d \n", __LINE__,__FUNCTION__,mmc->f_min, mmc->f_max, host->bus_hz);    
2382 #else
2383         if (of_property_read_u32_array(host->dev->of_node,
2384                                        "clock-freq-min-max", freq, 2)) {
2385                 mmc->f_min = DW_MCI_FREQ_MIN;
2386                 mmc->f_max = DW_MCI_FREQ_MAX;
2387
2388                 printk("%d..%s: fmin=%d, fmax=%d\n", __LINE__, __FUNCTION__, mmc->f_min, mmc->f_max);
2389         } else {
2390                 mmc->f_min = freq[0];
2391                 mmc->f_max = freq[1];
2392
2393                 printk("%d..%s: fmin=%d, fmax=%d\n", __LINE__, __FUNCTION__, mmc->f_min, mmc->f_max);
2394         }
2395 #endif
2396
2397         if (of_find_property(host->dev->of_node, "supports-sd", NULL))
2398                 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;    
2399         if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
2400                 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;  
2401         if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
2402                 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
2403
2404         if (host->pdata->get_ocr)
2405                 mmc->ocr_avail = host->pdata->get_ocr(id);
2406         else {
2407                 /* mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; */
2408                 mmc->ocr_avail = MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 |
2409                                  MMC_VDD_31_32 | MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36;
2410
2411                 mmc->ocr_avail |= MMC_VDD_26_27 | MMC_VDD_25_26 | MMC_VDD_24_25 | MMC_VDD_23_24 |
2412                                   MMC_VDD_22_23 | MMC_VDD_21_22 | MMC_VDD_20_21 | MMC_VDD_165_195;
2413         }
2414
2415         /*
2416          * Start with slot power disabled, it will be enabled when a card
2417          * is detected.
2418          */
2419         if (host->pdata->setpower)
2420                 host->pdata->setpower(id, 0);
2421
2422         if (host->pdata->caps)
2423                 mmc->caps = host->pdata->caps;
2424
2425         if (host->pdata->pm_caps)
2426                 mmc->pm_caps = host->pdata->pm_caps;
2427
2428         if (host->dev->of_node) {
2429                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2430                 if (ctrl_id < 0)
2431                         ctrl_id = 0;
2432         } else {
2433                 ctrl_id = to_platform_device(host->dev)->id;
2434         }
2435         if (drv_data && drv_data->caps)
2436                 mmc->caps |= drv_data->caps[ctrl_id];
2437         if (drv_data && drv_data->hold_reg_flag)
2438                 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];         
2439
2440         if (host->pdata->caps2)
2441                 mmc->caps2 = host->pdata->caps2;
2442
2443         if (host->pdata->get_bus_wd)
2444                 bus_width = host->pdata->get_bus_wd(slot->id);
2445         else if (host->dev->of_node)
2446                 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2447         else
2448                 bus_width = 1;
2449
2450         switch (bus_width) {
2451         case 8:
2452                 mmc->caps |= MMC_CAP_8_BIT_DATA;
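                /* fall through: an 8-bit capable slot also supports 4-bit transfers */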
2453         case 4:
2454                 mmc->caps |= MMC_CAP_4_BIT_DATA;
2455         }
2456         if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
2457                 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
2458         if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
2459                 mmc->caps |= MMC_CAP_SDIO_IRQ;
2460         if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
2461                 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
2462         if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
2463                 mmc->pm_caps |= MMC_PM_KEEP_POWER;
2464         if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
2465                 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2466
2467         if (host->pdata->blk_settings) {
2468                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2469                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2470                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2471                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2472                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2473         } else {
2474                 /* Useful defaults if platform data is unset. */
2475 #ifdef CONFIG_MMC_DW_IDMAC
2476                 mmc->max_segs = host->ring_size;
2477                 mmc->max_blk_size = 65536;
2478                 mmc->max_blk_count = host->ring_size;
2479                 mmc->max_seg_size = 0x1000;
2480                 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2481 #else
2482                 mmc->max_segs = 64;
2483                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2484                 mmc->max_blk_count = 512;
2485                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2486                 mmc->max_seg_size = mmc->max_req_size;
2487 #endif /* CONFIG_MMC_DW_IDMAC */
2488         }
2489         /* pwr_en: prefer a dedicated power-enable GPIO over a vmmc regulator */
2490         slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
2491
2492         if (gpio_is_valid(slot->pwr_en_gpio)) {
2493                 host->vmmc = NULL;
2494         } else {
2495                 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
2496                 if (IS_ERR(host->vmmc)) {
2497                         pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
2498                         host->vmmc = NULL;
2499                 } else {
2500                         ret = regulator_enable(host->vmmc);
2501                         if (ret) {
2502                                 dev_err(host->dev,
2503                                         "failed to enable regulator: %d\n", ret);
2504                                 goto err_setup_bus;
2505                         }
2506                 }
2507         }
2510         slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2511         dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2512
2513         ret = mmc_add_host(mmc);
2514         if (ret)
2515                 goto err_setup_bus;
2516
2517 #if defined(CONFIG_DEBUG_FS)
2518         dw_mci_init_debugfs(slot);
2519 #endif
2520
2521         /* Card initially undetected */
2522         slot->last_detect_state = 0;
2523
2524         return 0;
2525
2526 err_setup_bus:
2527         mmc_free_host(mmc);
2528         return -EINVAL;
2529 }
2530
2531 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2532 {
2533         /* Shutdown detect IRQ */
2534         if (slot->host->pdata->exit)
2535                 slot->host->pdata->exit(id);
2536
2537         /* Debugfs stuff is cleaned up by mmc core */
2538         mmc_remove_host(slot->mmc);
2539         slot->host->slot[id] = NULL;
2540         mmc_free_host(slot->mmc);
2541 }
2542
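/* pick a DMA implementation (the internal IDMAC when configured); otherwise fall back to PIO */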
2543 static void dw_mci_init_dma(struct dw_mci *host)
2544 {
2545         /* Alloc memory for sg translation */
2546         host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2547                                           &host->sg_dma, GFP_KERNEL);
2548         if (!host->sg_cpu) {
2549                 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2550                         __func__);
2551                 goto no_dma;
2552         }
2553
2554         /* Determine which DMA interface to use */
2555 #ifdef CONFIG_MMC_DW_IDMAC
2556         host->dma_ops = &dw_mci_idmac_ops;
2557         dev_info(host->dev, "Using internal DMA controller.\n");
2558 #endif
2559
2560         if (!host->dma_ops)
2561                 goto no_dma;
2562
2563         if (host->dma_ops->init && host->dma_ops->start &&
2564             host->dma_ops->stop && host->dma_ops->cleanup) {
2565                 if (host->dma_ops->init(host)) {
2566                         dev_err(host->dev, "%s: Unable to initialize "
2567                                 "DMA Controller.\n", __func__);
2568                         goto no_dma;
2569                 }
2570         } else {
2571                 dev_err(host->dev, "DMA initialization not found.\n");
2572                 goto no_dma;
2573         }
2574
2575         host->use_dma = 1;
2576         return;
2577
2578 no_dma:
2579         dev_info(host->dev, "Using PIO mode.\n");
2580         host->use_dma = 0;
2581         return;
2582 }
2583
2584 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2585 {
2586         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2587         unsigned int ctrl;
2588
2589         mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2590                                 SDMMC_CTRL_DMA_RESET));
2591
2592         /* wait till resets clear */
2593         do {
2594                 ctrl = mci_readl(host, CTRL);
2595                 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2596                               SDMMC_CTRL_DMA_RESET)))
2597                         return true;
2598         } while (time_before(jiffies, timeout));
2599
2600         dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
2601
2602         return false;
2603 }
2604
2605 #ifdef CONFIG_OF
2606 static struct dw_mci_of_quirks {
2607         char *quirk;
2608         int id;
2609 } of_quirks[] = {
2610         {
2611                 .quirk  = "broken-cd",
2612                 .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2613         },
2614 };
2615
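/*
 * Illustrative device-tree fragment for the properties parsed below.
 * Node name, unit address and values are examples only (not taken from
 * this driver); the property names match the of_property/of_find calls
 * in dw_mci_parse_dt():
 *
 *	dwmmc@10214000 {
 *		num-slots = <1>;
 *		fifo-depth = <0x100>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <50000000>;
 *		broken-cd;
 *		supports-highspeed;
 *		keep-power-in-suspend;
 *	};
 */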
2616 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2617 {
2618         struct dw_mci_board *pdata;
2619         struct device *dev = host->dev;
2620         struct device_node *np = dev->of_node;
2621         const struct dw_mci_drv_data *drv_data = host->drv_data;
2622         int idx, ret;
2623         u32 clock_frequency;
2624
2625         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2626         if (!pdata) {
2627                 dev_err(dev, "could not allocate memory for pdata\n");
2628                 return ERR_PTR(-ENOMEM);
2629         }
2630
2631         /* find out number of slots supported */
2632         if (of_property_read_u32(dev->of_node, "num-slots",
2633                                 &pdata->num_slots)) {
2634                 dev_info(dev, "num-slots property not found, "
2635                                 "assuming 1 slot is available\n");
2636                 pdata->num_slots = 1;
2637         }
2638
2639         /* get quirks */
2640         for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2641                 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2642                         pdata->quirks |= of_quirks[idx].id;
2643
2644         if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2645                 dev_info(dev, "fifo-depth property not found, using "
2646                                 "value of FIFOTH register as default\n");
2647
2648         of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2649
2650         if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2651                 pdata->bus_hz = clock_frequency;
2652
2653         if (drv_data && drv_data->parse_dt) {
2654                 ret = drv_data->parse_dt(host);
2655                 if (ret)
2656                         return ERR_PTR(ret);
2657         }
2658
2659         if (of_find_property(np, "keep-power-in-suspend", NULL))
2660                 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2661                 
2662
2663
2664         if (of_find_property(np, "enable-sdio-wakeup", NULL))
2665                 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2666
2667         if (of_find_property(np, "supports-highspeed", NULL))
2668                 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2669
2670         if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2671                 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2672
2673         if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2674                 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2675
2676         if (of_get_property(np, "cd-inverted", NULL))
2677                 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2678         if (of_get_property(np, "bootpart-no-access", NULL))
2679                 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;        
2680
2681         return pdata;
2682 }
2683
2684 #else /* CONFIG_OF */
2685 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2686 {
2687         return ERR_PTR(-EINVAL);
2688 }
2689 #endif /* CONFIG_OF */
2690
2691 int dw_mci_probe(struct dw_mci *host)
2692 {
2693         const struct dw_mci_drv_data *drv_data = host->drv_data;
2694         int width, i, ret = 0;
2695         u32 fifo_size;
2696         int init_slots = 0;
2697
2698         if (!host->pdata) {
2699                 host->pdata = dw_mci_parse_dt(host);
2700                 if (IS_ERR(host->pdata)) {
2701                         dev_err(host->dev, "platform data not available\n");
2702                         return -EINVAL;
2703                 }
2704         }
2705
2706         if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2707                 dev_err(host->dev,
2708                         "Platform data must supply select_slot function\n");
2709                 return -ENODEV;
2710         }
2711
2712         host->biu_clk = devm_clk_get(host->dev, "biu");
2713         if (IS_ERR(host->biu_clk)) {
2714                 dev_dbg(host->dev, "biu clock not available\n");
2715         } else {
2716                 ret = clk_prepare_enable(host->biu_clk);
2717                 if (ret) {
2718                         dev_err(host->dev, "failed to enable biu clock\n");
2719                         return ret;
2720                 }
2721         }
2722
2723         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2724         if (IS_ERR(host->ciu_clk)) {
2725                 dev_dbg(host->dev, "ciu clock not available\n");
2726                 host->bus_hz = host->pdata->bus_hz;
2727         } else {
2728                 ret = clk_prepare_enable(host->ciu_clk);
2729                 if (ret) {
2730                         dev_err(host->dev, "failed to enable ciu clock\n");
2731                         goto err_clk_biu;
2732                 }
2733         }
2734
2735 #if 1
2736         /* test: hard-code the bus clock to 50 MHz (original note: "test, modify by xbw") */
2737         host->bus_hz = 50000000;
2738 #else
2739         if (drv_data && drv_data->init) {
2740                 ret = drv_data->init(host);
2741                 if (ret) {
2742                         dev_err(host->dev,
2743                                 "implementation specific init failed\n");
2744                         goto err_clk_ciu;
2745                 }
2746                 host->bus_hz = clk_get_rate(host->ciu_clk);
2747         }
#endif
2748         if (drv_data && drv_data->setup_clock) {
2749                 ret = drv_data->setup_clock(host);
2750                 if (ret) {
2751                         dev_err(host->dev,
2752                                 "implementation specific clock setup failed\n");
2753                         goto err_clk_ciu;
2754                 }
2755         }
2756
2757         if (!host->bus_hz) {
2758                 dev_err(host->dev,
2759                         "Platform data must supply bus speed\n");
2760                 ret = -ENODEV;
2761                 goto err_clk_ciu;
2762         }
2763
2764         host->quirks = host->pdata->quirks;
2765
2766         spin_lock_init(&host->lock);
2767         INIT_LIST_HEAD(&host->queue);
2768
2769         /*
2770          * Get the host data width - this assumes that HCON has been set with
2771          * the correct values.
2772          */
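        /* HCON[9:7] H_DATA_WIDTH: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit */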
2773         i = (mci_readl(host, HCON) >> 7) & 0x7;
2774         if (!i) {
2775                 host->push_data = dw_mci_push_data16;
2776                 host->pull_data = dw_mci_pull_data16;
2777                 width = 16;
2778                 host->data_shift = 1;
2779         } else if (i == 2) {
2780                 host->push_data = dw_mci_push_data64;
2781                 host->pull_data = dw_mci_pull_data64;
2782                 width = 64;
2783                 host->data_shift = 3;
2784         } else {
2785                 /* Check for a reserved value, and warn if it is */
2786                 WARN((i != 1),
2787                      "HCON reports a reserved host data width!\n"
2788                      "Defaulting to 32-bit access.\n");
2789                 host->push_data = dw_mci_push_data32;
2790                 host->pull_data = dw_mci_pull_data32;
2791                 width = 32;
2792                 host->data_shift = 2;
2793         }
2794
2795         /* Reset all blocks */
2796         if (!mci_wait_reset(host->dev, host))
2797                 return -ENODEV;
2798
2799         host->dma_ops = host->pdata->dma_ops;
2800         dw_mci_init_dma(host);
2801
2802         /* Clear the interrupts for the host controller */
2803         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2804         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2805
2806         /* Put in max timeout */
2807         mci_writel(host, TMOUT, 0xFFFFFFFF);
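        /*
         * Annotation (register layout assumed from the DW MSHC databook):
         * TMOUT bits [7:0] hold the response timeout and bits [31:8] the
         * data read timeout, both counted in card clock cycles, so
         * 0xFFFFFFFF simply selects the maximum for both fields until a
         * real timeout is programmed per request.
         */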
2808
2809         /*
2810          * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
2811          * TX_WMark = fifo_size / 2, DMA multiple transaction size = 8.
2812          */
2813         if (!host->pdata->fifo_depth) {
2814                 /*
2815                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2816                  * have been overwritten by the bootloader, just like we're
2817                  * about to do, so if you know the value for your hardware, you
2818                  * should put it in the platform data.
2819                  */
2820                 fifo_size = mci_readl(host, FIFOTH);
2821                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2822         } else {
2823                 fifo_size = host->pdata->fifo_depth;
2824         }
2825         host->fifo_depth = fifo_size;
2826         host->fifoth_val =
2827                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2828         mci_writel(host, FIFOTH, host->fifoth_val);
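        /*
         * Worked example (annotation), assuming a FIFO depth of 256 words:
         * SDMMC_SET_FIFOTH(0x2, 127, 128) packs MSIZE into bits [30:28],
         * RX_WMark into [27:16] and TX_WMark into [11:0], i.e.
         *
         *     fifoth_val = (0x2 << 28) | (127 << 16) | 128;
         *
         * MSIZE = 0x2 requests DMA bursts of 8 transfers, and the watermarks
         * trigger servicing when the FIFO is roughly half full or half empty.
         */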
2829
2830         /* disable clock to CIU */
2831         mci_writel(host, CLKENA, 0);
2832         mci_writel(host, CLKSRC, 0);
2833
2834         /*
2835          * In the 2.40a spec the data offset changed, so check the
2836          * version ID and set the offset for the DATA register accordingly.
2837          */
2838         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2839         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2840
2841         if (host->verid < DW_MMC_240A)
2842                 host->data_offset = DATA_OFFSET;
2843         else
2844                 host->data_offset = DATA_240A_OFFSET;
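        /*
         * Annotation: on pre-2.40a controllers the data FIFO typically sits
         * at offset 0x100, while 2.40a and later moved it to 0x200, so all
         * FIFO accesses go through host->data_offset, e.g. (sketch):
         *
         *     writel(val, host->regs + host->data_offset);
         */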
2845
2846         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2847         host->card_workqueue = alloc_workqueue("dw-mci-card",
2848                         WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2849         if (!host->card_workqueue) {
2850                 ret = -ENOMEM;
2851                 goto err_dmaunmap;
2852         }
2853         INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2854         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2855                                host->irq_flags, "dw-mci", host);
2856         if (ret)
2857                 goto err_workqueue;
2858
2859         if (host->pdata->num_slots)
2860                 host->num_slots = host->pdata->num_slots;
2861         else
2862                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
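        /*
         * Annotation: HCON[5:1] holds the synthesized card-slot count minus
         * one, so a raw field value of 0 means a single slot; platform data
         * can still override this when the register value cannot be trusted.
         */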
2863
2864         /*
2865          * Enable interrupts for command done, data over, data empty, card
2866          * detect, receive ready, and errors (transmit/receive timeout, CRC).
2867          */
2868         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2869         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2870                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2871                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2872         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupts */
2873
2874         dev_info(host->dev, "DW MMC controller at irq %d, "
2875                  "%d bit host data width, "
2876                  "%u deep fifo\n",
2877                  host->irq, width, fifo_size);
2878
2879         /* We need at least one slot to succeed */
2880         for (i = 0; i < host->num_slots; i++) {
2881                 ret = dw_mci_init_slot(host, i);
2882                 if (ret)
2883                         dev_dbg(host->dev, "slot %d init failed\n", i);
2884                 else
2885                         init_slots++;
2886         }
2887
2888         if (init_slots) {
2889                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2890         } else {
2891                 dev_dbg(host->dev, "attempted to initialize %d slots, "
2892                                         "but failed on all\n", host->num_slots);
2893                 goto err_workqueue;
2894         }
2895
2896
2897         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2898                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2899
2900         return 0;
2901
2902 err_workqueue:
2903         destroy_workqueue(host->card_workqueue);
2904
2905 err_dmaunmap:
2906         if (host->use_dma && host->dma_ops->exit)
2907                 host->dma_ops->exit(host);
2908
2909         if (host->vmmc)
2910                 regulator_disable(host->vmmc);
2911
2912 err_clk_ciu:
2913         if (!IS_ERR(host->ciu_clk))
2914                 clk_disable_unprepare(host->ciu_clk);
2915
2916 err_clk_biu:
2917         if (!IS_ERR(host->biu_clk))
2918                 clk_disable_unprepare(host->biu_clk);
2919
2920         return ret;
2921 }
2922 EXPORT_SYMBOL(dw_mci_probe);
2923
2924 void dw_mci_remove(struct dw_mci *host)
2925 {
2926         int i;
2927
2928         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2929         mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2930
2931         for (i = 0; i < host->num_slots; i++) {
2932                 dev_dbg(host->dev, "remove slot %d\n", i);
2933                 if (host->slot[i])
2934                         dw_mci_cleanup_slot(host->slot[i], i);
2935         }
2936
2937         /* disable clock to CIU */
2938         mci_writel(host, CLKENA, 0);
2939         mci_writel(host, CLKSRC, 0);
2940
2941         destroy_workqueue(host->card_workqueue);
2942
2943         if (host->use_dma && host->dma_ops->exit)
2944                 host->dma_ops->exit(host);
2945
2946         if (host->vmmc)
2947                 regulator_disable(host->vmmc);
2948
2949         if (!IS_ERR(host->ciu_clk))
2950                 clk_disable_unprepare(host->ciu_clk);
2951
2952         if (!IS_ERR(host->biu_clk))
2953                 clk_disable_unprepare(host->biu_clk);
2954 }
2955 EXPORT_SYMBOL(dw_mci_remove);
2956
2957
2958
2959 #ifdef CONFIG_PM_SLEEP
2960 /*
2961  * TODO: we should probably disable the clock to the card in the suspend path.
2962  */
2963 int dw_mci_suspend(struct dw_mci *host)
2964 {
2965         /*int i, ret = 0;
2966
2967         for (i = 0; i < host->num_slots; i++) {
2968                 struct dw_mci_slot *slot = host->slot[i];
2969                 if (!slot)
2970                         continue;
2971                 ret = mmc_suspend_host(slot->mmc);
2972                 if (ret < 0) {
2973                         while (--i >= 0) {
2974                                 slot = host->slot[i];
2975                                 if (slot)
2976                                         mmc_resume_host(host->slot[i]->mmc);
2977                         }
2978                         return ret;
2979                 }
2980         }
2981         */
2982         if (host->vmmc)
2983                 regulator_disable(host->vmmc);
2984
2985         return 0;
2986 }
2987 EXPORT_SYMBOL(dw_mci_suspend);
2988
2989 int dw_mci_resume(struct dw_mci *host)
2990 {
2991         int i, ret;
2992
2993         if (host->vmmc) {
2994                 ret = regulator_enable(host->vmmc);
2995                 if (ret) {
2996                         dev_err(host->dev,
2997                                 "failed to enable regulator: %d\n", ret);
2998                         return ret;
2999                 }
3000         }
3001
3002         if (!mci_wait_reset(host->dev, host)) {
3003                 ret = -ENODEV;
3004                 return ret;
3005         }
3006
3007         if (host->use_dma && host->dma_ops->init)
3008                 host->dma_ops->init(host);
3009
3010         /*
3011          * Restore the initial value of the FIFOTH register
3012          * and invalidate prev_blksz by resetting it to zero.
3013          */
3014         mci_writel(host, FIFOTH, host->fifoth_val);
3015         host->prev_blksz = 0;
3016         /* Put in max timeout */
3017         mci_writel(host, TMOUT, 0xFFFFFFFF);
3018
3019         mci_writel(host, RINTSTS, 0xFFFFFFFF);
3020         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3021                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3022                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
3023         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3024
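        /*
         * Annotation: slots flagged with MMC_PM_KEEP_POWER (typically SDIO
         * cards such as WiFi modules that stay powered across suspend) are
         * not fully re-initialized by the MMC core, so their previous ios
         * and bus configuration are replayed in the loop below.
         */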
3025         for (i = 0; i < host->num_slots; i++) {
3026                 struct dw_mci_slot *slot = host->slot[i];
3027                 if (!slot)
3028                         continue;
3029                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3030                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3031                         dw_mci_setup_bus(slot, true);
3032                 }
3033
3034         //      ret = mmc_resume_host(host->slot[i]->mmc);
3035         //      if (ret < 0)
3036         //              return ret;
3037         }
3038         return 0;
3039 }
3040 EXPORT_SYMBOL(dw_mci_resume);
3041 #endif /* CONFIG_PM_SLEEP */
3042
3043 static int __init dw_mci_init(void)
3044 {
3045         pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3046         return 0;
3047 }
3048
3049 static void __exit dw_mci_exit(void)
3050 {
3051 }
3052
3053 module_init(dw_mci_init);
3054 module_exit(dw_mci_exit);
3055
3056 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3057
3058 MODULE_AUTHOR("NXP Semiconductor VietNam");
3059 MODULE_AUTHOR("Imagination Technologies Ltd");
3060 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
3061
3062 MODULE_LICENSE("GPL v2");