[firefly-linux-kernel-4.4.55.git] / drivers / mmc / host / rk_sdmmc.c
1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  *  (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  */
15
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/platform_device.h>
27 #include <linux/seq_file.h>
28 #include <linux/slab.h>
29 #include <linux/stat.h>
30 #include <linux/delay.h>
31 #include <linux/irq.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/mmc.h>
34 #include <linux/mmc/sdio.h>
35 #include <linux/mmc/rk_mmc.h>
36 #include <linux/bitops.h>
37 #include <linux/regulator/consumer.h>
38 #include <linux/workqueue.h>
39 #include <linux/of.h>
40 #include <linux/of_gpio.h>
41 #include <linux/mmc/slot-gpio.h>
42
43 #include "rk_sdmmc.h"
44 #include "rk_sdmmc_of.h"
45
46 /* Common flag combinations */
47 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
48                                  SDMMC_INT_HTO | SDMMC_INT_SBE  | \
49                                  SDMMC_INT_EBE)
50 #define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
51                                  SDMMC_INT_RESP_ERR)
52 #define DW_MCI_ERROR_FLAGS      (DW_MCI_DATA_ERROR_FLAGS | \
53                                  DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
54 #define DW_MCI_SEND_STATUS      1
55 #define DW_MCI_RECV_STATUS      2
56 #define DW_MCI_DMA_THRESHOLD    16
57
58 #define DW_MCI_FREQ_MAX 50000000        /* unit: Hz (upstream default 200000000) */
59 #define DW_MCI_FREQ_MIN 300000          /* unit: Hz (upstream default 400000) */
60
61 #define SDMMC_DATA_TIMEOUT_SD   500  /* spec maximum is 250ms; padded for marginal cards */
62 #define SDMMC_DATA_TIMEOUT_SDIO 250
63 #define SDMMC_DATA_TIMEOUT_EMMC 2500
64
65 #ifdef CONFIG_MMC_DW_IDMAC
66 #define IDMAC_INT_CLR           (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
67                                  SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
68                                  SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
69                                  SDMMC_IDMAC_INT_TI)
70
71 struct idmac_desc {
72         u32             des0;   /* Control Descriptor */
73 #define IDMAC_DES0_DIC  BIT(1)
74 #define IDMAC_DES0_LD   BIT(2)
75 #define IDMAC_DES0_FD   BIT(3)
76 #define IDMAC_DES0_CH   BIT(4)
77 #define IDMAC_DES0_ER   BIT(5)
78 #define IDMAC_DES0_CES  BIT(30)
79 #define IDMAC_DES0_OWN  BIT(31)
80
81         u32             des1;   /* Buffer sizes */
82 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
83         ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
84
85         u32             des2;   /* buffer 1 physical address */
86
87         u32             des3;   /* buffer 2 physical address */
88 };
89 #endif /* CONFIG_MMC_DW_IDMAC */
90
91 static const u8 tuning_blk_pattern_4bit[] = {
92         0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
93         0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
94         0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
95         0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
96         0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
97         0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
98         0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
99         0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
100 };
101
102 static const u8 tuning_blk_pattern_8bit[] = {
103         0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
104         0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
105         0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
106         0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
107         0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
108         0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
109         0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
110         0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
111         0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
112         0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
113         0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
114         0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
115         0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
116         0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
117         0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
118         0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
119 };
120
121 /* Print all registers of the current host */
122 static int dw_mci_regs_printk(struct dw_mci *host)
123 {
124     struct sdmmc_reg *regs = dw_mci_regs;
125
126     while (regs->name != 0) {
127         printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host, regs->addr));
128         regs++;
129     }
130     printk("======= %s register dump end =======\n", mmc_hostname(host->mmc));
131     return 0;
132 }
133
134
135 #if defined(CONFIG_DEBUG_FS)
136 static int dw_mci_req_show(struct seq_file *s, void *v)
137 {
138         struct dw_mci_slot *slot = s->private;
139         struct mmc_request *mrq;
140         struct mmc_command *cmd;
141         struct mmc_command *stop;
142         struct mmc_data *data;
143
144         /* Make sure we get a consistent snapshot */
145         spin_lock_bh(&slot->host->lock);
146         mrq = slot->mrq;
147
148         if (mrq) {
149                 cmd = mrq->cmd;
150                 data = mrq->data;
151                 stop = mrq->stop;
152
153                 if (cmd)
154                         seq_printf(s,
155                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
156                                    cmd->opcode, cmd->arg, cmd->flags,
157                                    cmd->resp[0], cmd->resp[1], cmd->resp[2],
158                                    cmd->resp[3], cmd->error);
159                 if (data)
160                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
161                                    data->bytes_xfered, data->blocks,
162                                    data->blksz, data->flags, data->error);
163                 if (stop)
164                         seq_printf(s,
165                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
166                                    stop->opcode, stop->arg, stop->flags,
167                                    stop->resp[0], stop->resp[1], stop->resp[2],
168                                    stop->resp[3], stop->error);
169         }
170
171         spin_unlock_bh(&slot->host->lock);
172
173         return 0;
174 }
175
176 static int dw_mci_req_open(struct inode *inode, struct file *file)
177 {
178         return single_open(file, dw_mci_req_show, inode->i_private);
179 }
180
181 static const struct file_operations dw_mci_req_fops = {
182         .owner          = THIS_MODULE,
183         .open           = dw_mci_req_open,
184         .read           = seq_read,
185         .llseek         = seq_lseek,
186         .release        = single_release,
187 };
188
189 static int dw_mci_regs_show(struct seq_file *s, void *v)
190 {
191         struct dw_mci *host = s->private;
192         seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
193         seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
194         seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
195         seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
196         seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
197         seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
198         return 0;
199 }
200
201 static int dw_mci_regs_open(struct inode *inode, struct file *file)
202 {
203         return single_open(file, dw_mci_regs_show, inode->i_private);
204 }
205
206 static const struct file_operations dw_mci_regs_fops = {
207         .owner          = THIS_MODULE,
208         .open           = dw_mci_regs_open,
209         .read           = seq_read,
210         .llseek         = seq_lseek,
211         .release        = single_release,
212 };
213
214 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
215 {
216         struct mmc_host *mmc = slot->mmc;
217         struct dw_mci *host = slot->host;
218         struct dentry *root;
219         struct dentry *node;
220
221         root = mmc->debugfs_root;
222         if (!root)
223                 return;
224
225         node = debugfs_create_file("regs", S_IRUSR, root, host,
226                                    &dw_mci_regs_fops);
227         if (!node)
228                 goto err;
229
230         node = debugfs_create_file("req", S_IRUSR, root, slot,
231                                    &dw_mci_req_fops);
232         if (!node)
233                 goto err;
234
235         node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
236         if (!node)
237                 goto err;
238
239         node = debugfs_create_x32("pending_events", S_IRUSR, root,
240                                   (u32 *)&host->pending_events);
241         if (!node)
242                 goto err;
243
244         node = debugfs_create_x32("completed_events", S_IRUSR, root,
245                                   (u32 *)&host->completed_events);
246         if (!node)
247                 goto err;
248
249         return;
250
251 err:
252         dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
253 }
254 #endif /* defined(CONFIG_DEBUG_FS) */
255
256 static void dw_mci_set_timeout(struct dw_mci *host)
257 {
258         /* timeout (maximum) */
259         mci_writel(host, TMOUT, 0xffffffff);
260 }
261
262 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
263 {
264         struct mmc_data *data;
265         struct dw_mci_slot *slot = mmc_priv(mmc);
266         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
267         u32 cmdr;
268         cmd->error = -EINPROGRESS;
269
270         cmdr = cmd->opcode;
271
272         if (cmdr == MMC_STOP_TRANSMISSION)
273                 cmdr |= SDMMC_CMD_STOP;
274         else
275                 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
276
277         if (cmd->flags & MMC_RSP_PRESENT) {
278                 /* We expect a response, so set this bit */
279                 cmdr |= SDMMC_CMD_RESP_EXP;
280                 if (cmd->flags & MMC_RSP_136)
281                         cmdr |= SDMMC_CMD_RESP_LONG;
282         }
283
284         if (cmd->flags & MMC_RSP_CRC)
285                 cmdr |= SDMMC_CMD_RESP_CRC;
286
287         data = cmd->data;
288         if (data) {
289                 cmdr |= SDMMC_CMD_DAT_EXP;
290                 if (data->flags & MMC_DATA_STREAM)
291                         cmdr |= SDMMC_CMD_STRM_MODE;
292                 if (data->flags & MMC_DATA_WRITE)
293                         cmdr |= SDMMC_CMD_DAT_WR;
294         }
295
296         if (drv_data && drv_data->prepare_command)
297                 drv_data->prepare_command(slot->host, &cmdr);
298
299         return cmdr;
300 }
301
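/*
 * Pre-build a stop/abort command for data transfers: CMD12 (STOP_TRANSMISSION)
 * for single/multi block reads and writes, or a CMD52 I/O abort for SDIO
 * extended (CMD53) transfers. The returned CMDR value is only issued if the
 * data phase has to be aborted.
 */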
302 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
303 {
304         struct mmc_command *stop;
305         u32 cmdr;
306
307         if (!cmd->data)
308                 return 0;
309
310         stop = &host->stop_abort;
311         cmdr = cmd->opcode;
312         memset(stop, 0, sizeof(struct mmc_command));
313
314         if (cmdr == MMC_READ_SINGLE_BLOCK ||
315             cmdr == MMC_READ_MULTIPLE_BLOCK ||
316             cmdr == MMC_WRITE_BLOCK ||
317             cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
318                 stop->opcode = MMC_STOP_TRANSMISSION;
319                 stop->arg = 0;
320                 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
321         } else if (cmdr == SD_IO_RW_EXTENDED) {
322                 stop->opcode = SD_IO_RW_DIRECT;
323                 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
324                              ((cmd->arg >> 28) & 0x7);
325                 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
326         } else {
327                 return 0;
328         }
329
330         cmdr = stop->opcode | SDMMC_CMD_STOP |
331                 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
332
333         return cmdr;
334 }
335
336 static void dw_mci_start_command(struct dw_mci *host,
337                                  struct mmc_command *cmd, u32 cmd_flags)
338 {
339         host->cmd = cmd;
340         dev_vdbg(host->dev,
341                  "start command: ARGR=0x%08x CMDR=0x%08x\n",
342                  cmd->arg, cmd_flags);
343
344         mci_writel(host, CMDARG, cmd->arg);
345         wmb();
346     MMC_DBG_INFO_FUNC(host->mmc, "%d..%s start cmd=%d, arg=0x%x [%s]", __LINE__, __FUNCTION__, cmd->opcode, cmd->arg, mmc_hostname(host->mmc));
347     //dw_mci_regs_printk(host);
348
349     if (host->mmc->hold_reg_flag)
350         cmd_flags |= SDMMC_CMD_USE_HOLD_REG; /* some SoCs (e.g. RK3188) require the HOLD register to be used */
351
352         mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
353 }
354
355 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
356 {
357         dw_mci_start_command(host, data->stop, host->stop_cmdr);
358 }
359
360 /* DMA interface functions */
361 static void dw_mci_stop_dma(struct dw_mci *host)
362 {
363         if (host->using_dma) {
364                 host->dma_ops->stop(host);
365                 host->dma_ops->cleanup(host);
366         }
367
368         /* Data transfer was stopped by the interrupt handler */
369         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
370 }
371
372 static int dw_mci_get_dma_dir(struct mmc_data *data)
373 {
374         if (data->flags & MMC_DATA_WRITE)
375                 return DMA_TO_DEVICE;
376         else
377                 return DMA_FROM_DEVICE;
378 }
379
380 #ifdef CONFIG_MMC_DW_IDMAC
381 static void dw_mci_dma_cleanup(struct dw_mci *host)
382 {
383         struct mmc_data *data = host->data;
384
385         if (data)
386                 if (!data->host_cookie)
387                         dma_unmap_sg(host->dev,
388                                      data->sg,
389                                      data->sg_len,
390                                      dw_mci_get_dma_dir(data));
391 }
392
393 static void dw_mci_idmac_reset(struct dw_mci *host)
394 {
395         u32 bmod = mci_readl(host, BMOD);
396         /* Software reset of DMA */
397         bmod |= SDMMC_IDMAC_SWRESET;
398         mci_writel(host, BMOD, bmod);
399 }
400
401 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
402 {
403         u32 temp;
404
405         /* Disable and reset the IDMAC interface */
406         temp = mci_readl(host, CTRL);
407         temp &= ~SDMMC_CTRL_USE_IDMAC;
408         temp |= SDMMC_CTRL_DMA_RESET;
409         mci_writel(host, CTRL, temp);
410
411         /* Stop the IDMAC running */
412         temp = mci_readl(host, BMOD);
413         temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
414         temp |= SDMMC_IDMAC_SWRESET;
415         mci_writel(host, BMOD, temp);
416 }
417
418 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
419 {
420         struct mmc_data *data = host->data;
421
422         dev_vdbg(host->dev, "DMA complete\n");
423
424     /*
425     MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
426         host->mrq->cmd->opcode,host->mrq->cmd->arg,data->blocks,data->blksz,mmc_hostname(host->mmc));
427     */
428     
429         host->dma_ops->cleanup(host);
430
431         /*
432          * If the card was removed, data will be NULL. No point in trying to
433          * send the stop command or waiting for NBUSY in this case.
434          */
435         if (data) {
436                 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
437                 tasklet_schedule(&host->tasklet);
438         }
439 }
440
441 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
442                                     unsigned int sg_len)
443 {
444         int i;
445         struct idmac_desc *desc = host->sg_cpu;
446
447         for (i = 0; i < sg_len; i++, desc++) {
448                 unsigned int length = sg_dma_len(&data->sg[i]);
449                 u32 mem_addr = sg_dma_address(&data->sg[i]);
450
451                 /* Set the OWN bit and disable interrupts for this descriptor */
452                 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
453
454                 /* Buffer length */
455                 IDMAC_SET_BUFFER1_SIZE(desc, length);
456
457                 /* Physical address to DMA to/from */
458                 desc->des2 = mem_addr;
459         }
460
461         /* Set first descriptor */
462         desc = host->sg_cpu;
463         desc->des0 |= IDMAC_DES0_FD;
464
465         /* Set last descriptor */
466         desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
467         desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
468         desc->des0 |= IDMAC_DES0_LD;
469
470         wmb();
471 }
472
473 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
474 {
475         u32 temp;
476
477         dw_mci_translate_sglist(host, host->data, sg_len);
478
479         /* Select IDMAC interface */
480         temp = mci_readl(host, CTRL);
481         temp |= SDMMC_CTRL_USE_IDMAC;
482         mci_writel(host, CTRL, temp);
483
484         wmb();
485
486         /* Enable the IDMAC */
487         temp = mci_readl(host, BMOD);
488         temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
489         mci_writel(host, BMOD, temp);
490
491         /* Start it running */
492         mci_writel(host, PLDMND, 1);
493 }
494
495 static int dw_mci_idmac_init(struct dw_mci *host)
496 {
497         struct idmac_desc *p;
498         int i;
499
500         /* Number of descriptors in the ring buffer */
501         host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
502
503         /* Forward link the descriptor list */
504         for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
505                 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
506
507         /* Set the last descriptor as the end-of-ring descriptor */
508         p->des3 = host->sg_dma;
509         p->des0 = IDMAC_DES0_ER;
510
511         dw_mci_idmac_reset(host);
512
513         /* Mask out interrupts - get Tx & Rx complete only */
514         mci_writel(host, IDSTS, IDMAC_INT_CLR);
515         mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
516                    SDMMC_IDMAC_INT_TI);
517
518         /* Set the descriptor base address */
519         mci_writel(host, DBADDR, host->sg_dma);
520         return 0;
521 }
522
523 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
524         .init = dw_mci_idmac_init,
525         .start = dw_mci_idmac_start_dma,
526         .stop = dw_mci_idmac_stop_dma,
527         .complete = dw_mci_idmac_complete_dma,
528         .cleanup = dw_mci_dma_cleanup,
529 };
530 #endif /* CONFIG_MMC_DW_IDMAC */
531
532 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
533                                    struct mmc_data *data,
534                                    bool next)
535 {
536         struct scatterlist *sg;
537         unsigned int i, sg_len;
538
539         if (!next && data->host_cookie)
540                 return data->host_cookie;
541
542         /*
543          * We don't do DMA on "complex" transfers, i.e. with
544          * non-word-aligned buffers or lengths. Also, we don't bother
545          * with all the DMA setup overhead for short transfers.
546          */
547         if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
548                 return -EINVAL;
549
550         if (data->blksz & 3)
551                 return -EINVAL;
552
553         for_each_sg(data->sg, sg, data->sg_len, i) {
554                 if (sg->offset & 3 || sg->length & 3)
555                         return -EINVAL;
556         }
557
558         sg_len = dma_map_sg(host->dev,
559                             data->sg,
560                             data->sg_len,
561                             dw_mci_get_dma_dir(data));
562         if (sg_len == 0)
563                 return -EINVAL;
564
565         if (next)
566                 data->host_cookie = sg_len;
567
568         return sg_len;
569 }
570
571 static void dw_mci_pre_req(struct mmc_host *mmc,
572                            struct mmc_request *mrq,
573                            bool is_first_req)
574 {
575         struct dw_mci_slot *slot = mmc_priv(mmc);
576         struct mmc_data *data = mrq->data;
577
578         if (!slot->host->use_dma || !data)
579                 return;
580
581         if (data->host_cookie) {
582                 data->host_cookie = 0;
583                 return;
584         }
585
586         if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
587                 data->host_cookie = 0;
588 }
589
590 static void dw_mci_post_req(struct mmc_host *mmc,
591                             struct mmc_request *mrq,
592                             int err)
593 {
594         struct dw_mci_slot *slot = mmc_priv(mmc);
595         struct mmc_data *data = mrq->data;
596
597         if (!slot->host->use_dma || !data)
598                 return;
599
600         if (data->host_cookie)
601                 dma_unmap_sg(slot->host->dev,
602                              data->sg,
603                              data->sg_len,
604                              dw_mci_get_dma_dir(data));
605         data->host_cookie = 0;
606 }
607
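/*
 * Choose the DMA burst size (MSIZE) and RX/TX FIFO watermarks: pick the
 * largest burst that evenly divides both the block size (in FIFO words) and
 * the free TX FIFO space above the watermark, falling back to single
 * transfers when no such burst exists or blksz is not FIFO-word aligned.
 */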
608 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
609 {
610 #ifdef CONFIG_MMC_DW_IDMAC
611         unsigned int blksz = data->blksz;
612         const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
613         u32 fifo_width = 1 << host->data_shift;
614         u32 blksz_depth = blksz / fifo_width, fifoth_val;
615         u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
616         int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
617
618         tx_wmark = (host->fifo_depth) / 2;
619         tx_wmark_invers = host->fifo_depth - tx_wmark;
620
621         /*
622          * MSIZE is '1',
623          * if blksz is not a multiple of the FIFO width
624          */
625         if (blksz % fifo_width) {
626                 msize = 0;
627                 rx_wmark = 1;
628                 goto done;
629         }
630
631         do {
632                 if (!((blksz_depth % mszs[idx]) ||
633                      (tx_wmark_invers % mszs[idx]))) {
634                         msize = idx;
635                         rx_wmark = mszs[idx] - 1;
636                         break;
637                 }
638         } while (--idx > 0);
639         /*
640          * If idx is '0', it won't be tried.
641          * Thus, the initial values are used.
642          */
643 done:
644         fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
645         mci_writel(host, FIFOTH, fifoth_val);
646 #endif
647 }
648
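/*
 * Card read threshold (CDTHRCTL): in HS200/SDR104 modes, start a block read
 * only once the FIFO can hold a whole block; the threshold is disabled when
 * the block does not fit in the FIFO or for other timings.
 */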
649 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
650 {
651         unsigned int blksz = data->blksz;
652         u32 blksz_depth, fifo_depth;
653         u16 thld_size;
654
655         WARN_ON(!(data->flags & MMC_DATA_READ));
656
657         if (host->timing != MMC_TIMING_MMC_HS200 &&
658             host->timing != MMC_TIMING_UHS_SDR104)
659                 goto disable;
660
661         blksz_depth = blksz / (1 << host->data_shift);
662         fifo_depth = host->fifo_depth;
663
664         if (blksz_depth > fifo_depth)
665                 goto disable;
666
667         /*
668          * If (blksz_depth) >= (fifo_depth >> 1), thld_size should be <= blksz.
669          * If (blksz_depth) <  (fifo_depth >> 1), thld_size should be = blksz.
670          * Currently we just choose blksz in both cases.
671          */
672         thld_size = blksz;
673         mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
674         return;
675
676 disable:
677         mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
678 }
679
680 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
681 {
682         int sg_len;
683         u32 temp;
684
685         host->using_dma = 0;
686
687         /* If we don't have a channel, we can't do DMA */
688         if (!host->use_dma)
689                 return -ENODEV;
690
691         sg_len = dw_mci_pre_dma_transfer(host, data, 0);
692         if (sg_len < 0) {
693                 host->dma_ops->stop(host);
694                 return sg_len;
695         }
696
697         host->using_dma = 1;
698
699         dev_vdbg(host->dev,
700                  "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
701                  (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
702                  sg_len);
703
704         /*
705          * Decide the MSIZE and RX/TX Watermark.
706          * If current block size is same with previous size,
707          * no need to update fifoth.
708          */
709         if (host->prev_blksz != data->blksz)
710                 dw_mci_adjust_fifoth(host, data);
711
712         /* Enable the DMA interface */
713         temp = mci_readl(host, CTRL);
714         temp |= SDMMC_CTRL_DMA_ENABLE;
715         mci_writel(host, CTRL, temp);
716
717         /* Disable RX/TX IRQs, let DMA handle it */
718         temp = mci_readl(host, INTMASK);
719         temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
720         mci_writel(host, INTMASK, temp);
721
722         host->dma_ops->start(host, sg_len);
723
724         return 0;
725 }
726
727 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
728 {
729         u32 temp;
730
731         data->error = -EINPROGRESS;
732
733         WARN_ON(host->data);
734         host->sg = NULL;
735         host->data = data;
736
737         if (data->flags & MMC_DATA_READ) {
738                 host->dir_status = DW_MCI_RECV_STATUS;
739                 dw_mci_ctrl_rd_thld(host, data);
740         } else {
741                 host->dir_status = DW_MCI_SEND_STATUS;
742         }
743         
744     MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
745          data->blocks, data->blksz, mmc_hostname(host->mmc));
746
747         if (dw_mci_submit_data_dma(host, data)) {
748                 int flags = SG_MITER_ATOMIC;
749                 if (host->data->flags & MMC_DATA_READ)
750                         flags |= SG_MITER_TO_SG;
751                 else
752                         flags |= SG_MITER_FROM_SG;
753
754                 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
755                 host->sg = data->sg;
756                 host->part_buf_start = 0;
757                 host->part_buf_count = 0;
758
759                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
760                 temp = mci_readl(host, INTMASK);
761                 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
762                 mci_writel(host, INTMASK, temp);
763
764                 temp = mci_readl(host, CTRL);
765                 temp &= ~SDMMC_CTRL_DMA_ENABLE;
766                 mci_writel(host, CTRL, temp);
767
768                 /*
769                  * Use the initial fifoth_val for PIO mode.
770                  * If the next issued data may be transferred in DMA mode,
771                  * prev_blksz should be invalidated.
772                  */
773                 mci_writel(host, FIFOTH, host->fifoth_val);
774                 host->prev_blksz = 0;
775         } else {
776                 /*
777                  * Keep the current block size.
778                  * It will be used to decide whether to update
779                  * fifoth register next time.
780                  */
781                 host->prev_blksz = data->blksz;
782         }
783 }
784
785 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
786 {
787         struct dw_mci *host = slot->host;
788         unsigned long timeout = jiffies + msecs_to_jiffies(500);
789         unsigned int cmd_status = 0;
790
791         mci_writel(host, CMDARG, arg);
792         wmb();
793         mci_writel(host, CMD, SDMMC_CMD_START | cmd);
794
795         while (time_before(jiffies, timeout)) {
796                 cmd_status = mci_readl(host, CMD);
797                 if (!(cmd_status & SDMMC_CMD_START))
798                         return;
799         }
800         dev_err(&slot->mmc->class_dev,
801                 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
802                 cmd, arg, cmd_status);
803 }
804
805 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
806 {
807         struct dw_mci *host = slot->host;
808         unsigned int clock = slot->clock;
809         u32 div;
810         u32 clk_en_a;
811         u32 sdio_int;
812
813         if (!clock) {
814                 mci_writel(host, CLKENA, 0);
815                 mci_send_cmd(slot,
816                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
817         } else if (clock != host->current_speed || force_clkinit) {
818                 div = host->bus_hz / clock;
819                 if (host->bus_hz % clock && host->bus_hz > clock)
820                         /*
821                          * move the + 1 after the divide to prevent
822                          * over-clocking the card.
823                          */
824                         div += 1;
825
826                 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
827
828                 if ((clock << div) != slot->__clk_old || force_clkinit)
829                         dev_info(&slot->mmc->class_dev,
830                                  "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
831                                  slot->id, host->bus_hz, clock,
832                                  div ? ((host->bus_hz / div) >> 1) :
833                                  host->bus_hz, div);
834
835                 /* disable clock */
836                 mci_writel(host, CLKENA, 0);
837                 mci_writel(host, CLKSRC, 0);
838
839                 /* inform CIU */
840                 mci_send_cmd(slot,
841                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
842
843                 /* set clock to desired speed */
844                 mci_writel(host, CLKDIV, div);
845
846                 /* inform CIU */
847                 mci_send_cmd(slot,
848                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
849
850                 /* enable clock; only low power if no SDIO */
851                 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
852
853                 if (host->verid < DW_MMC_240A)
854                         sdio_int = SDMMC_INT_SDIO(slot->id);
855                 else
856                         sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
857
858                 if (!(mci_readl(host, INTMASK) & sdio_int))
859                         clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
860                 mci_writel(host, CLKENA, clk_en_a);
861
862                 /* inform CIU */
863                 mci_send_cmd(slot,
864                              SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
865
866                 /* keep the clock value, reflecting the clock divider */
867                 slot->__clk_old = clock << div;
868         }
869
870         host->current_speed = clock;
871
872     if(slot->ctype != slot->pre_ctype)
873             MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]", \
874                 div ? ((host->bus_hz / div) >> 1):host->bus_hz, \
875                 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits", mmc_hostname(host->mmc));
876     slot->pre_ctype = slot->ctype;
877
878         /* Set the current slot bus width */
879         mci_writel(host, CTYPE, (slot->ctype << slot->id));
880
881 }
882
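/*
 * Rockchip addition: poll the STATUS register until both the data-busy and
 * state-machine-busy flags are clear, bounded by a card-type specific
 * timeout (eMMC / SD / default SDIO).
 */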
883 static void dw_mci_wait_unbusy(struct dw_mci *host)
884 {
885     unsigned int    timeout= SDMMC_DATA_TIMEOUT_SDIO;
886     unsigned long   time_loop;
887     unsigned int    status;
888
889     MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
890     
891     if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
892         timeout = SDMMC_DATA_TIMEOUT_EMMC;
893     else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
894         timeout = SDMMC_DATA_TIMEOUT_SD;
895         
896     time_loop = jiffies + msecs_to_jiffies(timeout);
897     do {
898         status = mci_readl(host, STATUS);
899         if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
900                 break;
901         //MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");   
902     } while (time_before(jiffies, time_loop));
903 }
904
905 static void __dw_mci_start_request(struct dw_mci *host,
906                                    struct dw_mci_slot *slot,
907                                    struct mmc_command *cmd)
908 {
909         struct mmc_request *mrq;
910         struct mmc_data *data;
911         u32 cmdflags;
912
913         mrq = slot->mrq;
914         if (host->pdata->select_slot)
915                 host->pdata->select_slot(slot->id);
916
917         host->cur_slot = slot;
918         host->mrq = mrq;
919 #if 0 //add by xbw,at 2014-03-12
920         /*clean FIFO if it is a new request*/
921     if(!(mrq->cmd->opcode & SDMMC_CMD_STOP)) {
922         MMC_DBG_INFO_FUNC("%d..%s: reset the ctrl.", __LINE__, __FUNCTION__);   
923         mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
924                                 SDMMC_CTRL_DMA_RESET));
925     }
926  #endif   
927     dw_mci_wait_unbusy(host);
928     
929         host->pending_events = 0;
930         host->completed_events = 0;
931         host->data_status = 0;
932
933         data = cmd->data;
934         if (data) {
935                 dw_mci_set_timeout(host);
936                 mci_writel(host, BYTCNT, data->blksz*data->blocks);
937                 mci_writel(host, BLKSIZ, data->blksz);
938         }
939
940         cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
941
942         /* this is the first command, send the initialization clock */
943         if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
944                 cmdflags |= SDMMC_CMD_INIT;
945
946         if (data) {
947                 dw_mci_submit_data(host, data);
948                 wmb();
949         }
950
951         dw_mci_start_command(host, cmd, cmdflags);
952
953         if (mrq->stop)
954                 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
955 }
956
957 static void dw_mci_start_request(struct dw_mci *host,
958                                  struct dw_mci_slot *slot)
959 {
960         struct mmc_request *mrq = slot->mrq;
961         struct mmc_command *cmd;
962         
963     MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
964         mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
965         
966         cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
967         __dw_mci_start_request(host, slot, cmd);
968 }
969
970 /* must be called with host->lock held */
971 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
972                                  struct mmc_request *mrq)
973 {
974         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
975                  host->state);
976
977         slot->mrq = mrq;
978
979         if (host->state == STATE_IDLE) {
980                 host->state = STATE_SENDING_CMD;
981                 dw_mci_start_request(host, slot);
982         } else {
983                 list_add_tail(&slot->queue_node, &host->queue);
984         }
985 }
986
987 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
988 {
989         struct dw_mci_slot *slot = mmc_priv(mmc);
990         struct dw_mci *host = slot->host;
991
992         WARN_ON(slot->mrq);
993
994         /*
995          * The check for card presence and queueing of the request must be
996          * atomic, otherwise the card could be removed in between and the
997          * request wouldn't fail until another card was inserted.
998          */
999         spin_lock_bh(&host->lock);
1000
1001         if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1002                 spin_unlock_bh(&host->lock);
1003                 mrq->cmd->error = -ENOMEDIUM;
1004                 
1005                 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_request--request done, cmd=%d [%s]", mrq->cmd->opcode, mmc_hostname(host->mmc));
1006             
1007                 mmc_request_done(mmc, mrq);
1008                 return;
1009         }
1010     MMC_DBG_CMD_FUNC(host->mmc, "======>\n    pull a new request from the MMC framework into the dw_mci queue. cmd=%d(arg=0x%x)[%s]", \
1011         mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1012
1013         dw_mci_queue_request(host, slot, mrq);
1014
1015         spin_unlock_bh(&host->lock);
1016 }
1017
1018 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1019 {
1020         struct dw_mci_slot *slot = mmc_priv(mmc);
1021         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1022         u32 regs;
1023
1024         switch (ios->bus_width) {
1025         case MMC_BUS_WIDTH_4:
1026                 slot->ctype = SDMMC_CTYPE_4BIT;
1027                 break;  
1028         case MMC_BUS_WIDTH_8: 
1029                 slot->ctype = SDMMC_CTYPE_8BIT;
1030                 break;  
1031         default:
1032                 /* set default 1 bit mode */
1033                 slot->ctype = SDMMC_CTYPE_1BIT;
1034                 slot->pre_ctype = SDMMC_CTYPE_1BIT;
1035         }
1036
1037         regs = mci_readl(slot->host, UHS_REG);
1038
1039         /* DDR mode set */
1040         if (ios->timing == MMC_TIMING_UHS_DDR50)
1041                 regs |= ((0x1 << slot->id) << 16);
1042         else
1043                 regs &= ~((0x1 << slot->id) << 16);
1044
1045         mci_writel(slot->host, UHS_REG, regs);
1046         slot->host->timing = ios->timing;
1047
1048         /*
1049          * Use mirror of ios->clock to prevent race with mmc
1050          * core ios update when finding the minimum.
1051          */
1052         slot->clock = ios->clock;
1053
1054         if (drv_data && drv_data->set_ios)
1055                 drv_data->set_ios(slot->host, ios);
1056
1057         /* Slot specific timing and width adjustment */
1058         dw_mci_setup_bus(slot, false);
1059
1060         switch (ios->power_mode) {
1061         case MMC_POWER_UP:
1062                 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1063                 /* Power up slot */
1064                 if (slot->host->pdata->setpower)
1065                         slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1066                 regs = mci_readl(slot->host, PWREN);
1067                 regs |= (1 << slot->id);
1068                 mci_writel(slot->host, PWREN, regs);
1069                 break;
1070         case MMC_POWER_OFF:
1071                 /* Power down slot */
1072                 if (slot->host->pdata->setpower)
1073                         slot->host->pdata->setpower(slot->id, 0);
1074                 regs = mci_readl(slot->host, PWREN);
1075                 regs &= ~(1 << slot->id);
1076                 mci_writel(slot->host, PWREN, regs);
1077                 break;
1078         default:
1079                 break;
1080         }
1081 }
1082
1083 static int dw_mci_get_ro(struct mmc_host *mmc)
1084 {
1085         int read_only;
1086         struct dw_mci_slot *slot = mmc_priv(mmc);
1087         struct dw_mci_board *brd = slot->host->pdata;
1088
1089         /* Use platform get_ro function, else try on board write protect */
1090         if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1091                 read_only = 0;
1092         else if (brd->get_ro)
1093                 read_only = brd->get_ro(slot->id);
1094         else if (gpio_is_valid(slot->wp_gpio))
1095                 read_only = gpio_get_value(slot->wp_gpio);
1096         else
1097                 read_only =
1098                         mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1099
1100         dev_dbg(&mmc->class_dev, "card is %s\n",
1101                 read_only ? "read-only" : "read-write");
1102
1103         return read_only;
1104 }
1105
1106 static int dw_mci_get_cd(struct mmc_host *mmc)
1107 {
1108         int present;
1109         struct dw_mci_slot *slot = mmc_priv(mmc);
1110         struct dw_mci_board *brd = slot->host->pdata;
1111         struct dw_mci *host = slot->host;
1112         int gpio_cd = mmc_gpio_get_cd(mmc);
1113         
1114     if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
1115         spin_lock_bh(&host->lock);
1116         set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1117         spin_unlock_bh(&host->lock);
1118         
1119         return 1;
1120     }
1121
1122         /* Use platform get_cd function, else try onboard card detect */
1123         if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1124                 present = 1;
1125         else if (brd->get_cd)
1126                 present = !brd->get_cd(slot->id);
1127         else if (!IS_ERR_VALUE(gpio_cd))
1128                 present = gpio_cd;
1129         else
1130                 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1131                         == 0 ? 1 : 0;
1132
1133         spin_lock_bh(&host->lock);
1134         if (present) {
1135                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1136                 dev_dbg(&mmc->class_dev, "card is present\n");
1137         } else {
1138                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1139                 dev_dbg(&mmc->class_dev, "card is not present\n");
1140         }
1141         spin_unlock_bh(&host->lock);
1142
1143         return present;
1144 }
1145
1146 static void dw_mci_hw_reset(struct mmc_host *mmc)
1147 {
1148     struct dw_mci_slot *slot = mmc_priv(mmc);
1149
1150     /* 
1151      * According to eMMC spec 
1152      * tRstW >= 1us ;   RST_n pulse width
1153      * tRSCA >= 200us ; RST_n to Command time
1154      * tRSTH >= 1us ;   RST_n high period
1155      */
1156
1157     mci_writel(slot->host, RST_n, 0x1);
1158     dsb();
1159     udelay(10); /* 10us; extra margin for marginal eMMC parts */
1160
1161     mci_writel(slot->host, RST_n, 0x0);
1162     dsb();
1163     usleep_range(300, 1000); /* at least 300us (spec requires > 200us) */
1164     
1165 }
1166
1167 /*
1168  * Disable low power mode.
1169  *
1170  * Low power mode will stop the card clock when idle.  According to the
1171  * description of the CLKENA register we should disable low power mode
1172  * for SDIO cards if we need SDIO interrupts to work.
1173  *
1174  * This function is fast if low power mode is already disabled.
1175  */
1176 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1177 {
1178         struct dw_mci *host = slot->host;
1179         u32 clk_en_a;
1180         const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1181
1182         clk_en_a = mci_readl(host, CLKENA);
1183
1184         if (clk_en_a & clken_low_pwr) {
1185                 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1186                 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1187                              SDMMC_CMD_PRV_DAT_WAIT, 0);
1188         }
1189 }
1190
1191 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1192 {
1193         struct dw_mci_slot *slot = mmc_priv(mmc);
1194         struct dw_mci *host = slot->host;
1195         u32 int_mask;
1196         u32 sdio_int;
1197
1198         /* Enable/disable Slot Specific SDIO interrupt */
1199         int_mask = mci_readl(host, INTMASK);
1200
1201     if (host->verid < DW_MMC_240A)
1202                 sdio_int = SDMMC_INT_SDIO(slot->id);
1203         else
1204                 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1205         
1206         if (enb) {
1207                 /*
1208                  * Turn off low power mode if it was enabled.  This is a bit of
1209                  * a heavy operation and we disable / enable IRQs a lot, so
1210                  * we'll leave low power mode disabled and it will get
1211                  * re-enabled again in dw_mci_setup_bus().
1212                  */
1213                 dw_mci_disable_low_power(slot);
1214
1215                 mci_writel(host, INTMASK,
1216                            (int_mask | sdio_int));
1217         } else {
1218                 mci_writel(host, INTMASK,
1219                            (int_mask & ~sdio_int));
1220         }
1221 }
1222
1223 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1224 {
1225         struct dw_mci_slot *slot = mmc_priv(mmc);
1226         struct dw_mci *host = slot->host;
1227         const struct dw_mci_drv_data *drv_data = host->drv_data;
1228         struct dw_mci_tuning_data tuning_data;
1229         int err = -ENOSYS;
1230
1231         if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1232                 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1233                         tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1234                         tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1235                 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1236                         tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1237                         tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1238                 } else {
1239                         return -EINVAL;
1240                 }
1241         } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1242                 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1243                 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1244         } else {
1245                 dev_err(host->dev,
1246                         "Undefined command(%d) for tuning\n", opcode);
1247                 return -EINVAL;
1248         }
1249
1250         if (drv_data && drv_data->execute_tuning)
1251                 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1252         return err;
1253 }
1254
1255 static const struct mmc_host_ops dw_mci_ops = {
1256         .request                = dw_mci_request,
1257         .pre_req                = dw_mci_pre_req,
1258         .post_req               = dw_mci_post_req,
1259         .set_ios                = dw_mci_set_ios,
1260         .get_ro                 = dw_mci_get_ro,
1261         .get_cd                 = dw_mci_get_cd,
1262         .hw_reset       = dw_mci_hw_reset,
1263         .enable_sdio_irq        = dw_mci_enable_sdio_irq,
1264         .execute_tuning         = dw_mci_execute_tuning,
1265 };
1266
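/*
 * For write transfers (DW_MCI_SEND_STATUS), wait for the card and controller
 * to leave the busy state before the request is completed; the #if 0 block
 * below preserves the earlier error-mapping variant.
 */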
1267 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1268         __releases(&host->lock)
1269         __acquires(&host->lock)
1270 {
1271         if(DW_MCI_SEND_STATUS == host->dir_status){
1272             #if 0
1273             if( MMC_BUS_TEST_W != host->cmd->opcode){
1274                 if(host->data_status & SDMMC_INT_DCRC)
1275                     host->data->error = -EILSEQ;
1276                 else if(host->data_status & SDMMC_INT_EBE)
1277                     host->data->error = -ETIMEDOUT;
1278             } else {
1279                 dw_mci_wait_unbusy(host); 
1280             }
1281             #else
1282             dw_mci_wait_unbusy(host);
1283             #endif
1284             
1285         }
1286 }
1287
1288 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1289         __releases(&host->lock)
1290         __acquires(&host->lock)
1291 {
1292         struct dw_mci_slot *slot;
1293         struct mmc_host *prev_mmc = host->cur_slot->mmc;
1294
1295         WARN_ON(host->cmd || host->data);
1296         
1297     dw_mci_deal_data_end(host, mrq);
1298
1299         if(mrq->cmd)
1300        MMC_DBG_CMD_FUNC(host->mmc, " request end--request done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1301             mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1302         if(mrq->data)
1303        MMC_DBG_CMD_FUNC(host->mmc, " request end--request done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1304             mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1305
1306         host->cur_slot->mrq = NULL;
1307         host->mrq = NULL;
1308         if (!list_empty(&host->queue)) {
1309                 slot = list_entry(host->queue.next,
1310                                   struct dw_mci_slot, queue_node);
1311                 list_del(&slot->queue_node);
1312                 dev_vdbg(host->dev, "list not empty: %s is next\n",
1313                          mmc_hostname(slot->mmc));
1314                 host->state = STATE_SENDING_CMD;
1315                 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
1316                 dw_mci_start_request(host, slot);
1317         } else {
1318                 dev_vdbg(host->dev, "list empty\n");
1319                 host->state = STATE_IDLE;
1320         }
1321
1322         spin_unlock(&host->lock);
1323         mmc_request_done(prev_mmc, mrq);
1324         spin_lock(&host->lock);
1325 }
1326
1327 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1328 {
1329         u32 status = host->cmd_status;
1330
1331         host->cmd_status = 0;
1332
1333         /* Read the response from the card (up to 16 bytes) */
1334         if (cmd->flags & MMC_RSP_PRESENT) {
1335                 if (cmd->flags & MMC_RSP_136) {
1336                         cmd->resp[3] = mci_readl(host, RESP0);
1337                         cmd->resp[2] = mci_readl(host, RESP1);
1338                         cmd->resp[1] = mci_readl(host, RESP2);
1339                         cmd->resp[0] = mci_readl(host, RESP3);
1340                         
1341             MMC_DBG_INFO_FUNC(host->mmc," command complete [%s], \ncmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x", \
1342                     mmc_hostname(host->mmc), cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0]);
1343                 } else {
1344                         cmd->resp[0] = mci_readl(host, RESP0);
1345                         cmd->resp[1] = 0;
1346                         cmd->resp[2] = 0;
1347                         cmd->resp[3] = 0;                       
1348             MMC_DBG_INFO_FUNC(host->mmc, " command complete [%s], cmd=%d,resp[0]=0x%x",\
1349                     mmc_hostname(host->mmc),cmd->opcode, cmd->resp[0]);
1350                 }
1351         }
1352
1353         if (status & SDMMC_INT_RTO)
1354                 cmd->error = -ETIMEDOUT;
1355         else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1356                 cmd->error = -EILSEQ;
1357         else if (status & SDMMC_INT_RESP_ERR)
1358                 cmd->error = -EIO;
1359         else
1360                 cmd->error = 0;
1361     MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=0x%x [%s]",cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1362
1363         if (cmd->error) {
1364             if(MMC_SEND_STATUS != cmd->opcode)
1365             MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=0x%x [%s]",\
1366                 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1367                 
1368                 /* newer ip versions need a delay between retries */
1369                 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1370                         mdelay(20);
1371         }
1372
1373 }
1374
1375 static void dw_mci_tasklet_func(unsigned long priv)
1376 {
1377         struct dw_mci *host = (struct dw_mci *)priv;
1378     struct dw_mci_slot *slot = mmc_priv(host->mmc);
1379         struct mmc_data *data;
1380         struct mmc_command *cmd;
1381         enum dw_mci_state state;
1382         enum dw_mci_state prev_state;
1383         u32 status, ctrl;
1384
1385         spin_lock(&host->lock);
1386
1387         state = host->state;
1388         data = host->data;
1389
1390         do {
1391                 prev_state = state;
1392
1393                 switch (state) {
1394                 case STATE_IDLE:
1395                         break;
1396
1397                 case STATE_SENDING_CMD:
1398                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1399                                                 &host->pending_events))
1400                                 break;
1401
1402                         cmd = host->cmd;
1403                         host->cmd = NULL;
1404                         set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1405                         dw_mci_command_complete(host, cmd);
1406                         if (cmd == host->mrq->sbc && !cmd->error) {
1407                                 prev_state = state = STATE_SENDING_CMD;
1408                                 __dw_mci_start_request(host, host->cur_slot,
1409                                                        host->mrq->cmd);
1410                                 goto unlock;
1411                         }
1412                         
1413             if (cmd->data && cmd->error) {
1414                                 dw_mci_stop_dma(host);
1415                                 #if 1
1416                 if (data->stop) {
1417                     send_stop_cmd(host, data);
1418                     state = STATE_SENDING_STOP;
1419                     break;
1420                 } else {
1421                     host->data = NULL;
1422                 }
1423                                 #else
1424                                 send_stop_abort(host, data);
1425                                 state = STATE_SENDING_STOP;
1426                                 break;
1427                                 #endif
1428                         }
1429
1430
1431                         if (!host->mrq->data || cmd->error) {
1432                                 dw_mci_request_end(host, host->mrq);
1433                                 goto unlock;
1434                         }
1435
1436                         prev_state = state = STATE_SENDING_DATA;
1437                         /* fall through */
1438
1439                 case STATE_SENDING_DATA:
1440                         if (test_and_clear_bit(EVENT_DATA_ERROR,
1441                                                &host->pending_events)) {
1442                                 dw_mci_stop_dma(host);
1443                                 #if 1
1444                                 if (data->stop)
1445                                         send_stop_cmd(host, data);
1446                                 #else
1447                                 send_stop_abort(host, data);
1448                                 #endif
1449                                 state = STATE_DATA_ERROR;
1450                                 break;
1451                         }
1452             MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
1453                         prev_state,state, mmc_hostname(host->mmc));
1454
1455                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1456                                                 &host->pending_events))
1457                                 break;
1458             MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]:  STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
1459                         prev_state,state,mmc_hostname(host->mmc));
1460             
1461                         set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1462                         prev_state = state = STATE_DATA_BUSY;
1463                         /* fall through */
1464
1465                 case STATE_DATA_BUSY:
1466                         if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1467                                                 &host->pending_events))
1468                                 break;
1469                                 
1470                         dw_mci_deal_data_end(host, host->mrq);                  
1471             MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
1472                     prev_state,state,mmc_hostname(host->mmc));
1473
1474                         host->data = NULL;
1475                         set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1476                         status = host->data_status;
1477
1478                         if (status & DW_MCI_DATA_ERROR_FLAGS) {
1479                                 if ((SDMMC_CTYPE_1BIT != slot->ctype) && (MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
1480                                         MMC_DBG_ERR_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS, datastatus=0x%x [%s]",
1481                                                          prev_state, state, status, mmc_hostname(host->mmc));
1482
1483                                 if (status & SDMMC_INT_DRTO) {
1484                                         data->error = -ETIMEDOUT;
1485                                 } else if (status & SDMMC_INT_DCRC) {
1486                                         data->error = -EILSEQ;
1487                                 } else if (status & SDMMC_INT_EBE &&
1488                                            host->dir_status ==
1489                                                         DW_MCI_SEND_STATUS) {
1490                                         /*
1491                                          * No data CRC status was returned.
1492                                          * The number of bytes transferred will
1493                                          * be exaggerated in PIO mode.
1494                                          */
1495                                         data->bytes_xfered = 0;
1496                                         data->error = -ETIMEDOUT;
1497                                 } else {
1498                                         dev_err(host->dev,
1499                                                 "data FIFO error "
1500                                                 "(status=%08x)\n",
1501                                                 status);
1502                                         data->error = -EIO;
1503                                 }
1504                                 /*
1505                                  * After an error, there may be data lingering
1506                                  * in the FIFO, so reset it - doing so
1507                                  * generates a block interrupt, hence setting
1508                                  * the scatter-gather pointer to NULL.
1509                                  */
1510                                 sg_miter_stop(&host->sg_miter);
1511                                 host->sg = NULL;
1512                                 ctrl = mci_readl(host, CTRL);
1513                                 ctrl |= SDMMC_CTRL_FIFO_RESET;
1514                                 mci_writel(host, CTRL, ctrl);
1515                         } else {
1516                                 data->bytes_xfered = data->blocks * data->blksz;
1517                                 data->error = 0;
1518                         }
1519
1520                         if (!data->stop) {
1521                                 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no data error, exit. [%s]",
1522                                                  prev_state, state, mmc_hostname(host->mmc));
1523                                 dw_mci_request_end(host, host->mrq);
1524                                 goto unlock;
1525                         }
1526                         MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop. [%s]",
1527                                          prev_state, state, mmc_hostname(host->mmc));
1528
1529                         if (host->mrq->sbc && !data->error) {
1530                                 data->stop->error = 0;
1531
1532                                 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]",
1533                                                  prev_state, state, mmc_hostname(host->mmc));
1534
1535                                 dw_mci_request_end(host, host->mrq);
1536                                 goto unlock;
1537                         }
1538
1539                         prev_state = state = STATE_SENDING_STOP;
1540                         if (!data->error)
1541                                 send_stop_cmd(host, data);
1542                         #if 0
1543                         if (data->stop && !data->error) {
1544                                 /* stop command for open-ended transfer */
1545
1546                                 send_stop_abort(host, data);
1547                         }
1548                         #endif
1549                         MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP. [%s]",
1550                                          prev_state, state, mmc_hostname(host->mmc));
1551                         /* fall through */
1552
1553                 case STATE_SENDING_STOP:
1554                         if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1555                                                 &host->pending_events))
1556                                 break;
1557                         MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: stop command (CMD12) completed. [%s]",
1558                                          prev_state, state, mmc_hostname(host->mmc));
1559
1560                         /* CMD error in data command */
1561                         if (host->mrq->cmd->error && host->mrq->data) {
1562                                 sg_miter_stop(&host->sg_miter);
1563                                 host->sg = NULL;
1564                                 ctrl = mci_readl(host, CTRL);
1565                                 ctrl |= SDMMC_CTRL_FIFO_RESET;
1566                                 mci_writel(host, CTRL, ctrl);
1567                         }
1568
1569                         host->cmd = NULL;
1570                         host->data = NULL;
1571                         #if 1
1572                         dw_mci_command_complete(host, host->mrq->stop);
1573                         #else
1574                         if (host->mrq->stop)
1575                                 dw_mci_command_complete(host, host->mrq->stop);
1576                         else
1577                                 host->cmd_status = 0;
1578                         #endif
1579
1580                         dw_mci_request_end(host, host->mrq);
1581                         goto unlock;
1582
1583                 case STATE_DATA_ERROR:
1584                         if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1585                                                 &host->pending_events))
1586                                 break;
1587
1588                         state = STATE_DATA_BUSY;
1589                         break;
1590                 }
1591         } while (state != prev_state);
1592
1593         host->state = state;
1594 unlock:
1595         spin_unlock(&host->lock);
1596
1597 }
1598
1599 /* push final bytes to part_buf, only use during push */
1600 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1601 {
1602         memcpy((void *)&host->part_buf, buf, cnt);
1603         host->part_buf_count = cnt;
1604 }
1605
1606 /* append bytes to part_buf, only use during push */
1607 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1608 {
1609         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1610         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1611         host->part_buf_count += cnt;
1612         return cnt;
1613 }
1614
1615 /* pull first bytes from part_buf, only use during pull */
1616 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1617 {
1618         cnt = min(cnt, (int)host->part_buf_count);
1619         if (cnt) {
1620                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1621                        cnt);
1622                 host->part_buf_count -= cnt;
1623                 host->part_buf_start += cnt;
1624         }
1625         return cnt;
1626 }
1627
1628 /* pull final bytes from the part_buf, assuming it's just been filled */
1629 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1630 {
1631         memcpy(buf, &host->part_buf, cnt);
1632         host->part_buf_start = cnt;
1633         host->part_buf_count = (1 << host->data_shift) - cnt;
1634 }
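/*
 * Illustrative sketch (not part of the driver logic): the part_buf helpers
 * above let the byte-oriented PIO path feed a FIFO that is only accessible
 * in whole host words of (1 << data_shift) bytes.  Assuming a 16-bit FIFO
 * (data_shift == 1) on a little-endian CPU, pushing a 5-byte chunk would
 * behave roughly as follows:
 *
 *     u8 chunk[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
 *     dw_mci_push_data16(host, chunk, sizeof(chunk));
 *     // two mci_writew() calls push 0x2211 and 0x4433; the odd byte 0x55
 *     // is stashed via dw_mci_set_part_bytes() and flushed either when the
 *     // next chunk arrives or when bytes_xfered reaches blksz * blocks.
 */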
1635
1636 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1637 {
1638         struct mmc_data *data = host->data;
1639         int init_cnt = cnt;
1640
1641         /* try and push anything in the part_buf */
1642         if (unlikely(host->part_buf_count)) {
1643                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1644                 buf += len;
1645                 cnt -= len;
1646                 if (host->part_buf_count == 2) {
1647                         mci_writew(host, DATA(host->data_offset),
1648                                         host->part_buf16);
1649                         host->part_buf_count = 0;
1650                 }
1651         }
1652 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1653         if (unlikely((unsigned long)buf & 0x1)) {
1654                 while (cnt >= 2) {
1655                         u16 aligned_buf[64];
1656                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1657                         int items = len >> 1;
1658                         int i;
1659                         /* memcpy from input buffer into aligned buffer */
1660                         memcpy(aligned_buf, buf, len);
1661                         buf += len;
1662                         cnt -= len;
1663                         /* push data from aligned buffer into fifo */
1664                         for (i = 0; i < items; ++i)
1665                                 mci_writew(host, DATA(host->data_offset),
1666                                                 aligned_buf[i]);
1667                 }
1668         } else
1669 #endif
1670         {
1671                 u16 *pdata = buf;
1672                 for (; cnt >= 2; cnt -= 2)
1673                         mci_writew(host, DATA(host->data_offset), *pdata++);
1674                 buf = pdata;
1675         }
1676         /* put anything remaining in the part_buf */
1677         if (cnt) {
1678                 dw_mci_set_part_bytes(host, buf, cnt);
1679                  /* Push data if we have reached the expected data length */
1680                 if ((data->bytes_xfered + init_cnt) ==
1681                     (data->blksz * data->blocks))
1682                         mci_writew(host, DATA(host->data_offset),
1683                                    host->part_buf16);
1684         }
1685 }
1686
1687 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1688 {
1689 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1690         if (unlikely((unsigned long)buf & 0x1)) {
1691                 while (cnt >= 2) {
1692                         /* pull data from fifo into aligned buffer */
1693                         u16 aligned_buf[64];
1694                         int len = min(cnt & -2, (int)sizeof(aligned_buf));
1695                         int items = len >> 1;
1696                         int i;
1697                         for (i = 0; i < items; ++i)
1698                                 aligned_buf[i] = mci_readw(host,
1699                                                 DATA(host->data_offset));
1700                         /* memcpy from aligned buffer into output buffer */
1701                         memcpy(buf, aligned_buf, len);
1702                         buf += len;
1703                         cnt -= len;
1704                 }
1705         } else
1706 #endif
1707         {
1708                 u16 *pdata = buf;
1709                 for (; cnt >= 2; cnt -= 2)
1710                         *pdata++ = mci_readw(host, DATA(host->data_offset));
1711                 buf = pdata;
1712         }
1713         if (cnt) {
1714                 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1715                 dw_mci_pull_final_bytes(host, buf, cnt);
1716         }
1717 }
1718
1719 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1720 {
1721         struct mmc_data *data = host->data;
1722         int init_cnt = cnt;
1723
1724         /* try and push anything in the part_buf */
1725         if (unlikely(host->part_buf_count)) {
1726                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1727                 buf += len;
1728                 cnt -= len;
1729                 if (host->part_buf_count == 4) {
1730                         mci_writel(host, DATA(host->data_offset),
1731                                         host->part_buf32);
1732                         host->part_buf_count = 0;
1733                 }
1734         }
1735 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1736         if (unlikely((unsigned long)buf & 0x3)) {
1737                 while (cnt >= 4) {
1738                         u32 aligned_buf[32];
1739                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1740                         int items = len >> 2;
1741                         int i;
1742                         /* memcpy from input buffer into aligned buffer */
1743                         memcpy(aligned_buf, buf, len);
1744                         buf += len;
1745                         cnt -= len;
1746                         /* push data from aligned buffer into fifo */
1747                         for (i = 0; i < items; ++i)
1748                                 mci_writel(host, DATA(host->data_offset),
1749                                                 aligned_buf[i]);
1750                 }
1751         } else
1752 #endif
1753         {
1754                 u32 *pdata = buf;
1755                 for (; cnt >= 4; cnt -= 4)
1756                         mci_writel(host, DATA(host->data_offset), *pdata++);
1757                 buf = pdata;
1758         }
1759         /* put anything remaining in the part_buf */
1760         if (cnt) {
1761                 dw_mci_set_part_bytes(host, buf, cnt);
1762                  /* Push data if we have reached the expected data length */
1763                 if ((data->bytes_xfered + init_cnt) ==
1764                     (data->blksz * data->blocks))
1765                         mci_writel(host, DATA(host->data_offset),
1766                                    host->part_buf32);
1767         }
1768 }
1769
1770 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1771 {
1772 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1773         if (unlikely((unsigned long)buf & 0x3)) {
1774                 while (cnt >= 4) {
1775                         /* pull data from fifo into aligned buffer */
1776                         u32 aligned_buf[32];
1777                         int len = min(cnt & -4, (int)sizeof(aligned_buf));
1778                         int items = len >> 2;
1779                         int i;
1780                         for (i = 0; i < items; ++i)
1781                                 aligned_buf[i] = mci_readl(host,
1782                                                 DATA(host->data_offset));
1783                         /* memcpy from aligned buffer into output buffer */
1784                         memcpy(buf, aligned_buf, len);
1785                         buf += len;
1786                         cnt -= len;
1787                 }
1788         } else
1789 #endif
1790         {
1791                 u32 *pdata = buf;
1792                 for (; cnt >= 4; cnt -= 4)
1793                         *pdata++ = mci_readl(host, DATA(host->data_offset));
1794                 buf = pdata;
1795         }
1796         if (cnt) {
1797                 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1798                 dw_mci_pull_final_bytes(host, buf, cnt);
1799         }
1800 }
1801
1802 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1803 {
1804         struct mmc_data *data = host->data;
1805         int init_cnt = cnt;
1806
1807         /* try and push anything in the part_buf */
1808         if (unlikely(host->part_buf_count)) {
1809                 int len = dw_mci_push_part_bytes(host, buf, cnt);
1810                 buf += len;
1811                 cnt -= len;
1812
1813                 if (host->part_buf_count == 8) {
1814                         mci_writeq(host, DATA(host->data_offset),
1815                                         host->part_buf);
1816                         host->part_buf_count = 0;
1817                 }
1818         }
1819 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1820         if (unlikely((unsigned long)buf & 0x7)) {
1821                 while (cnt >= 8) {
1822                         u64 aligned_buf[16];
1823                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1824                         int items = len >> 3;
1825                         int i;
1826                         /* memcpy from input buffer into aligned buffer */
1827                         memcpy(aligned_buf, buf, len);
1828                         buf += len;
1829                         cnt -= len;
1830                         /* push data from aligned buffer into fifo */
1831                         for (i = 0; i < items; ++i)
1832                                 mci_writeq(host, DATA(host->data_offset),
1833                                                 aligned_buf[i]);
1834                 }
1835         } else
1836 #endif
1837         {
1838                 u64 *pdata = buf;
1839                 for (; cnt >= 8; cnt -= 8)
1840                         mci_writeq(host, DATA(host->data_offset), *pdata++);
1841                 buf = pdata;
1842         }
1843         /* put anything remaining in the part_buf */
1844         if (cnt) {
1845                 dw_mci_set_part_bytes(host, buf, cnt);
1846                 /* Push data if we have reached the expected data length */
1847                 if ((data->bytes_xfered + init_cnt) ==
1848                     (data->blksz * data->blocks))
1849                         mci_writeq(host, DATA(host->data_offset),
1850                                    host->part_buf);
1851         }
1852 }
1853
1854 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1855 {
1856 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1857         if (unlikely((unsigned long)buf & 0x7)) {
1858                 while (cnt >= 8) {
1859                         /* pull data from fifo into aligned buffer */
1860                         u64 aligned_buf[16];
1861                         int len = min(cnt & -8, (int)sizeof(aligned_buf));
1862                         int items = len >> 3;
1863                         int i;
1864                         for (i = 0; i < items; ++i)
1865                                 aligned_buf[i] = mci_readq(host,
1866                                                 DATA(host->data_offset));
1867                         /* memcpy from aligned buffer into output buffer */
1868                         memcpy(buf, aligned_buf, len);
1869                         buf += len;
1870                         cnt -= len;
1871                 }
1872         } else
1873 #endif
1874         {
1875                 u64 *pdata = buf;
1876                 for (; cnt >= 8; cnt -= 8)
1877                         *pdata++ = mci_readq(host, DATA(host->data_offset));
1878                 buf = pdata;
1879         }
1880         if (cnt) {
1881                 host->part_buf = mci_readq(host, DATA(host->data_offset));
1882                 dw_mci_pull_final_bytes(host, buf, cnt);
1883         }
1884 }
1885
1886 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1887 {
1888         int len;
1889
1890         /* get remaining partial bytes */
1891         len = dw_mci_pull_part_bytes(host, buf, cnt);
1892         if (unlikely(len == cnt))
1893                 return;
1894         buf += len;
1895         cnt -= len;
1896
1897         /* get the rest of the data */
1898         host->pull_data(host, buf, cnt);
1899 }
1900
1901 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1902 {
1903         struct sg_mapping_iter *sg_miter = &host->sg_miter;
1904         void *buf;
1905         unsigned int offset;
1906         struct mmc_data *data = host->data;
1907         int shift = host->data_shift;
1908         u32 status;
1909         unsigned int len;
1910         unsigned int remain, fcnt;
1911
1912         do {
1913                 if (!sg_miter_next(sg_miter))
1914                         goto done;
1915
1916                 host->sg = sg_miter->piter.sg;
1917                 buf = sg_miter->addr;
1918                 remain = sg_miter->length;
1919                 offset = 0;
1920
1921                 do {
1922                         fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1923                                         << shift) + host->part_buf_count;
1924                         len = min(remain, fcnt);
1925                         if (!len)
1926                                 break;
1927                         dw_mci_pull_data(host, (void *)(buf + offset), len);
1928                         data->bytes_xfered += len;
1929                         offset += len;
1930                         remain -= len;
1931                 } while (remain);
1932
1933                 sg_miter->consumed = offset;
1934                 status = mci_readl(host, MINTSTS);
1935                 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1936         /* if the RXDR is ready read again */
1937         } while ((status & SDMMC_INT_RXDR) ||
1938                  (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1939
1940         if (!remain) {
1941                 if (!sg_miter_next(sg_miter))
1942                         goto done;
1943                 sg_miter->consumed = 0;
1944         }
1945         sg_miter_stop(sg_miter);
1946         return;
1947
1948 done:
1949         sg_miter_stop(sg_miter);
1950         host->sg = NULL;
1951         smp_wmb();
1952         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1953 }
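/*
 * Worked example (illustration only): in dw_mci_read_data_pio() the number
 * of bytes that can be drained per inner iteration is
 *
 *     fcnt = (SDMMC_GET_FCNT(STATUS) << data_shift) + part_buf_count;
 *
 * so a 32-bit FIFO (data_shift == 2) reporting 4 occupied words plus 2
 * previously stashed bytes gives fcnt = 4 * 4 + 2 = 18 bytes, which is then
 * clamped to the bytes remaining in the current scatterlist segment.
 */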
1954
1955 static void dw_mci_write_data_pio(struct dw_mci *host)
1956 {
1957         struct sg_mapping_iter *sg_miter = &host->sg_miter;
1958         void *buf;
1959         unsigned int offset;
1960         struct mmc_data *data = host->data;
1961         int shift = host->data_shift;
1962         u32 status;
1963         unsigned int len;
1964         unsigned int fifo_depth = host->fifo_depth;
1965         unsigned int remain, fcnt;
1966
1967         do {
1968                 if (!sg_miter_next(sg_miter))
1969                         goto done;
1970
1971                 host->sg = sg_miter->piter.sg;
1972                 buf = sg_miter->addr;
1973                 remain = sg_miter->length;
1974                 offset = 0;
1975
1976                 do {
1977                         fcnt = ((fifo_depth -
1978                                  SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1979                                         << shift) - host->part_buf_count;
1980                         len = min(remain, fcnt);
1981                         if (!len)
1982                                 break;
1983                         host->push_data(host, (void *)(buf + offset), len);
1984                         data->bytes_xfered += len;
1985                         offset += len;
1986                         remain -= len;
1987                 } while (remain);
1988
1989                 sg_miter->consumed = offset;
1990                 status = mci_readl(host, MINTSTS);
1991                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1992         } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1993
1994         if (!remain) {
1995                 if (!sg_miter_next(sg_miter))
1996                         goto done;
1997                 sg_miter->consumed = 0;
1998         }
1999         sg_miter_stop(sg_miter);
2000         return;
2001
2002 done:
2003         sg_miter_stop(sg_miter);
2004         host->sg = NULL;
2005         smp_wmb();
2006         set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2007 }
2008
2009 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2010 {
2011         if (!host->cmd_status)
2012                 host->cmd_status = status;
2013
2014         smp_wmb();
2015
2016         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2017         tasklet_schedule(&host->tasklet);
2018 }
2019
2020 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2021 {
2022         struct dw_mci *host = dev_id;
2023         u32 pending, sdio_int;
2024         int i;
2025
2026         pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2027
2028         /*
2029          * DTO fix - version 2.10a and below, and only if internal DMA
2030          * is configured.
2031          */
2032         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2033                 if (!pending &&
2034                     ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2035                         pending |= SDMMC_INT_DATA_OVER;
2036         }
2037
2038         if (pending) {
2039                 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2040                         mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2041                         host->cmd_status = pending;
2042                         smp_wmb();
2043                         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2044                 }
2045
2046                 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2047                         /* if there is an error report DATA_ERROR */
2048                         mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2049                         host->data_status = pending;
2050                         smp_wmb();
2051                         set_bit(EVENT_DATA_ERROR, &host->pending_events);
2052                         tasklet_schedule(&host->tasklet);
2053                 }
2054
2055                 if (pending & SDMMC_INT_DATA_OVER) {
2056                         mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2057                         MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2058                         if (!host->data_status)
2059                                 host->data_status = pending;
2060                         smp_wmb();
2061                         if (host->dir_status == DW_MCI_RECV_STATUS) {
2062                                 if (host->sg != NULL)
2063                                         dw_mci_read_data_pio(host, true);
2064                         }
2065                         set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2066                         tasklet_schedule(&host->tasklet);
2067                 }
2068
2069                 if (pending & SDMMC_INT_RXDR) {
2070                         mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2071                         if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2072                                 dw_mci_read_data_pio(host, false);
2073                 }
2074
2075                 if (pending & SDMMC_INT_TXDR) {
2076                         mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2077                         if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2078                                 dw_mci_write_data_pio(host);
2079                 }
2080
2081                 if (pending & SDMMC_INT_CMD_DONE) {
2082                         MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2083                         mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2084                         dw_mci_cmd_interrupt(host, pending);
2085                 }
2086
2087                 if (pending & SDMMC_INT_CD) {
2088                         mci_writel(host, RINTSTS, SDMMC_INT_CD);
2089                         queue_work(host->card_workqueue, &host->card_work);
2090                 }
2091
2092                 /* Handle SDIO Interrupts */
2093                 for (i = 0; i < host->num_slots; i++) {
2094                         struct dw_mci_slot *slot = host->slot[i];
2095
2096                         if (host->verid < DW_MMC_240A)
2097                                 sdio_int = SDMMC_INT_SDIO(i);
2098                         else
2099                                 sdio_int = SDMMC_INT_SDIO(i + 8);
2100
2101                         if (pending & sdio_int) {
2102                                 mci_writel(host, RINTSTS, sdio_int);
2103                                 mmc_signal_sdio_irq(slot->mmc);
2104                         }
2105                 }
2106
2107         }
2108
2109 #ifdef CONFIG_MMC_DW_IDMAC
2110         /* Handle DMA interrupts */
2111         pending = mci_readl(host, IDSTS);
2112         if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2113                 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2114                 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2115                 host->dma_ops->complete(host);
2116         }
2117 #endif
2118
2119         return IRQ_HANDLED;
2120 }
2121
2122 static void dw_mci_work_routine_card(struct work_struct *work)
2123 {
2124         struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2125         int i;
2126         
2127         for (i = 0; i < host->num_slots; i++) {
2128                 struct dw_mci_slot *slot = host->slot[i];
2129                 struct mmc_host *mmc = slot->mmc;
2130                 struct mmc_request *mrq;
2131                 int present;
2132                 u32 ctrl;
2133
2134                 present = dw_mci_get_cd(mmc);
2135                 while (present != slot->last_detect_state) {
2136                         dev_dbg(&slot->mmc->class_dev, "card %s\n",
2137                                 present ? "inserted" : "removed");
2138                         MMC_DBG_BOOT_FUNC(mmc, "card %s, devname=%s\n",
2139                                           present ? "inserted" : "removed", mmc_hostname(mmc));
2140
2141                         spin_lock_bh(&host->lock);
2142
2143                         /* Card change detected */
2144                         slot->last_detect_state = present;
2145
2146                         /* Clean up queue if present */
2147                         mrq = slot->mrq;
2148                         if (mrq) {
2149                                 if (mrq == host->mrq) {
2150                                         host->data = NULL;
2151                                         host->cmd = NULL;
2152
2153                                         switch (host->state) {
2154                                         case STATE_IDLE:
2155                                                 break;
2156                                         case STATE_SENDING_CMD:
2157                                                 mrq->cmd->error = -ENOMEDIUM;
2158                                                 if (!mrq->data)
2159                                                         break;
2160                                                 /* fall through */
2161                                         case STATE_SENDING_DATA:
2162                                                 mrq->data->error = -ENOMEDIUM;
2163                                                 dw_mci_stop_dma(host);
2164                                                 break;
2165                                         case STATE_DATA_BUSY:
2166                                         case STATE_DATA_ERROR:
2167                                                 if (mrq->data->error == -EINPROGRESS)
2168                                                         mrq->data->error = -ENOMEDIUM;
2169                                                 if (!mrq->stop)
2170                                                         break;
2171                                                 /* fall through */
2172                                         case STATE_SENDING_STOP:
2173                                                 mrq->stop->error = -ENOMEDIUM;
2174                                                 break;
2175                                         }
2176
2177                                         dw_mci_request_end(host, mrq);
2178                                 } else {
2179                                         list_del(&slot->queue_node);
2180                                         mrq->cmd->error = -ENOMEDIUM;
2181                                         if (mrq->data)
2182                                                 mrq->data->error = -ENOMEDIUM;
2183                                         if (mrq->stop)
2184                                                 mrq->stop->error = -ENOMEDIUM;
2185
2186                                         MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--request done, cmd=%d [%s]", mrq->cmd->opcode, mmc_hostname(mmc));
2187
2188                                         spin_unlock(&host->lock);
2189                                         mmc_request_done(slot->mmc, mrq);
2190                                         spin_lock(&host->lock);
2191                                 }
2192                         }
2193
2194                         /* Power down slot */
2195                         if (present == 0) {
2196
2197                                 /*
2198                                  * Clear down the FIFO - doing so generates a
2199                                  * block interrupt, hence setting the
2200                                  * scatter-gather pointer to NULL.
2201                                  */
2202                                 sg_miter_stop(&host->sg_miter);
2203                                 host->sg = NULL;
2204
2205                                 ctrl = mci_readl(host, CTRL);
2206                                 ctrl |= SDMMC_CTRL_FIFO_RESET;
2207                                 mci_writel(host, CTRL, ctrl);
2208
2209 #ifdef CONFIG_MMC_DW_IDMAC
2210                                 dw_mci_idmac_reset(host);
2211 #endif
2212
2213                         }
2214
2215                         spin_unlock_bh(&host->lock);
2216
2217                         present = dw_mci_get_cd(mmc);
2218                 }
2219
2220                 mmc_detect_change(slot->mmc,
2221                         msecs_to_jiffies(host->pdata->detect_delay_ms));
2222         }
2223 }
2224
2225 #ifdef CONFIG_OF
2226 /* given a slot id, find out the device node representing that slot */
2227 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2228 {
2229         struct device_node *np;
2230         const __be32 *addr;
2231         int len;
2232
2233         if (!dev || !dev->of_node)
2234                 return NULL;
2235
2236         for_each_child_of_node(dev->of_node, np) {
2237                 addr = of_get_property(np, "reg", &len);
2238                 if (!addr || (len < sizeof(int)))
2239                         continue;
2240                 if (be32_to_cpup(addr) == slot)
2241                         return np;
2242         }
2243         return NULL;
2244 }
2245
2246 static struct dw_mci_of_slot_quirks {
2247         char *quirk;
2248         int id;
2249 } of_slot_quirks[] = {
2250         {
2251                 .quirk  = "disable-wp",
2252                 .id     = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2253         },
2254 };
2255
2256 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2257 {
2258         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2259         int quirks = 0;
2260         int idx;
2261
2262         /* get quirks */
2263         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2264                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2265                         quirks |= of_slot_quirks[idx].id;
2266
2267         return quirks;
2268 }
2269
2270 /* find out bus-width for a given slot */
2271 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2272 {
2273         struct device_node *np = dev->of_node; /* was: dw_mci_of_find_slot_node(dev, slot) */
2274         u32 bus_wd = 1;
2275
2276         if (!np)
2277                 return 1;
2278
2279         if (of_property_read_u32(np, "bus-width", &bus_wd))
2280                 dev_err(dev, "bus-width property not found, assuming width"
2281                                " as 1\n");
2282         return bus_wd;
2283 }
2284
2285
2286 /* find the pwr-en gpio for a given slot; or -EINVAL if none specified */
2287 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
2288 {
2289         struct device_node *np = dev->of_node; /* was: dw_mci_of_find_slot_node(dev, slot) */
2290         int gpio;
2291
2292         if (!np)
2293                 return -EINVAL;
2294
2295         gpio = of_get_named_gpio(np, "pwr-gpios", 0);
2296
2297         /* Having a missing entry is valid; return silently */
2298         if (!gpio_is_valid(gpio))
2299                 return -EINVAL;
2300
2301         if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
2302                 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2303                 return -EINVAL;
2304         }
2305
2306         gpio_direction_output(gpio, 0); /* drive pwr-en low initially */
2307
2308         return gpio;
2309 }
2310
2311
2312 /* find the write protect gpio for a given slot; or -EINVAL if none specified */
2313 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2314 {
2315         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2316         int gpio;
2317
2318         if (!np)
2319                 return -EINVAL;
2320
2321         gpio = of_get_named_gpio(np, "wp-gpios", 0);
2322
2323         /* Having a missing entry is valid; return silently */
2324         if (!gpio_is_valid(gpio))
2325                 return -EINVAL;
2326
2327         if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2328                 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2329                 return -EINVAL;
2330         }
2331
2332         return gpio;
2333 }
2334
2335 /* find the cd gpio for a given slot */
2336 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2337                                         struct mmc_host *mmc)
2338 {
2339         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2340         int gpio;
2341
2342         if (!np)
2343                 return;
2344
2345         gpio = of_get_named_gpio(np, "cd-gpios", 0);
2346
2347         /* Having a missing entry is valid; return silently */
2348         if (!gpio_is_valid(gpio))
2349                 return;
2350
2351         if (mmc_gpio_request_cd(mmc, gpio, 0))
2352                 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2353 }
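/*
 * Hypothetical device-tree fragment (node names and GPIO specifiers are
 * invented, shown only to illustrate the lookups above): bus-width and
 * pwr-gpios are read from the controller node itself in this driver, while
 * wp-gpios and cd-gpios come from the slot child node matched by its "reg"
 * value.
 *
 *     &dwmmc0 {
 *             bus-width = <4>;
 *             pwr-gpios = <&gpio0 5 0>;
 *
 *             slot@0 {
 *                     reg = <0>;
 *                     wp-gpios = <&gpio0 6 0>;
 *                     cd-gpios = <&gpio0 7 0>;
 *             };
 *     };
 */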
2354 #else /* CONFIG_OF */
2355 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2356 {
2357         return 0;
2358 }
2359 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2360 {
2361         return 1;
2362 }
2363 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2364 {
2365         return NULL;
2366 }
2367 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2368 {
2369         return -EINVAL;
2370 }
2371 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2372                                         struct mmc_host *mmc)
2373 {
2374         return;
2375 }
2376 #endif /* CONFIG_OF */
2377
2378 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2379 {
2380         struct mmc_host *mmc;
2381         struct dw_mci_slot *slot;
2382         const struct dw_mci_drv_data *drv_data = host->drv_data;
2383         int ctrl_id, ret;
2384         u32 freq[2];
2385         u8 bus_width;
2386
2387         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2388         if (!mmc)
2389                 return -ENOMEM;
2390
2391         slot = mmc_priv(mmc);
2392         slot->id = id;
2393         slot->mmc = mmc;
2394         slot->host = host;
2395         host->slot[id] = slot;
2396         host->mmc = mmc;
2397
2398         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2399
2400         mmc->ops = &dw_mci_ops;
2401 #if 0
2402         mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
2403         mmc->f_max = host->bus_hz;
2404         printk("%d..%s: fmin=%d, fmax=%d, bus_hz=%d\n", __LINE__, __FUNCTION__, mmc->f_min, mmc->f_max, host->bus_hz);
2405 #else
2406         if (of_property_read_u32_array(host->dev->of_node,
2407                                        "clock-freq-min-max", freq, 2)) {
2408                 mmc->f_min = DW_MCI_FREQ_MIN;
2409                 mmc->f_max = DW_MCI_FREQ_MAX;
2410
2411                 printk("%d..%s: fmin=%d, fmax=%d\n", __LINE__, __FUNCTION__, mmc->f_min, mmc->f_max);
2412         } else {
2413                 mmc->f_min = freq[0];
2414                 mmc->f_max = freq[1];
2415
2416                 printk("%d..%s: fmin=%d, fmax=%d\n", __LINE__, __FUNCTION__, mmc->f_min, mmc->f_max);
2417         }
2418 #endif
2419
2420         if (of_find_property(host->dev->of_node, "supports-sd", NULL))
2421                 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;    
2422         if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
2423                 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;  
2424         if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
2425                 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
2426
2427         if (host->pdata->get_ocr)
2428                 mmc->ocr_avail = host->pdata->get_ocr(id);
2429         else {
2430                 /* mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; */
2431                 mmc->ocr_avail = MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 |
2432                                  MMC_VDD_31_32 | MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36;
2433
2434                 mmc->ocr_avail |= MMC_VDD_26_27 | MMC_VDD_25_26 | MMC_VDD_24_25 | MMC_VDD_23_24 |
2435                                   MMC_VDD_22_23 | MMC_VDD_21_22 | MMC_VDD_20_21 | MMC_VDD_165_195;
2436         }
2437
2438         /*
2439          * Start with slot power disabled, it will be enabled when a card
2440          * is detected.
2441          */
2442         if (host->pdata->setpower)
2443                 host->pdata->setpower(id, 0);
2444
2445         if (host->pdata->caps)
2446                 mmc->caps = host->pdata->caps;
2447
2448         if (host->pdata->pm_caps)
2449                 mmc->pm_caps = host->pdata->pm_caps;
2450
2451         if (host->dev->of_node) {
2452                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2453                 if (ctrl_id < 0)
2454                         ctrl_id = 0;
2455         } else {
2456                 ctrl_id = to_platform_device(host->dev)->id;
2457         }
2458         if (drv_data && drv_data->caps)
2459                 mmc->caps |= drv_data->caps[ctrl_id];
2460         if (drv_data && drv_data->hold_reg_flag)
2461                 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];         
2462
2463         if (host->pdata->caps2)
2464                 mmc->caps2 = host->pdata->caps2;
2465
2466         if (host->pdata->get_bus_wd)
2467                 bus_width = host->pdata->get_bus_wd(slot->id);
2468         else if (host->dev->of_node)
2469                 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2470         else
2471                 bus_width = 1;
2472
2473         switch (bus_width) {
2474         case 8:
2475                 mmc->caps |= MMC_CAP_8_BIT_DATA;
2476         case 4:         /* fall through from case 8: 8-bit slots also support 4-bit */
2477                 mmc->caps |= MMC_CAP_4_BIT_DATA;
2478         }
2479         if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
2480                 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
2481         if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
2482                 mmc->caps |= MMC_CAP_SDIO_IRQ;
2483         if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
2484                 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
2485         if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
2486                 mmc->pm_caps |= MMC_PM_KEEP_POWER;
2487         if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
2488                 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2489
2490         if (host->pdata->blk_settings) {
2491                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2492                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2493                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2494                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2495                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2496         } else {
2497                 /* Useful defaults if platform data is unset. */
2498 #ifdef CONFIG_MMC_DW_IDMAC
2499                 mmc->max_segs = host->ring_size;
2500                 mmc->max_blk_size = 65536;
2501                 mmc->max_blk_count = host->ring_size;
2502                 mmc->max_seg_size = 0x1000;
2503                 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2504 #else
2505                 mmc->max_segs = 64;
2506                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2507                 mmc->max_blk_count = 512;
2508                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2509                 mmc->max_seg_size = mmc->max_req_size;
2510 #endif /* CONFIG_MMC_DW_IDMAC */
2511         }
2512         /* pwr_en: prefer a dedicated power-enable GPIO over the vmmc regulator */
2513         slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
2514
2515         if (gpio_is_valid(slot->pwr_en_gpio)) {
2516                 host->vmmc = NULL;
2517         } else {
2518
2519                 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
2520                 if (IS_ERR(host->vmmc)) {
2521                         pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
2522                         host->vmmc = NULL;
2523                 } else {
2524                         ret = regulator_enable(host->vmmc);
2525                         if (ret) {
2526                                 dev_err(host->dev,
2527                                         "failed to enable regulator: %d\n", ret);
2528                                 goto err_setup_bus;
2529                         }
2530                 }
2531         }
2532
2533         slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2534         dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2535
2536         ret = mmc_add_host(mmc);
2537         if (ret)
2538                 goto err_setup_bus;
2539
2540 #if defined(CONFIG_DEBUG_FS)
2541         dw_mci_init_debugfs(slot);
2542 #endif
2543
2544         /* Card initially undetected */
2545         slot->last_detect_state = 0;
2546
2547         return 0;
2548
2549 err_setup_bus:
2550         mmc_free_host(mmc);
2551         return -EINVAL;
2552 }
2553
2554 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2555 {
2556         /* Shutdown detect IRQ */
2557         if (slot->host->pdata->exit)
2558                 slot->host->pdata->exit(id);
2559
2560         /* Debugfs stuff is cleaned up by mmc core */
2561         mmc_remove_host(slot->mmc);
2562         slot->host->slot[id] = NULL;
2563         mmc_free_host(slot->mmc);
2564 }
2565
2566 static void dw_mci_init_dma(struct dw_mci *host)
2567 {
2568         /* Alloc memory for sg translation */
2569         host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2570                                           &host->sg_dma, GFP_KERNEL);
2571         if (!host->sg_cpu) {
2572                 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2573                         __func__);
2574                 goto no_dma;
2575         }
2576
2577         /* Determine which DMA interface to use */
2578 #ifdef CONFIG_MMC_DW_IDMAC
2579         host->dma_ops = &dw_mci_idmac_ops;
2580         dev_info(host->dev, "Using internal DMA controller.\n");
2581 #endif
2582
2583         if (!host->dma_ops)
2584                 goto no_dma;
2585
2586         if (host->dma_ops->init && host->dma_ops->start &&
2587             host->dma_ops->stop && host->dma_ops->cleanup) {
2588                 if (host->dma_ops->init(host)) {
2589                         dev_err(host->dev, "%s: Unable to initialize "
2590                                 "DMA Controller.\n", __func__);
2591                         goto no_dma;
2592                 }
2593         } else {
2594                 dev_err(host->dev, "DMA initialization not found.\n");
2595                 goto no_dma;
2596         }
2597
2598         host->use_dma = 1;
2599         return;
2600
2601 no_dma:
2602         dev_info(host->dev, "Using PIO mode.\n");
2603         host->use_dma = 0;
2604         return;
2605 }
2606
2607 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2608 {
2609         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2610         unsigned int ctrl;
2611
2612         mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2613                                 SDMMC_CTRL_DMA_RESET));
2614
2615         /* wait till resets clear */
2616         do {
2617                 ctrl = mci_readl(host, CTRL);
2618                 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2619                               SDMMC_CTRL_DMA_RESET)))
2620                         return true;
2621         } while (time_before(jiffies, timeout));
2622
2623         dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
2624
2625         return false;
2626 }
2627
2628 #ifdef CONFIG_OF
2629 static struct dw_mci_of_quirks {
2630         char *quirk;
2631         int id;
2632 } of_quirks[] = {
2633         {
2634                 .quirk  = "broken-cd",
2635                 .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2636         },
2637 };
2638
2639 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2640 {
2641         struct dw_mci_board *pdata;
2642         struct device *dev = host->dev;
2643         struct device_node *np = dev->of_node;
2644         const struct dw_mci_drv_data *drv_data = host->drv_data;
2645         int idx, ret;
2646         u32 clock_frequency;
2647
2648         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2649         if (!pdata) {
2650                 dev_err(dev, "could not allocate memory for pdata\n");
2651                 return ERR_PTR(-ENOMEM);
2652         }
2653
2654         /* find out number of slots supported */
2655         if (of_property_read_u32(dev->of_node, "num-slots",
2656                                 &pdata->num_slots)) {
2657                 dev_info(dev, "num-slots property not found, "
2658                                 "assuming 1 slot is available\n");
2659                 pdata->num_slots = 1;
2660         }
2661
2662         /* get quirks */
2663         for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2664                 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2665                         pdata->quirks |= of_quirks[idx].id;
2666
2667         if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2668                 dev_info(dev, "fifo-depth property not found, using "
2669                                 "value of FIFOTH register as default\n");
2670
2671         of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2672
2673         if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2674                 pdata->bus_hz = clock_frequency;
2675
2676         if (drv_data && drv_data->parse_dt) {
2677                 ret = drv_data->parse_dt(host);
2678                 if (ret)
2679                         return ERR_PTR(ret);
2680         }
2681
2682         if (of_find_property(np, "keep-power-in-suspend", NULL))
2683                 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2684                 
2685
2686
2687         if (of_find_property(np, "enable-sdio-wakeup", NULL))
2688                 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2689
2690         if (of_find_property(np, "supports-highspeed", NULL))
2691                 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2692
2693         if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2694                 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2695
2696         if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2697                 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2698
2699         if (of_get_property(np, "cd-inverted", NULL))
2700                 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2701         if (of_get_property(np, "bootpart-no-access", NULL))
2702                 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;        
2703
2704         return pdata;
2705 }
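/*
 * Hypothetical controller node (values invented, for illustration only)
 * exercising the optional properties parsed by dw_mci_parse_dt() above:
 *
 *     dwmmc@10214000 {
 *             num-slots = <1>;
 *             fifo-depth = <0x100>;
 *             card-detect-delay = <200>;
 *             clock-frequency = <50000000>;
 *             broken-cd;
 *             supports-highspeed;
 *             keep-power-in-suspend;
 *     };
 */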
2706
2707 #else /* CONFIG_OF */
2708 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2709 {
2710         return ERR_PTR(-EINVAL);
2711 }
2712 #endif /* CONFIG_OF */
2713
2714 int dw_mci_probe(struct dw_mci *host)
2715 {
2716         const struct dw_mci_drv_data *drv_data = host->drv_data;
2717         int width, i, ret = 0;
2718         u32 fifo_size;
2719         int init_slots = 0;
2720
2721         if (!host->pdata) {
2722                 host->pdata = dw_mci_parse_dt(host);
2723                 if (IS_ERR(host->pdata)) {
2724                         dev_err(host->dev, "platform data not available\n");
2725                         return -EINVAL;
2726                 }
2727         }
2728
2729         if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2730                 dev_err(host->dev,
2731                         "Platform data must supply select_slot function\n");
2732                 return -ENODEV;
2733         }
2734
2735         host->biu_clk = devm_clk_get(host->dev, "biu");
2736         if (IS_ERR(host->biu_clk)) {
2737                 dev_dbg(host->dev, "biu clock not available\n");
2738         } else {
2739                 ret = clk_prepare_enable(host->biu_clk);
2740                 if (ret) {
2741                         dev_err(host->dev, "failed to enable biu clock\n");
2742                         return ret;
2743                 }
2744         }
2745
2746         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2747         if (IS_ERR(host->ciu_clk)) {
2748                 dev_dbg(host->dev, "ciu clock not available\n");
2749                 host->bus_hz = host->pdata->bus_hz;
2750         } else {
2751                 ret = clk_prepare_enable(host->ciu_clk);
2752                 if (ret) {
2753                         dev_err(host->dev, "failed to enable ciu clock\n");
2754                         goto err_clk_biu;
2755                 }
2756         }
2757
2758 #if 1
2759         /* test: force a fixed CIU rate (modified by xbw) */
2760         host->bus_hz = 50000000;
2761 #else
2762         if (drv_data && drv_data->init) {
2763                 ret = drv_data->init(host);
2764                 if (ret) {
2765                         dev_err(host->dev, "implementation specific init failed\n");
2766                         goto err_clk_ciu;
2767                 }
2768                 host->bus_hz = clk_get_rate(host->ciu_clk);
2769         }
2770 #endif
2771         if (drv_data && drv_data->setup_clock) {
2772                 ret = drv_data->setup_clock(host);
2773                 if (ret) {
2774                         dev_err(host->dev,
2775                                 "implementation specific clock setup failed\n");
2776                         goto err_clk_ciu;
2777                 }
2778         }
2779
2780         if (!host->bus_hz) {
2781                 dev_err(host->dev,
2782                         "Platform data must supply bus speed\n");
2783                 ret = -ENODEV;
2784                 goto err_clk_ciu;
2785         }
2786
2787         host->quirks = host->pdata->quirks;
2788
2789         spin_lock_init(&host->lock);
2790         INIT_LIST_HEAD(&host->queue);
2791
2792         /*
2793          * Get the host data width - this assumes that HCON has been set with
2794          * the correct values.
2795          */
2796         i = (mci_readl(host, HCON) >> 7) & 0x7;
2797         if (!i) {
2798                 host->push_data = dw_mci_push_data16;
2799                 host->pull_data = dw_mci_pull_data16;
2800                 width = 16;
2801                 host->data_shift = 1;
2802         } else if (i == 2) {
2803                 host->push_data = dw_mci_push_data64;
2804                 host->pull_data = dw_mci_pull_data64;
2805                 width = 64;
2806                 host->data_shift = 3;
2807         } else {
2808                 /* Check for a reserved value, and warn if it is */
2809                 WARN((i != 1),
2810                      "HCON reports a reserved host data width!\n"
2811                      "Defaulting to 32-bit access.\n");
2812                 host->push_data = dw_mci_push_data32;
2813                 host->pull_data = dw_mci_pull_data32;
2814                 width = 32;
2815                 host->data_shift = 2;
2816         }
2817
2818         /* Reset all blocks */
2819         if (!mci_wait_reset(host->dev, host))
2820                 return -ENODEV;
2821
2822         host->dma_ops = host->pdata->dma_ops;
2823         dw_mci_init_dma(host);
2824
2825         /* Clear the interrupts for the host controller */
2826         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2827         mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2828
2829         /* Put in max timeout */
2830         mci_writel(host, TMOUT, 0xFFFFFFFF);
2831
2832         /*
2833          * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
2834          *                          Tx Mark = fifo_size / 2, DMA Size = 8
2835          */
2836         if (!host->pdata->fifo_depth) {
2837                 /*
2838                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2839                  * have been overwritten by the bootloader, just like we're
2840                  * about to do, so if you know the value for your hardware, you
2841                  * should put it in the platform data.
2842                  */
2843                 fifo_size = mci_readl(host, FIFOTH);
2844                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2845         } else {
2846                 fifo_size = host->pdata->fifo_depth;
2847         }
2848         host->fifo_depth = fifo_size;
2849         host->fifoth_val =
2850                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2851         mci_writel(host, FIFOTH, host->fifoth_val);
2852
2853         /* disable clock to CIU */
2854         mci_writel(host, CLKENA, 0);
2855         mci_writel(host, CLKSRC, 0);
2856
2857         /*
2858          * In 2.40a spec, Data offset is changed.
2859          * Need to check the version-id and set data-offset for DATA register.
2860          */
2861         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2862         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2863
2864         if (host->verid < DW_MMC_240A)
2865                 host->data_offset = DATA_OFFSET;
2866         else
2867                 host->data_offset = DATA_240A_OFFSET;
2868
2869         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
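             /* card_workqueue runs dw_mci_work_routine_card to service
              * card-detect (insert/remove) events outside interrupt context.
              */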
2870         host->card_workqueue = alloc_workqueue("dw-mci-card",
2871                         WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2872         if (!host->card_workqueue) {
2873                 ret = -ENOMEM;
2874                 goto err_dmaunmap;
2875         }
2876         INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2877         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2878                                host->irq_flags, "dw-mci", host);
2879         if (ret)
2880                 goto err_workqueue;
2881
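             /* Without a platform-supplied slot count, HCON[5:1] reports the
              * number of card slots minus one.
              */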
2882         if (host->pdata->num_slots)
2883                 host->num_slots = host->pdata->num_slots;
2884         else
2885                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2886
2887         /*
2888          * Enable interrupts for command done, data over, data empty, card
2889          * detect, receive ready, and errors (transmit/receive timeout, CRC).
2890          */
2891         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2892         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2893                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2894                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2895         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2896
2897         dev_info(host->dev, "DW MMC controller at irq %d, "
2898                  "%d bit host data width, "
2899                  "%u deep fifo\n",
2900                  host->irq, width, fifo_size);
2901
2902         /* We need at least one slot to succeed */
2903         for (i = 0; i < host->num_slots; i++) {
2904                 ret = dw_mci_init_slot(host, i);
2905                 if (ret)
2906                         dev_dbg(host->dev, "slot %d init failed\n", i);
2907                 else
2908                         init_slots++;
2909         }
2910
2911         if (init_slots) {
2912                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2913         } else {
2914                 dev_dbg(host->dev, "attempted to initialize %d slots, "
2915                                         "but failed on all\n", host->num_slots);
2916                 goto err_workqueue;
2917         }
2918
2919
2920         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2921                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2922
2923         return 0;
2924
2925 err_workqueue:
2926         destroy_workqueue(host->card_workqueue);
2927
2928 err_dmaunmap:
2929         if (host->use_dma && host->dma_ops->exit)
2930                 host->dma_ops->exit(host);
2931
2932         if (host->vmmc)
2933                 regulator_disable(host->vmmc);
2934
2935 err_clk_ciu:
2936         if (!IS_ERR(host->ciu_clk))
2937                 clk_disable_unprepare(host->ciu_clk);
2938
2939 err_clk_biu:
2940         if (!IS_ERR(host->biu_clk))
2941                 clk_disable_unprepare(host->biu_clk);
2942
2943         return ret;
2944 }
2945 EXPORT_SYMBOL(dw_mci_probe);
2946
2947 void dw_mci_remove(struct dw_mci *host)
2948 {
2949         int i;
2950
2951         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2952         mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
2953
2954         for (i = 0; i < host->num_slots; i++) {
2955                 dev_dbg(host->dev, "remove slot %d\n", i);
2956                 if (host->slot[i])
2957                         dw_mci_cleanup_slot(host->slot[i], i);
2958         }
2959
2960         /* disable clock to CIU */
2961         mci_writel(host, CLKENA, 0);
2962         mci_writel(host, CLKSRC, 0);
2963
2964         destroy_workqueue(host->card_workqueue);
2965
2966         if (host->use_dma && host->dma_ops->exit)
2967                 host->dma_ops->exit(host);
2968
2969         if (host->vmmc)
2970                 regulator_disable(host->vmmc);
2971
2972         if (!IS_ERR(host->ciu_clk))
2973                 clk_disable_unprepare(host->ciu_clk);
2974
2975         if (!IS_ERR(host->biu_clk))
2976                 clk_disable_unprepare(host->biu_clk);
2977 }
2978 EXPORT_SYMBOL(dw_mci_remove);
2979
2980
2981
2982 #ifdef CONFIG_PM_SLEEP
2983 /*
2984  * TODO: we should probably disable the clock to the card in the suspend path.
2985  */
2986 int dw_mci_suspend(struct dw_mci *host)
2987 {
2988         /*int i, ret = 0;
2989
2990         for (i = 0; i < host->num_slots; i++) {
2991                 struct dw_mci_slot *slot = host->slot[i];
2992                 if (!slot)
2993                         continue;
2994                 ret = mmc_suspend_host(slot->mmc);
2995                 if (ret < 0) {
2996                         while (--i >= 0) {
2997                                 slot = host->slot[i];
2998                                 if (slot)
2999                                         mmc_resume_host(host->slot[i]->mmc);
3000                         }
3001                         return ret;
3002                 }
3003         }
3004         */
3005         if (host->vmmc)
3006                 regulator_disable(host->vmmc);
3007
3008         return 0;
3009 }
3010 EXPORT_SYMBOL(dw_mci_suspend);
3011
3012 int dw_mci_resume(struct dw_mci *host)
3013 {
3014         int i, ret;
3015
3016         if (host->vmmc) {
3017                 ret = regulator_enable(host->vmmc);
3018                 if (ret) {
3019                         dev_err(host->dev,
3020                                 "failed to enable regulator: %d\n", ret);
3021                         return ret;
3022                 }
3023         }
3024
3025         if (!mci_wait_reset(host->dev, host)) {
3026                 ret = -ENODEV;
3027                 return ret;
3028         }
3029
3030         if (host->use_dma && host->dma_ops->init)
3031                 host->dma_ops->init(host);
3032
3033         /*
3034          * Restore the initial value of the FIFOTH register
3035          * and invalidate prev_blksz by resetting it to zero.
3036          */
3037         mci_writel(host, FIFOTH, host->fifoth_val);
3038         host->prev_blksz = 0;
3039         /* Put in max timeout */
3040         mci_writel(host, TMOUT, 0xFFFFFFFF);
3041
3042         mci_writel(host, RINTSTS, 0xFFFFFFFF);
3043         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3044                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3045                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
3046         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3047
3048         for (i = 0; i < host->num_slots; i++) {
3049                 struct dw_mci_slot *slot = host->slot[i];
3050                 if (!slot)
3051                         continue;
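                     /* Slots flagged MMC_PM_KEEP_POWER stayed powered across
                      * suspend, so reprogram their ios settings and bus clock
                      * here.
                      */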
3052                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3053                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3054                         dw_mci_setup_bus(slot, true);
3055                 }
3056
3057         //      ret = mmc_resume_host(host->slot[i]->mmc);
3058         //      if (ret < 0)
3059         //              return ret;
3060         }
3061         return 0;
3062 }
3063 EXPORT_SYMBOL(dw_mci_resume);
3064 #endif /* CONFIG_PM_SLEEP */
3065
3066 static int __init dw_mci_init(void)
3067 {
3068         pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
3069         return 0;
3070 }
3071
3072 static void __exit dw_mci_exit(void)
3073 {
3074 }
3075
3076 module_init(dw_mci_init);
3077 module_exit(dw_mci_exit);
3078
3079 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3080
3081 MODULE_AUTHOR("NXP Semiconductor VietNam");
3082 MODULE_AUTHOR("Imagination Technologies Ltd");
3083 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
3084
3085 MODULE_LICENSE("GPL v2");