8072e16d6d46743bedcd97a5567b13ec2ecef7eb
[firefly-linux-kernel-4.4.55.git] / drivers / mmc / host / sdhci.c
1 /*
2  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * Thanks to the following companies for their support:
12  *
13  *     - JMicron (hardware and technical support)
14  */
15
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
18 #include <linux/io.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/scatterlist.h>
22 #include <linux/regulator/consumer.h>
23
24 #include <linux/leds.h>
25
26 #include <linux/mmc/mmc.h>
27 #include <linux/mmc/host.h>
28
29 #include "sdhci.h"
30
31 #define DRIVER_NAME "sdhci"
32
33 #define DBG(f, x...) \
34         pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
35
36 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
37         defined(CONFIG_MMC_SDHCI_MODULE))
38 #define SDHCI_USE_LEDS_CLASS
39 #endif
40
41 static unsigned int debug_quirks = 0;
42
43 static void sdhci_finish_data(struct sdhci_host *);
44
45 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
46 static void sdhci_finish_command(struct sdhci_host *);
47
48 static void sdhci_dumpregs(struct sdhci_host *host)
49 {
50         printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
51                 mmc_hostname(host->mmc));
52
53         printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
54                 sdhci_readl(host, SDHCI_DMA_ADDRESS),
55                 sdhci_readw(host, SDHCI_HOST_VERSION));
56         printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
57                 sdhci_readw(host, SDHCI_BLOCK_SIZE),
58                 sdhci_readw(host, SDHCI_BLOCK_COUNT));
59         printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
60                 sdhci_readl(host, SDHCI_ARGUMENT),
61                 sdhci_readw(host, SDHCI_TRANSFER_MODE));
62         printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
63                 sdhci_readl(host, SDHCI_PRESENT_STATE),
64                 sdhci_readb(host, SDHCI_HOST_CONTROL));
65         printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
66                 sdhci_readb(host, SDHCI_POWER_CONTROL),
67                 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
68         printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
69                 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
70                 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
71         printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
72                 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
73                 sdhci_readl(host, SDHCI_INT_STATUS));
74         printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
75                 sdhci_readl(host, SDHCI_INT_ENABLE),
76                 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
77         printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
78                 sdhci_readw(host, SDHCI_ACMD12_ERR),
79                 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
80         printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
81                 sdhci_readl(host, SDHCI_CAPABILITIES),
82                 sdhci_readl(host, SDHCI_CAPABILITIES_1));
83         printk(KERN_DEBUG DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
84                 sdhci_readw(host, SDHCI_COMMAND),
85                 sdhci_readl(host, SDHCI_MAX_CURRENT));
86         printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
87                 sdhci_readw(host, SDHCI_HOST_CONTROL2));
88
89         if (host->flags & SDHCI_USE_ADMA)
90                 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
91                        readl(host->ioaddr + SDHCI_ADMA_ERROR),
92                        readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
93
94         printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
95 }
96
97 /*****************************************************************************\
98  *                                                                           *
99  * Low level functions                                                       *
100  *                                                                           *
101 \*****************************************************************************/
102
103 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
104 {
105         u32 ier;
106
107         ier = sdhci_readl(host, SDHCI_INT_ENABLE);
108         ier &= ~clear;
109         ier |= set;
110         sdhci_writel(host, ier, SDHCI_INT_ENABLE);
111         sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
112 }
113
114 static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
115 {
116         sdhci_clear_set_irqs(host, 0, irqs);
117 }
118
119 static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
120 {
121         sdhci_clear_set_irqs(host, irqs, 0);
122 }
123
124 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
125 {
126         u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
127
128         if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
129                 return;
130
131         if (enable)
132                 sdhci_unmask_irqs(host, irqs);
133         else
134                 sdhci_mask_irqs(host, irqs);
135 }
136
137 static void sdhci_enable_card_detection(struct sdhci_host *host)
138 {
139         sdhci_set_card_detection(host, true);
140 }
141
142 static void sdhci_disable_card_detection(struct sdhci_host *host)
143 {
144         sdhci_set_card_detection(host, false);
145 }
146
147 static void sdhci_reset(struct sdhci_host *host, u8 mask)
148 {
149         unsigned long timeout;
150         u32 uninitialized_var(ier);
151
152         if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
153                 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
154                         SDHCI_CARD_PRESENT))
155                         return;
156         }
157
158         if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
159                 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
160
161         if (host->ops->platform_reset_enter)
162                 host->ops->platform_reset_enter(host, mask);
163
164         sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
165
166         if (mask & SDHCI_RESET_ALL)
167                 host->clock = 0;
168
169         /* Wait max 100 ms */
170         timeout = 100;
171
172         /* hw clears the bit when it's done */
173         while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
174                 if (timeout == 0) {
175                         printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
176                                 mmc_hostname(host->mmc), (int)mask);
177                         sdhci_dumpregs(host);
178                         return;
179                 }
180                 timeout--;
181                 mdelay(1);
182         }
183
184         if (host->ops->platform_reset_exit)
185                 host->ops->platform_reset_exit(host, mask);
186
187         if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
188                 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
189 }
190
191 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
192
193 static void sdhci_init(struct sdhci_host *host, int soft)
194 {
195         if (soft)
196                 sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
197         else
198                 sdhci_reset(host, SDHCI_RESET_ALL);
199
200         sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
201                 SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
202                 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
203                 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
204                 SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
205
206         if (soft) {
207                 /* force clock reconfiguration */
208                 host->clock = 0;
209                 sdhci_set_ios(host->mmc, &host->mmc->ios);
210         }
211 }
212
213 static void sdhci_reinit(struct sdhci_host *host)
214 {
215         sdhci_init(host, 0);
216         sdhci_enable_card_detection(host);
217 }
218
219 static void sdhci_activate_led(struct sdhci_host *host)
220 {
221         u8 ctrl;
222
223         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
224         ctrl |= SDHCI_CTRL_LED;
225         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
226 }
227
228 static void sdhci_deactivate_led(struct sdhci_host *host)
229 {
230         u8 ctrl;
231
232         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
233         ctrl &= ~SDHCI_CTRL_LED;
234         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
235 }
236
237 #ifdef SDHCI_USE_LEDS_CLASS
238 static void sdhci_led_control(struct led_classdev *led,
239         enum led_brightness brightness)
240 {
241         struct sdhci_host *host = container_of(led, struct sdhci_host, led);
242         unsigned long flags;
243
244         spin_lock_irqsave(&host->lock, flags);
245
246         if (brightness == LED_OFF)
247                 sdhci_deactivate_led(host);
248         else
249                 sdhci_activate_led(host);
250
251         spin_unlock_irqrestore(&host->lock, flags);
252 }
253 #endif
254
255 /*****************************************************************************\
256  *                                                                           *
257  * Core functions                                                            *
258  *                                                                           *
259 \*****************************************************************************/
260
261 static void sdhci_read_block_pio(struct sdhci_host *host)
262 {
263         unsigned long flags;
264         size_t blksize, len, chunk;
265         u32 uninitialized_var(scratch);
266         u8 *buf;
267
268         DBG("PIO reading\n");
269
270         blksize = host->data->blksz;
271         chunk = 0;
272
273         local_irq_save(flags);
274
275         while (blksize) {
276                 if (!sg_miter_next(&host->sg_miter))
277                         BUG();
278
279                 len = min(host->sg_miter.length, blksize);
280
281                 blksize -= len;
282                 host->sg_miter.consumed = len;
283
284                 buf = host->sg_miter.addr;
285
286                 while (len) {
287                         if (chunk == 0) {
288                                 scratch = sdhci_readl(host, SDHCI_BUFFER);
289                                 chunk = 4;
290                         }
291
292                         *buf = scratch & 0xFF;
293
294                         buf++;
295                         scratch >>= 8;
296                         chunk--;
297                         len--;
298                 }
299         }
300
301         sg_miter_stop(&host->sg_miter);
302
303         local_irq_restore(flags);
304 }
305
306 static void sdhci_write_block_pio(struct sdhci_host *host)
307 {
308         unsigned long flags;
309         size_t blksize, len, chunk;
310         u32 scratch;
311         u8 *buf;
312
313         DBG("PIO writing\n");
314
315         blksize = host->data->blksz;
316         chunk = 0;
317         scratch = 0;
318
319         local_irq_save(flags);
320
321         while (blksize) {
322                 if (!sg_miter_next(&host->sg_miter))
323                         BUG();
324
325                 len = min(host->sg_miter.length, blksize);
326
327                 blksize -= len;
328                 host->sg_miter.consumed = len;
329
330                 buf = host->sg_miter.addr;
331
332                 while (len) {
333                         scratch |= (u32)*buf << (chunk * 8);
334
335                         buf++;
336                         chunk++;
337                         len--;
338
339                         if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
340                                 sdhci_writel(host, scratch, SDHCI_BUFFER);
341                                 chunk = 0;
342                                 scratch = 0;
343                         }
344                 }
345         }
346
347         sg_miter_stop(&host->sg_miter);
348
349         local_irq_restore(flags);
350 }
351
352 static void sdhci_transfer_pio(struct sdhci_host *host)
353 {
354         u32 mask;
355
356         BUG_ON(!host->data);
357
358         if (host->blocks == 0)
359                 return;
360
361         if (host->data->flags & MMC_DATA_READ)
362                 mask = SDHCI_DATA_AVAILABLE;
363         else
364                 mask = SDHCI_SPACE_AVAILABLE;
365
366         /*
367          * Some controllers (JMicron JMB38x) mess up the buffer bits
368          * for transfers < 4 bytes. As long as it is just one block,
369          * we can ignore the bits.
370          */
371         if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
372                 (host->data->blocks == 1))
373                 mask = ~0;
374
375         while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
376                 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
377                         udelay(100);
378
379                 if (host->data->flags & MMC_DATA_READ)
380                         sdhci_read_block_pio(host);
381                 else
382                         sdhci_write_block_pio(host);
383
384                 host->blocks--;
385                 if (host->blocks == 0)
386                         break;
387         }
388
389         DBG("PIO transfer complete.\n");
390 }
391
392 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
393 {
394         local_irq_save(*flags);
395         return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
396 }
397
398 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
399 {
400         kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
401         local_irq_restore(*flags);
402 }
403
404 static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
405 {
406         __le32 *dataddr = (__le32 __force *)(desc + 4);
407         __le16 *cmdlen = (__le16 __force *)desc;
408
409         /* SDHCI specification says ADMA descriptors should be 4 byte
410          * aligned, so using 16 or 32bit operations should be safe. */
411
412         cmdlen[0] = cpu_to_le16(cmd);
413         cmdlen[1] = cpu_to_le16(len);
414
415         dataddr[0] = cpu_to_le32(addr);
416 }
417
418 static int sdhci_adma_table_pre(struct sdhci_host *host,
419         struct mmc_data *data)
420 {
421         int direction;
422
423         u8 *desc;
424         u8 *align;
425         dma_addr_t addr;
426         dma_addr_t align_addr;
427         int len, offset;
428
429         struct scatterlist *sg;
430         int i;
431         char *buffer;
432         unsigned long flags;
433
434         /*
435          * The spec does not specify endianness of descriptor table.
436          * We currently guess that it is LE.
437          */
438
439         if (data->flags & MMC_DATA_READ)
440                 direction = DMA_FROM_DEVICE;
441         else
442                 direction = DMA_TO_DEVICE;
443
444         /*
445          * The ADMA descriptor table is mapped further down as we
446          * need to fill it with data first.
447          */
448
449         host->align_addr = dma_map_single(mmc_dev(host->mmc),
450                 host->align_buffer, 128 * 4, direction);
451         if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
452                 goto fail;
453         BUG_ON(host->align_addr & 0x3);
454
455         host->sg_count = dma_map_sg(mmc_dev(host->mmc),
456                 data->sg, data->sg_len, direction);
457         if (host->sg_count == 0)
458                 goto unmap_align;
459
460         desc = host->adma_desc;
461         align = host->align_buffer;
462
463         align_addr = host->align_addr;
464
465         for_each_sg(data->sg, sg, host->sg_count, i) {
466                 addr = sg_dma_address(sg);
467                 len = sg_dma_len(sg);
468
469                 /*
470                  * The SDHCI specification states that ADMA
471                  * addresses must be 32-bit aligned. If they
472                  * aren't, then we use a bounce buffer for
473                  * the (up to three) bytes that screw up the
474                  * alignment.
475                  */
476                 offset = (4 - (addr & 0x3)) & 0x3;
477                 if (offset) {
478                         if (data->flags & MMC_DATA_WRITE) {
479                                 buffer = sdhci_kmap_atomic(sg, &flags);
480                                 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
481                                 memcpy(align, buffer, offset);
482                                 sdhci_kunmap_atomic(buffer, &flags);
483                         }
484
485                         /* tran, valid */
486                         sdhci_set_adma_desc(desc, align_addr, offset, 0x21);
487
488                         BUG_ON(offset > 65536);
489
490                         align += 4;
491                         align_addr += 4;
492
493                         desc += 8;
494
495                         addr += offset;
496                         len -= offset;
497                 }
498
499                 BUG_ON(len > 65536);
500
501                 /* tran, valid */
502                 sdhci_set_adma_desc(desc, addr, len, 0x21);
503                 desc += 8;
504
505                 /*
506                  * If this triggers then we have a calculation bug
507                  * somewhere. :/
508                  */
509                 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
510         }
511
512         if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
513                 /*
514                 * Mark the last descriptor as the terminating descriptor
515                 */
516                 if (desc != host->adma_desc) {
517                         desc -= 8;
518                         desc[0] |= 0x2; /* end */
519                 }
520         } else {
521                 /*
522                 * Add a terminating entry.
523                 */
524
525                 /* nop, end, valid */
526                 sdhci_set_adma_desc(desc, 0, 0, 0x3);
527         }
528
529         /*
530          * Resync align buffer as we might have changed it.
531          */
532         if (data->flags & MMC_DATA_WRITE) {
533                 dma_sync_single_for_device(mmc_dev(host->mmc),
534                         host->align_addr, 128 * 4, direction);
535         }
536
537         host->adma_addr = dma_map_single(mmc_dev(host->mmc),
538                 host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
539         if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
540                 goto unmap_entries;
541         BUG_ON(host->adma_addr & 0x3);
542
543         return 0;
544
545 unmap_entries:
546         dma_unmap_sg(mmc_dev(host->mmc), data->sg,
547                 data->sg_len, direction);
548 unmap_align:
549         dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
550                 128 * 4, direction);
551 fail:
552         return -EINVAL;
553 }
554
555 static void sdhci_adma_table_post(struct sdhci_host *host,
556         struct mmc_data *data)
557 {
558         int direction;
559
560         struct scatterlist *sg;
561         int i, size;
562         u8 *align;
563         char *buffer;
564         unsigned long flags;
565
566         if (data->flags & MMC_DATA_READ)
567                 direction = DMA_FROM_DEVICE;
568         else
569                 direction = DMA_TO_DEVICE;
570
571         dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
572                 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
573
574         dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
575                 128 * 4, direction);
576
577         if (data->flags & MMC_DATA_READ) {
578                 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
579                         data->sg_len, direction);
580
581                 align = host->align_buffer;
582
583                 for_each_sg(data->sg, sg, host->sg_count, i) {
584                         if (sg_dma_address(sg) & 0x3) {
585                                 size = 4 - (sg_dma_address(sg) & 0x3);
586
587                                 buffer = sdhci_kmap_atomic(sg, &flags);
588                                 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
589                                 memcpy(buffer, align, size);
590                                 sdhci_kunmap_atomic(buffer, &flags);
591
592                                 align += 4;
593                         }
594                 }
595         }
596
597         dma_unmap_sg(mmc_dev(host->mmc), data->sg,
598                 data->sg_len, direction);
599 }
600
601 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
602 {
603         u8 count;
604         struct mmc_data *data = cmd->data;
605         unsigned target_timeout, current_timeout;
606
607         /*
608          * If the host controller provides us with an incorrect timeout
609          * value, just skip the check and use 0xE.  The hardware may take
610          * longer to time out, but that's much better than having a too-short
611          * timeout value.
612          */
613         if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
614                 return 0xE;
615
616         /* Unspecified timeout, assume max */
617         if (!data && !cmd->cmd_timeout_ms)
618                 return 0xE;
619
620         /* timeout in us */
621         if (!data)
622                 target_timeout = cmd->cmd_timeout_ms * 1000;
623         else
624                 target_timeout = data->timeout_ns / 1000 +
625                         data->timeout_clks / host->clock;
626
627         if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
628                 host->timeout_clk = host->clock / 1000;
629
630         /*
631          * Figure out needed cycles.
632          * We do this in steps in order to fit inside a 32 bit int.
633          * The first step is the minimum timeout, which will have a
634          * minimum resolution of 6 bits:
635          * (1) 2^13*1000 > 2^22,
636          * (2) host->timeout_clk < 2^16
637          *     =>
638          *     (1) / (2) > 2^6
639          */
640         BUG_ON(!host->timeout_clk);
641         count = 0;
642         current_timeout = (1 << 13) * 1000 / host->timeout_clk;
643         while (current_timeout < target_timeout) {
644                 count++;
645                 current_timeout <<= 1;
646                 if (count >= 0xF)
647                         break;
648         }
649
650         if (count >= 0xF) {
651                 printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
652                        mmc_hostname(host->mmc), cmd->opcode);
653                 count = 0xE;
654         }
655
656         return count;
657 }
658
659 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
660 {
661         u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
662         u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
663
664         if (host->flags & SDHCI_REQ_USE_DMA)
665                 sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
666         else
667                 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
668 }
669
670 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
671 {
672         u8 count;
673         u8 ctrl;
674         struct mmc_data *data = cmd->data;
675         int ret;
676
677         WARN_ON(host->data);
678
679         if (data || (cmd->flags & MMC_RSP_BUSY)) {
680                 count = sdhci_calc_timeout(host, cmd);
681                 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
682         }
683
684         if (!data)
685                 return;
686
687         /* Sanity checks */
688         BUG_ON(data->blksz * data->blocks > 524288);
689         BUG_ON(data->blksz > host->mmc->max_blk_size);
690         BUG_ON(data->blocks > 65535);
691
692         host->data = data;
693         host->data_early = 0;
694         host->data->bytes_xfered = 0;
695
696         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
697                 host->flags |= SDHCI_REQ_USE_DMA;
698
699         /*
700          * FIXME: This doesn't account for merging when mapping the
701          * scatterlist.
702          */
703         if (host->flags & SDHCI_REQ_USE_DMA) {
704                 int broken, i;
705                 struct scatterlist *sg;
706
707                 broken = 0;
708                 if (host->flags & SDHCI_USE_ADMA) {
709                         if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
710                                 broken = 1;
711                 } else {
712                         if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
713                                 broken = 1;
714                 }
715
716                 if (unlikely(broken)) {
717                         for_each_sg(data->sg, sg, data->sg_len, i) {
718                                 if (sg->length & 0x3) {
719                                         DBG("Reverting to PIO because of "
720                                                 "transfer size (%d)\n",
721                                                 sg->length);
722                                         host->flags &= ~SDHCI_REQ_USE_DMA;
723                                         break;
724                                 }
725                         }
726                 }
727         }
728
729         /*
730          * The assumption here being that alignment is the same after
731          * translation to device address space.
732          */
733         if (host->flags & SDHCI_REQ_USE_DMA) {
734                 int broken, i;
735                 struct scatterlist *sg;
736
737                 broken = 0;
738                 if (host->flags & SDHCI_USE_ADMA) {
739                         /*
740                          * As we use 3 byte chunks to work around
741                          * alignment problems, we need to check this
742                          * quirk.
743                          */
744                         if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
745                                 broken = 1;
746                 } else {
747                         if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
748                                 broken = 1;
749                 }
750
751                 if (unlikely(broken)) {
752                         for_each_sg(data->sg, sg, data->sg_len, i) {
753                                 if (sg->offset & 0x3) {
754                                         DBG("Reverting to PIO because of "
755                                                 "bad alignment\n");
756                                         host->flags &= ~SDHCI_REQ_USE_DMA;
757                                         break;
758                                 }
759                         }
760                 }
761         }
762
763         if (host->flags & SDHCI_REQ_USE_DMA) {
764                 if (host->flags & SDHCI_USE_ADMA) {
765                         ret = sdhci_adma_table_pre(host, data);
766                         if (ret) {
767                                 /*
768                                  * This only happens when someone fed
769                                  * us an invalid request.
770                                  */
771                                 WARN_ON(1);
772                                 host->flags &= ~SDHCI_REQ_USE_DMA;
773                         } else {
774                                 sdhci_writel(host, host->adma_addr,
775                                         SDHCI_ADMA_ADDRESS);
776                         }
777                 } else {
778                         int sg_cnt;
779
780                         sg_cnt = dma_map_sg(mmc_dev(host->mmc),
781                                         data->sg, data->sg_len,
782                                         (data->flags & MMC_DATA_READ) ?
783                                                 DMA_FROM_DEVICE :
784                                                 DMA_TO_DEVICE);
785                         if (sg_cnt == 0) {
786                                 /*
787                                  * This only happens when someone fed
788                                  * us an invalid request.
789                                  */
790                                 WARN_ON(1);
791                                 host->flags &= ~SDHCI_REQ_USE_DMA;
792                         } else {
793                                 WARN_ON(sg_cnt != 1);
794                                 sdhci_writel(host, sg_dma_address(data->sg),
795                                         SDHCI_DMA_ADDRESS);
796                         }
797                 }
798         }
799
800         /*
801          * Always adjust the DMA selection as some controllers
802          * (e.g. JMicron) can't do PIO properly when the selection
803          * is ADMA.
804          */
805         if (host->version >= SDHCI_SPEC_200) {
806                 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
807                 ctrl &= ~SDHCI_CTRL_DMA_MASK;
808                 if ((host->flags & SDHCI_REQ_USE_DMA) &&
809                         (host->flags & SDHCI_USE_ADMA))
810                         ctrl |= SDHCI_CTRL_ADMA32;
811                 else
812                         ctrl |= SDHCI_CTRL_SDMA;
813                 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
814         }
815
816         if (!(host->flags & SDHCI_REQ_USE_DMA)) {
817                 int flags;
818
819                 flags = SG_MITER_ATOMIC;
820                 if (host->data->flags & MMC_DATA_READ)
821                         flags |= SG_MITER_TO_SG;
822                 else
823                         flags |= SG_MITER_FROM_SG;
824                 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
825                 host->blocks = data->blocks;
826         }
827
828         sdhci_set_transfer_irqs(host);
829
830         /* Set the DMA boundary value and block size */
831         sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
832                 data->blksz), SDHCI_BLOCK_SIZE);
833         sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
834 }
835
836 static void sdhci_set_transfer_mode(struct sdhci_host *host,
837         struct mmc_data *data)
838 {
839         u16 mode;
840
841         if (data == NULL)
842                 return;
843
844         WARN_ON(!host->data);
845
846         mode = SDHCI_TRNS_BLK_CNT_EN;
847         if (data->blocks > 1) {
848                 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
849                         mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12;
850                 else
851                         mode |= SDHCI_TRNS_MULTI;
852         }
853         if (data->flags & MMC_DATA_READ)
854                 mode |= SDHCI_TRNS_READ;
855         if (host->flags & SDHCI_REQ_USE_DMA)
856                 mode |= SDHCI_TRNS_DMA;
857
858         sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
859 }
860
861 static void sdhci_finish_data(struct sdhci_host *host)
862 {
863         struct mmc_data *data;
864
865         BUG_ON(!host->data);
866
867         data = host->data;
868         host->data = NULL;
869
870         if (host->flags & SDHCI_REQ_USE_DMA) {
871                 if (host->flags & SDHCI_USE_ADMA)
872                         sdhci_adma_table_post(host, data);
873                 else {
874                         dma_unmap_sg(mmc_dev(host->mmc), data->sg,
875                                 data->sg_len, (data->flags & MMC_DATA_READ) ?
876                                         DMA_FROM_DEVICE : DMA_TO_DEVICE);
877                 }
878         }
879
880         /*
881          * The specification states that the block count register must
882          * be updated, but it does not specify at what point in the
883          * data flow. That makes the register entirely useless to read
884          * back so we have to assume that nothing made it to the card
885          * in the event of an error.
886          */
887         if (data->error)
888                 data->bytes_xfered = 0;
889         else
890                 data->bytes_xfered = data->blksz * data->blocks;
891
892         if (data->stop) {
893                 /*
894                  * The controller needs a reset of internal state machines
895                  * upon error conditions.
896                  */
897                 if (data->error) {
898                         sdhci_reset(host, SDHCI_RESET_CMD);
899                         sdhci_reset(host, SDHCI_RESET_DATA);
900                 }
901
902                 sdhci_send_command(host, data->stop);
903         } else
904                 tasklet_schedule(&host->finish_tasklet);
905 }
906
907 static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
908 {
909         int flags;
910         u32 mask;
911         unsigned long timeout;
912
913         WARN_ON(host->cmd);
914
915         /* Wait max 10 ms */
916         timeout = 10;
917
918         mask = SDHCI_CMD_INHIBIT;
919         if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
920                 mask |= SDHCI_DATA_INHIBIT;
921
922         /* We shouldn't wait for data inihibit for stop commands, even
923            though they might use busy signaling */
924         if (host->mrq->data && (cmd == host->mrq->data->stop))
925                 mask &= ~SDHCI_DATA_INHIBIT;
926
927         while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
928                 if (timeout == 0) {
929                         printk(KERN_ERR "%s: Controller never released "
930                                 "inhibit bit(s).\n", mmc_hostname(host->mmc));
931                         sdhci_dumpregs(host);
932                         cmd->error = -EIO;
933                         tasklet_schedule(&host->finish_tasklet);
934                         return;
935                 }
936                 timeout--;
937                 mdelay(1);
938         }
939
940         mod_timer(&host->timer, jiffies + 10 * HZ);
941
942         host->cmd = cmd;
943
944         sdhci_prepare_data(host, cmd);
945
946         sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
947
948         sdhci_set_transfer_mode(host, cmd->data);
949
950         if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
951                 printk(KERN_ERR "%s: Unsupported response type!\n",
952                         mmc_hostname(host->mmc));
953                 cmd->error = -EINVAL;
954                 tasklet_schedule(&host->finish_tasklet);
955                 return;
956         }
957
958         if (!(cmd->flags & MMC_RSP_PRESENT))
959                 flags = SDHCI_CMD_RESP_NONE;
960         else if (cmd->flags & MMC_RSP_136)
961                 flags = SDHCI_CMD_RESP_LONG;
962         else if (cmd->flags & MMC_RSP_BUSY)
963                 flags = SDHCI_CMD_RESP_SHORT_BUSY;
964         else
965                 flags = SDHCI_CMD_RESP_SHORT;
966
967         if (cmd->flags & MMC_RSP_CRC)
968                 flags |= SDHCI_CMD_CRC;
969         if (cmd->flags & MMC_RSP_OPCODE)
970                 flags |= SDHCI_CMD_INDEX;
971         if (cmd->data)
972                 flags |= SDHCI_CMD_DATA;
973
974         sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
975 }
976
977 static void sdhci_finish_command(struct sdhci_host *host)
978 {
979         int i;
980
981         BUG_ON(host->cmd == NULL);
982
983         if (host->cmd->flags & MMC_RSP_PRESENT) {
984                 if (host->cmd->flags & MMC_RSP_136) {
985                         /* CRC is stripped so we need to do some shifting. */
986                         for (i = 0;i < 4;i++) {
987                                 host->cmd->resp[i] = sdhci_readl(host,
988                                         SDHCI_RESPONSE + (3-i)*4) << 8;
989                                 if (i != 3)
990                                         host->cmd->resp[i] |=
991                                                 sdhci_readb(host,
992                                                 SDHCI_RESPONSE + (3-i)*4-1);
993                         }
994                 } else {
995                         host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
996                 }
997         }
998
999         host->cmd->error = 0;
1000
1001         if (host->data && host->data_early)
1002                 sdhci_finish_data(host);
1003
1004         if (!host->cmd->data)
1005                 tasklet_schedule(&host->finish_tasklet);
1006
1007         host->cmd = NULL;
1008 }
1009
1010 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1011 {
1012         int div;
1013         u16 clk;
1014         unsigned long timeout;
1015
1016         if (clock == host->clock)
1017                 return;
1018
1019         if (host->ops->set_clock) {
1020                 host->ops->set_clock(host, clock);
1021                 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
1022                         return;
1023         }
1024
1025         sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1026
1027         if (clock == 0)
1028                 goto out;
1029
1030         if (host->version >= SDHCI_SPEC_300) {
1031                 /* Version 3.00 divisors must be a multiple of 2. */
1032                 if (host->max_clk <= clock)
1033                         div = 1;
1034                 else {
1035                         for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
1036                                 if ((host->max_clk / div) <= clock)
1037                                         break;
1038                         }
1039                 }
1040         } else {
1041                 /* Version 2.00 divisors must be a power of 2. */
1042                 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1043                         if ((host->max_clk / div) <= clock)
1044                                 break;
1045                 }
1046         }
1047         div >>= 1;
1048
1049         clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1050         clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1051                 << SDHCI_DIVIDER_HI_SHIFT;
1052         clk |= SDHCI_CLOCK_INT_EN;
1053         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1054
1055         /* Wait max 20 ms */
1056         timeout = 20;
1057         while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1058                 & SDHCI_CLOCK_INT_STABLE)) {
1059                 if (timeout == 0) {
1060                         printk(KERN_ERR "%s: Internal clock never "
1061                                 "stabilised.\n", mmc_hostname(host->mmc));
1062                         sdhci_dumpregs(host);
1063                         return;
1064                 }
1065                 timeout--;
1066                 mdelay(1);
1067         }
1068
1069         clk |= SDHCI_CLOCK_CARD_EN;
1070         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1071
1072 out:
1073         host->clock = clock;
1074 }
1075
1076 static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1077 {
1078         u8 pwr = 0;
1079
1080         if (power != (unsigned short)-1) {
1081                 switch (1 << power) {
1082                 case MMC_VDD_165_195:
1083                         pwr = SDHCI_POWER_180;
1084                         break;
1085                 case MMC_VDD_29_30:
1086                 case MMC_VDD_30_31:
1087                         pwr = SDHCI_POWER_300;
1088                         break;
1089                 case MMC_VDD_32_33:
1090                 case MMC_VDD_33_34:
1091                         pwr = SDHCI_POWER_330;
1092                         break;
1093                 default:
1094                         BUG();
1095                 }
1096         }
1097
1098         if (host->pwr == pwr)
1099                 return;
1100
1101         host->pwr = pwr;
1102
1103         if (pwr == 0) {
1104                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1105                 return;
1106         }
1107
1108         /*
1109          * Spec says that we should clear the power reg before setting
1110          * a new value. Some controllers don't seem to like this though.
1111          */
1112         if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1113                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1114
1115         /*
1116          * At least the Marvell CaFe chip gets confused if we set the voltage
1117          * and set turn on power at the same time, so set the voltage first.
1118          */
1119         if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1120                 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1121
1122         pwr |= SDHCI_POWER_ON;
1123
1124         sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1125
1126         /*
1127          * Some controllers need an extra 10ms delay of 10ms before they
1128          * can apply clock after applying power
1129          */
1130         if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1131                 mdelay(10);
1132 }
1133
1134 /*****************************************************************************\
1135  *                                                                           *
1136  * MMC callbacks                                                             *
1137  *                                                                           *
1138 \*****************************************************************************/
1139
1140 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1141 {
1142         struct sdhci_host *host;
1143         bool present;
1144         unsigned long flags;
1145
1146         host = mmc_priv(mmc);
1147
1148         spin_lock_irqsave(&host->lock, flags);
1149
1150         WARN_ON(host->mrq != NULL);
1151
1152 #ifndef SDHCI_USE_LEDS_CLASS
1153         sdhci_activate_led(host);
1154 #endif
1155         if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
1156                 if (mrq->stop) {
1157                         mrq->data->stop = NULL;
1158                         mrq->stop = NULL;
1159                 }
1160         }
1161
1162         host->mrq = mrq;
1163
1164         /* If polling, assume that the card is always present. */
1165         if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1166                 present = true;
1167         else
1168                 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1169                                 SDHCI_CARD_PRESENT;
1170
1171         if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1172                 host->mrq->cmd->error = -ENOMEDIUM;
1173                 tasklet_schedule(&host->finish_tasklet);
1174         } else
1175                 sdhci_send_command(host, mrq->cmd);
1176
1177         mmiowb();
1178         spin_unlock_irqrestore(&host->lock, flags);
1179 }
1180
1181 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1182 {
1183         struct sdhci_host *host;
1184         unsigned long flags;
1185         u8 ctrl;
1186
1187         host = mmc_priv(mmc);
1188
1189         spin_lock_irqsave(&host->lock, flags);
1190
1191         if (host->flags & SDHCI_DEVICE_DEAD)
1192                 goto out;
1193
1194         /*
1195          * Reset the chip on each power off.
1196          * Should clear out any weird states.
1197          */
1198         if (ios->power_mode == MMC_POWER_OFF) {
1199                 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1200                 sdhci_reinit(host);
1201         }
1202
1203         sdhci_set_clock(host, ios->clock);
1204
1205         if (ios->power_mode == MMC_POWER_OFF)
1206                 sdhci_set_power(host, -1);
1207         else
1208                 sdhci_set_power(host, ios->vdd);
1209
1210         if (host->ops->platform_send_init_74_clocks)
1211                 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1212
1213         /*
1214          * If your platform has 8-bit width support but is not a v3 controller,
1215          * or if it requires special setup code, you should implement that in
1216          * platform_8bit_width().
1217          */
1218         if (host->ops->platform_8bit_width)
1219                 host->ops->platform_8bit_width(host, ios->bus_width);
1220         else {
1221                 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1222                 if (ios->bus_width == MMC_BUS_WIDTH_8) {
1223                         ctrl &= ~SDHCI_CTRL_4BITBUS;
1224                         if (host->version >= SDHCI_SPEC_300)
1225                                 ctrl |= SDHCI_CTRL_8BITBUS;
1226                 } else {
1227                         if (host->version >= SDHCI_SPEC_300)
1228                                 ctrl &= ~SDHCI_CTRL_8BITBUS;
1229                         if (ios->bus_width == MMC_BUS_WIDTH_4)
1230                                 ctrl |= SDHCI_CTRL_4BITBUS;
1231                         else
1232                                 ctrl &= ~SDHCI_CTRL_4BITBUS;
1233                 }
1234                 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1235         }
1236
1237         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1238
1239         if ((ios->timing == MMC_TIMING_SD_HS ||
1240              ios->timing == MMC_TIMING_MMC_HS)
1241             && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1242                 ctrl |= SDHCI_CTRL_HISPD;
1243         else
1244                 ctrl &= ~SDHCI_CTRL_HISPD;
1245
1246         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1247
1248         if (host->version >= SDHCI_SPEC_300) {
1249                 u16 ctrl_2;
1250
1251                 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1252                 if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1253                         /*
1254                          * We only need to set Driver Strength if the
1255                          * preset value enable is not set.
1256                          */
1257                         ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1258                         if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1259                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1260                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1261                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1262
1263                         sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1264                 }
1265         }
1266
1267         /*
1268          * Some (ENE) controllers go apeshit on some ios operation,
1269          * signalling timeout and CRC errors even on CMD0. Resetting
1270          * it on each ios seems to solve the problem.
1271          */
1272         if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1273                 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1274
1275 out:
1276         mmiowb();
1277         spin_unlock_irqrestore(&host->lock, flags);
1278 }
1279
1280 static int check_ro(struct sdhci_host *host)
1281 {
1282         unsigned long flags;
1283         int is_readonly;
1284
1285         spin_lock_irqsave(&host->lock, flags);
1286
1287         if (host->flags & SDHCI_DEVICE_DEAD)
1288                 is_readonly = 0;
1289         else if (host->ops->get_ro)
1290                 is_readonly = host->ops->get_ro(host);
1291         else
1292                 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1293                                 & SDHCI_WRITE_PROTECT);
1294
1295         spin_unlock_irqrestore(&host->lock, flags);
1296
1297         /* This quirk needs to be replaced by a callback-function later */
1298         return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1299                 !is_readonly : is_readonly;
1300 }
1301
1302 #define SAMPLE_COUNT    5
1303
1304 static int sdhci_get_ro(struct mmc_host *mmc)
1305 {
1306         struct sdhci_host *host;
1307         int i, ro_count;
1308
1309         host = mmc_priv(mmc);
1310
1311         if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1312                 return check_ro(host);
1313
1314         ro_count = 0;
1315         for (i = 0; i < SAMPLE_COUNT; i++) {
1316                 if (check_ro(host)) {
1317                         if (++ro_count > SAMPLE_COUNT / 2)
1318                                 return 1;
1319                 }
1320                 msleep(30);
1321         }
1322         return 0;
1323 }
1324
1325 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1326 {
1327         struct sdhci_host *host;
1328         unsigned long flags;
1329
1330         host = mmc_priv(mmc);
1331
1332         spin_lock_irqsave(&host->lock, flags);
1333
1334         if (host->flags & SDHCI_DEVICE_DEAD)
1335                 goto out;
1336
1337         if (enable)
1338                 sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1339         else
1340                 sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1341 out:
1342         mmiowb();
1343
1344         spin_unlock_irqrestore(&host->lock, flags);
1345 }
1346
1347 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1348         struct mmc_ios *ios)
1349 {
1350         struct sdhci_host *host;
1351         u8 pwr;
1352         u16 clk, ctrl;
1353         u32 present_state;
1354
1355         host = mmc_priv(mmc);
1356
1357         /*
1358          * Signal Voltage Switching is only applicable for Host Controllers
1359          * v3.00 and above.
1360          */
1361         if (host->version < SDHCI_SPEC_300)
1362                 return 0;
1363
1364         /*
1365          * We first check whether the request is to set signalling voltage
1366          * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
1367          */
1368         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1369         if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1370                 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1371                 ctrl &= ~SDHCI_CTRL_VDD_180;
1372                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1373
1374                 /* Wait for 5ms */
1375                 usleep_range(5000, 5500);
1376
1377                 /* 3.3V regulator output should be stable within 5 ms */
1378                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1379                 if (!(ctrl & SDHCI_CTRL_VDD_180))
1380                         return 0;
1381                 else {
1382                         printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
1383                                 "signalling voltage failed\n");
1384                         return -EIO;
1385                 }
1386         } else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
1387                   (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
1388                 /* Stop SDCLK */
1389                 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1390                 clk &= ~SDHCI_CLOCK_CARD_EN;
1391                 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1392
1393                 /* Check whether DAT[3:0] is 0000 */
1394                 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1395                 if (!((present_state & SDHCI_DATA_LVL_MASK) >>
1396                        SDHCI_DATA_LVL_SHIFT)) {
1397                         /*
1398                          * Enable 1.8V Signal Enable in the Host Control2
1399                          * register
1400                          */
1401                         ctrl |= SDHCI_CTRL_VDD_180;
1402                         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1403
1404                         /* Wait for 5ms */
1405                         usleep_range(5000, 5500);
1406
1407                         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1408                         if (ctrl & SDHCI_CTRL_VDD_180) {
1409                                 /* Provide SDCLK again and wait for 1ms*/
1410                                 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1411                                 clk |= SDHCI_CLOCK_CARD_EN;
1412                                 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1413                                 usleep_range(1000, 1500);
1414
1415                                 /*
1416                                  * If DAT[3:0] level is 1111b, then the card
1417                                  * was successfully switched to 1.8V signaling.
1418                                  */
1419                                 present_state = sdhci_readl(host,
1420                                                         SDHCI_PRESENT_STATE);
1421                                 if ((present_state & SDHCI_DATA_LVL_MASK) ==
1422                                      SDHCI_DATA_LVL_MASK)
1423                                         return 0;
1424                         }
1425                 }
1426
1427                 /*
1428                  * If we are here, that means the switch to 1.8V signaling
1429                  * failed. We power cycle the card, and retry initialization
1430                  * sequence by setting S18R to 0.
1431                  */
1432                 pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
1433                 pwr &= ~SDHCI_POWER_ON;
1434                 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1435
1436                 /* Wait for 1ms as per the spec */
1437                 usleep_range(1000, 1500);
1438                 pwr |= SDHCI_POWER_ON;
1439                 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1440
1441                 printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
1442                         "voltage failed, retrying with S18R set to 0\n");
1443                 return -EAGAIN;
1444         } else
1445                 /* No signal voltage switch required */
1446                 return 0;
1447 }
1448
1449 static const struct mmc_host_ops sdhci_ops = {
1450         .request        = sdhci_request,
1451         .set_ios        = sdhci_set_ios,
1452         .get_ro         = sdhci_get_ro,
1453         .enable_sdio_irq = sdhci_enable_sdio_irq,
1454         .start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
1455 };
1456
1457 /*****************************************************************************\
1458  *                                                                           *
1459  * Tasklets                                                                  *
1460  *                                                                           *
1461 \*****************************************************************************/
1462
1463 static void sdhci_tasklet_card(unsigned long param)
1464 {
1465         struct sdhci_host *host;
1466         unsigned long flags;
1467
1468         host = (struct sdhci_host*)param;
1469
1470         spin_lock_irqsave(&host->lock, flags);
1471
1472         if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
1473                 if (host->mrq) {
1474                         printk(KERN_ERR "%s: Card removed during transfer!\n",
1475                                 mmc_hostname(host->mmc));
1476                         printk(KERN_ERR "%s: Resetting controller.\n",
1477                                 mmc_hostname(host->mmc));
1478
1479                         sdhci_reset(host, SDHCI_RESET_CMD);
1480                         sdhci_reset(host, SDHCI_RESET_DATA);
1481
1482                         host->mrq->cmd->error = -ENOMEDIUM;
1483                         tasklet_schedule(&host->finish_tasklet);
1484                 }
1485         }
1486
1487         spin_unlock_irqrestore(&host->lock, flags);
1488
1489         mmc_detect_change(host->mmc, msecs_to_jiffies(200));
1490 }
1491
1492 static void sdhci_tasklet_finish(unsigned long param)
1493 {
1494         struct sdhci_host *host;
1495         unsigned long flags;
1496         struct mmc_request *mrq;
1497
1498         host = (struct sdhci_host*)param;
1499
1500         /*
1501          * If this tasklet gets rescheduled while running, it will
1502          * be run again afterwards but without any active request.
1503          */
1504         if (!host->mrq)
1505                 return;
1506
1507         spin_lock_irqsave(&host->lock, flags);
1508
1509         del_timer(&host->timer);
1510
1511         mrq = host->mrq;
1512
1513         /*
1514          * The controller needs a reset of internal state machines
1515          * upon error conditions.
1516          */
1517         if (!(host->flags & SDHCI_DEVICE_DEAD) &&
1518             ((mrq->cmd && mrq->cmd->error) ||
1519                  (mrq->data && (mrq->data->error ||
1520                   (mrq->data->stop && mrq->data->stop->error))) ||
1521                    (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
1522
1523                 /* Some controllers need this kick or reset won't work here */
1524                 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
1525                         unsigned int clock;
1526
1527                         /* This is to force an update */
1528                         clock = host->clock;
1529                         host->clock = 0;
1530                         sdhci_set_clock(host, clock);
1531                 }
1532
1533                 /* Spec says we should do both at the same time, but Ricoh
1534                    controllers do not like that. */
1535                 sdhci_reset(host, SDHCI_RESET_CMD);
1536                 sdhci_reset(host, SDHCI_RESET_DATA);
1537         }
1538
1539         host->mrq = NULL;
1540         host->cmd = NULL;
1541         host->data = NULL;
1542
1543 #ifndef SDHCI_USE_LEDS_CLASS
1544         sdhci_deactivate_led(host);
1545 #endif
1546
1547         mmiowb();
1548         spin_unlock_irqrestore(&host->lock, flags);
1549
1550         mmc_request_done(host->mmc, mrq);
1551 }
1552
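     /*
      * Software watchdog, armed per request.  If the controller never
      * raises the expected interrupt, mark the current command or data
      * phase as timed out and complete the request.
      */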
1553 static void sdhci_timeout_timer(unsigned long data)
1554 {
1555         struct sdhci_host *host;
1556         unsigned long flags;
1557
1558         host = (struct sdhci_host*)data;
1559
1560         spin_lock_irqsave(&host->lock, flags);
1561
1562         if (host->mrq) {
1563                 printk(KERN_ERR "%s: Timeout waiting for hardware "
1564                         "interrupt.\n", mmc_hostname(host->mmc));
1565                 sdhci_dumpregs(host);
1566
1567                 if (host->data) {
1568                         host->data->error = -ETIMEDOUT;
1569                         sdhci_finish_data(host);
1570                 } else {
1571                         if (host->cmd)
1572                                 host->cmd->error = -ETIMEDOUT;
1573                         else
1574                                 host->mrq->cmd->error = -ETIMEDOUT;
1575
1576                         tasklet_schedule(&host->finish_tasklet);
1577                 }
1578         }
1579
1580         mmiowb();
1581         spin_unlock_irqrestore(&host->lock, flags);
1582 }
1583
1584 /*****************************************************************************\
1585  *                                                                           *
1586  * Interrupt handling                                                        *
1587  *                                                                           *
1588 \*****************************************************************************/
1589
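     /*
      * Handle the command portion of the interrupt status: translate
      * timeout and CRC/end-bit/index errors into the command's error
      * field, cope with the overloaded end-of-busy indication and
      * complete the command on SDHCI_INT_RESPONSE.
      */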
1590 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1591 {
1592         BUG_ON(intmask == 0);
1593
1594         if (!host->cmd) {
1595                 printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
1596                         "though no command operation was in progress.\n",
1597                         mmc_hostname(host->mmc), (unsigned)intmask);
1598                 sdhci_dumpregs(host);
1599                 return;
1600         }
1601
1602         if (intmask & SDHCI_INT_TIMEOUT)
1603                 host->cmd->error = -ETIMEDOUT;
1604         else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
1605                         SDHCI_INT_INDEX))
1606                 host->cmd->error = -EILSEQ;
1607
1608         if (host->cmd->error) {
1609                 tasklet_schedule(&host->finish_tasklet);
1610                 return;
1611         }
1612
1613         /*
1614          * The host can send an interrupt when the busy state has
1615          * ended, allowing us to wait without wasting CPU cycles.
1616          * Unfortunately this is overloaded on the "data complete"
1617          * interrupt, so we need to take some care when handling
1618          * it.
1619          *
1620          * Note: The 1.0 specification is a bit ambiguous about this
1621          *       feature so there might be some problems with older
1622          *       controllers.
1623          */
1624         if (host->cmd->flags & MMC_RSP_BUSY) {
1625                 if (host->cmd->data)
1626                         DBG("Cannot wait for busy signal when also "
1627                                 "doing a data transfer\n");
1628                 else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
1629                         return;
1630
1631                 /* The controller does not support the end-of-busy IRQ,
1632                  * fall through and take the SDHCI_INT_RESPONSE */
1633         }
1634
1635         if (intmask & SDHCI_INT_RESPONSE)
1636                 sdhci_finish_command(host);
1637 }
1638
1639 #ifdef CONFIG_MMC_DEBUG
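     /*
      * Walk the ADMA descriptor table (8 bytes per descriptor: attribute,
      * length, 32-bit address) and dump each entry until the END bit.
      */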
1640 static void sdhci_show_adma_error(struct sdhci_host *host)
1641 {
1642         const char *name = mmc_hostname(host->mmc);
1643         u8 *desc = host->adma_desc;
1644         __le32 *dma;
1645         __le16 *len;
1646         u8 attr;
1647
1648         sdhci_dumpregs(host);
1649
1650         while (true) {
1651                 dma = (__le32 *)(desc + 4);
1652                 len = (__le16 *)(desc + 2);
1653                 attr = *desc;
1654
1655                 DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
1656                     name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
1657
1658                 desc += 8;
1659
1660                 if (attr & 2)
1661                         break;
1662         }
1663 }
1664 #else
1665 static void sdhci_show_adma_error(struct sdhci_host *host) { }
1666 #endif
1667
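     /*
      * Handle the data portion of the interrupt status: end-of-busy for
      * commands with a busy response (R1b), data timeout/CRC/end-bit and
      * ADMA errors, PIO buffer-ready events, SDMA boundary interrupts and
      * data completion.
      */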
1668 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1669 {
1670         BUG_ON(intmask == 0);
1671
1672         if (!host->data) {
1673                 /*
1674                  * The "data complete" interrupt is also used to
1675                  * indicate that a busy state has ended. See comment
1676                  * above in sdhci_cmd_irq().
1677                  */
1678                 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
1679                         if (intmask & SDHCI_INT_DATA_END) {
1680                                 sdhci_finish_command(host);
1681                                 return;
1682                         }
1683                 }
1684
1685                 printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
1686                         "though no data operation was in progress.\n",
1687                         mmc_hostname(host->mmc), (unsigned)intmask);
1688                 sdhci_dumpregs(host);
1689
1690                 return;
1691         }
1692
1693         if (intmask & SDHCI_INT_DATA_TIMEOUT)
1694                 host->data->error = -ETIMEDOUT;
1695         else if (intmask & SDHCI_INT_DATA_END_BIT)
1696                 host->data->error = -EILSEQ;
1697         else if ((intmask & SDHCI_INT_DATA_CRC) &&
1698                 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
1699                         != MMC_BUS_TEST_R)
1700                 host->data->error = -EILSEQ;
1701         else if (intmask & SDHCI_INT_ADMA_ERROR) {
1702                 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
1703                 sdhci_show_adma_error(host);
1704                 host->data->error = -EIO;
1705         }
1706
1707         if (host->data->error)
1708                 sdhci_finish_data(host);
1709         else {
1710                 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
1711                         sdhci_transfer_pio(host);
1712
1713                 /*
1714                  * We currently don't do anything fancy with DMA
1715                  * boundaries, but as we can't disable the feature
1716                  * we need to at least restart the transfer.
1717                  *
1718                  * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
1719                  * should return a valid address to continue from, but as
1720                  * some controllers are faulty, don't trust them.
1721                  */
1722                 if (intmask & SDHCI_INT_DMA_END) {
1723                         u32 dmastart, dmanow;
1724                         dmastart = sg_dma_address(host->data->sg);
1725                         dmanow = dmastart + host->data->bytes_xfered;
1726                         /*
1727                          * Force update to the next DMA block boundary.
1728                          */
1729                         dmanow = (dmanow &
1730                                 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
1731                                 SDHCI_DEFAULT_BOUNDARY_SIZE;
1732                         host->data->bytes_xfered = dmanow - dmastart;
1733                         DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
1734                                 " next 0x%08x\n",
1735                                 mmc_hostname(host->mmc), dmastart,
1736                                 host->data->bytes_xfered, dmanow);
1737                         sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
1738                 }
1739
1740                 if (intmask & SDHCI_INT_DATA_END) {
1741                         if (host->cmd) {
1742                                 /*
1743                                  * Data managed to finish before the
1744                                  * command completed. Make sure we do
1745                                  * things in the proper order.
1746                                  */
1747                                 host->data_early = 1;
1748                         } else {
1749                                 sdhci_finish_data(host);
1750                         }
1751                 }
1752         }
1753 }
1754
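     /*
      * Top-level interrupt handler: read and acknowledge the interrupt
      * status, dispatch the command and data bits to the helpers above,
      * schedule the card tasklet for insert/remove events and signal SDIO
      * card interrupts only after dropping the host lock, since that call
      * re-enters the driver.
      */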
1755 static irqreturn_t sdhci_irq(int irq, void *dev_id)
1756 {
1757         irqreturn_t result;
1758         struct sdhci_host* host = dev_id;
1759         u32 intmask;
1760         int cardint = 0;
1761
1762         spin_lock(&host->lock);
1763
1764         intmask = sdhci_readl(host, SDHCI_INT_STATUS);
1765
1766         if (!intmask || intmask == 0xffffffff) {
1767                 result = IRQ_NONE;
1768                 goto out;
1769         }
1770
1771         DBG("*** %s got interrupt: 0x%08x\n",
1772                 mmc_hostname(host->mmc), intmask);
1773
1774         if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
1775                 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
1776                         SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
1777                 tasklet_schedule(&host->card_tasklet);
1778         }
1779
1780         intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
1781
1782         if (intmask & SDHCI_INT_CMD_MASK) {
1783                 sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
1784                         SDHCI_INT_STATUS);
1785                 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
1786         }
1787
1788         if (intmask & SDHCI_INT_DATA_MASK) {
1789                 sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
1790                         SDHCI_INT_STATUS);
1791                 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
1792         }
1793
1794         intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
1795
1796         intmask &= ~SDHCI_INT_ERROR;
1797
1798         if (intmask & SDHCI_INT_BUS_POWER) {
1799                 printk(KERN_ERR "%s: Card is consuming too much power!\n",
1800                         mmc_hostname(host->mmc));
1801                 sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
1802         }
1803
1804         intmask &= ~SDHCI_INT_BUS_POWER;
1805
1806         if (intmask & SDHCI_INT_CARD_INT)
1807                 cardint = 1;
1808
1809         intmask &= ~SDHCI_INT_CARD_INT;
1810
1811         if (intmask) {
1812                 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
1813                         mmc_hostname(host->mmc), intmask);
1814                 sdhci_dumpregs(host);
1815
1816                 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
1817         }
1818
1819         result = IRQ_HANDLED;
1820
1821         mmiowb();
1822 out:
1823         spin_unlock(&host->lock);
1824
1825         /*
1826          * We have to delay this as it calls back into the driver.
1827          */
1828         if (cardint)
1829                 mmc_signal_sdio_irq(host->mmc);
1830
1831         return result;
1832 }
1833
1834 /*****************************************************************************\
1835  *                                                                           *
1836  * Suspend/resume                                                            *
1837  *                                                                           *
1838 \*****************************************************************************/
1839
1840 #ifdef CONFIG_PM
1841
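     /*
      * Suspend: stop card detection, let the core suspend the card,
      * release the interrupt line and, if a vmmc regulator is in use,
      * power it down.
      */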
1842 int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1843 {
1844         int ret;
1845
1846         sdhci_disable_card_detection(host);
1847
1848         ret = mmc_suspend_host(host->mmc);
1849         if (ret)
1850                 return ret;
1851
1852         free_irq(host->irq, host);
1853
1854         if (host->vmmc)
1855                 ret = regulator_disable(host->vmmc);
1856
1857         return ret;
1858 }
1859
1860 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
1861
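     /*
      * Resume: re-enable the vmmc regulator and DMA, re-request the
      * interrupt, reinitialize the controller and let the core resume
      * the card.
      */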
1862 int sdhci_resume_host(struct sdhci_host *host)
1863 {
1864         int ret;
1865
1866         if (host->vmmc) {
1867                 int ret = regulator_enable(host->vmmc);
1868                 if (ret)
1869                         return ret;
1870         }
1871
1872
1873         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1874                 if (host->ops->enable_dma)
1875                         host->ops->enable_dma(host);
1876         }
1877
1878         ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1879                           mmc_hostname(host->mmc), host);
1880         if (ret)
1881                 return ret;
1882
1883         sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
1884         mmiowb();
1885
1886         ret = mmc_resume_host(host->mmc);
1887         sdhci_enable_card_detection(host);
1888
1889         return ret;
1890 }
1891
1892 EXPORT_SYMBOL_GPL(sdhci_resume_host);
1893
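     /* Allow the card interrupt to wake the system while suspended. */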
1894 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
1895 {
1896         u8 val;
1897         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
1898         val |= SDHCI_WAKE_ON_INT;
1899         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
1900 }
1901
1902 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
1903
1904 #endif /* CONFIG_PM */
1905
1906 /*****************************************************************************\
1907  *                                                                           *
1908  * Device allocation/registration                                            *
1909  *                                                                           *
1910 \*****************************************************************************/
1911
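     /**
      * sdhci_alloc_host - allocate an mmc_host with SDHCI private data
      * @dev: parent device
      * @priv_size: extra space requested by the glue driver, reachable
      *	via sdhci_priv()
      *
      * Returns the new sdhci_host on success or an ERR_PTR() on failure.
      */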
1912 struct sdhci_host *sdhci_alloc_host(struct device *dev,
1913         size_t priv_size)
1914 {
1915         struct mmc_host *mmc;
1916         struct sdhci_host *host;
1917
1918         WARN_ON(dev == NULL);
1919
1920         mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
1921         if (!mmc)
1922                 return ERR_PTR(-ENOMEM);
1923
1924         host = mmc_priv(mmc);
1925         host->mmc = mmc;
1926
1927         return host;
1928 }
1929
1930 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1931
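     /**
      * sdhci_add_host - finish setup and register the host with the MMC core
      * @host: host previously obtained from sdhci_alloc_host()
      *
      * Reads the capability registers, chooses between ADMA, SDMA and PIO,
      * derives clock, voltage and transfer-size limits, requests the
      * interrupt and finally calls mmc_add_host().  A glue driver is
      * expected to have filled in at least host->hw_name, host->ops,
      * host->ioaddr, host->irq and any quirks before calling this
      * (illustrative list; see the individual glue drivers for details).
      */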
1932 int sdhci_add_host(struct sdhci_host *host)
1933 {
1934         struct mmc_host *mmc;
1935         u32 caps[2];
1936         u32 max_current_caps;
1937         unsigned int ocr_avail;
1938         int ret;
1939
1940         WARN_ON(host == NULL);
1941         if (host == NULL)
1942                 return -EINVAL;
1943
1944         mmc = host->mmc;
1945
1946         if (debug_quirks)
1947                 host->quirks = debug_quirks;
1948
1949         sdhci_reset(host, SDHCI_RESET_ALL);
1950
1951         host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
1952         host->version = (host->version & SDHCI_SPEC_VER_MASK)
1953                                 >> SDHCI_SPEC_VER_SHIFT;
1954         if (host->version > SDHCI_SPEC_300) {
1955                 printk(KERN_ERR "%s: Unknown controller version (%d). "
1956                         "You may experience problems.\n", mmc_hostname(mmc),
1957                         host->version);
1958         }
1959
1960         caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
1961                 sdhci_readl(host, SDHCI_CAPABILITIES);
1962
1963         caps[1] = (host->version >= SDHCI_SPEC_300) ?
1964                 sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
1965
1966         if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1967                 host->flags |= SDHCI_USE_SDMA;
1968         else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
1969                 DBG("Controller doesn't have SDMA capability\n");
1970         else
1971                 host->flags |= SDHCI_USE_SDMA;
1972
1973         if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
1974                 (host->flags & SDHCI_USE_SDMA)) {
1975                 DBG("Disabling DMA as it is marked broken\n");
1976                 host->flags &= ~SDHCI_USE_SDMA;
1977         }
1978
1979         if ((host->version >= SDHCI_SPEC_200) &&
1980                 (caps[0] & SDHCI_CAN_DO_ADMA2))
1981                 host->flags |= SDHCI_USE_ADMA;
1982
1983         if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
1984                 (host->flags & SDHCI_USE_ADMA)) {
1985                 DBG("Disabling ADMA as it is marked broken\n");
1986                 host->flags &= ~SDHCI_USE_ADMA;
1987         }
1988
1989         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1990                 if (host->ops->enable_dma) {
1991                         if (host->ops->enable_dma(host)) {
1992                                 printk(KERN_WARNING "%s: No suitable DMA "
1993                                         "available. Falling back to PIO.\n",
1994                                         mmc_hostname(mmc));
1995                                 host->flags &=
1996                                         ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
1997                         }
1998                 }
1999         }
2000
2001         if (host->flags & SDHCI_USE_ADMA) {
2002                 /*
2003                  * We need to allocate descriptors for all sg entries
2004                  * (128) and potentially one alignment transfer for
2005                  * each of those entries.
2006                  */
2007                 host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
2008                 host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
2009                 if (!host->adma_desc || !host->align_buffer) {
2010                         kfree(host->adma_desc);
2011                         kfree(host->align_buffer);
2012                         printk(KERN_WARNING "%s: Unable to allocate ADMA "
2013                                 "buffers. Falling back to standard DMA.\n",
2014                                 mmc_hostname(mmc));
2015                         host->flags &= ~SDHCI_USE_ADMA;
2016                 }
2017         }
2018
2019         /*
2020          * If we use DMA, then it's up to the caller to set the DMA
2021          * mask, but PIO does not need the hw shim so we set a new
2022          * mask here in that case.
2023          */
2024         if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
2025                 host->dma_mask = DMA_BIT_MASK(64);
2026                 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
2027         }
2028
2029         if (host->version >= SDHCI_SPEC_300)
2030                 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
2031                         >> SDHCI_CLOCK_BASE_SHIFT;
2032         else
2033                 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
2034                         >> SDHCI_CLOCK_BASE_SHIFT;
2035
2036         host->max_clk *= 1000000;
2037         if (host->max_clk == 0 || host->quirks &
2038                         SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
2039                 if (!host->ops->get_max_clock) {
2040                         printk(KERN_ERR
2041                                "%s: Hardware doesn't specify base clock "
2042                                "frequency.\n", mmc_hostname(mmc));
2043                         return -ENODEV;
2044                 }
2045                 host->max_clk = host->ops->get_max_clock(host);
2046         }
2047
2048         host->timeout_clk =
2049                 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2050         if (host->timeout_clk == 0) {
2051                 if (host->ops->get_timeout_clock) {
2052                         host->timeout_clk = host->ops->get_timeout_clock(host);
2053                 } else if (!(host->quirks &
2054                                 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2055                         printk(KERN_ERR
2056                                "%s: Hardware doesn't specify timeout clock "
2057                                "frequency.\n", mmc_hostname(mmc));
2058                         return -ENODEV;
2059                 }
2060         }
2061         if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2062                 host->timeout_clk *= 1000;
2063
2064         /*
2065          * Set host parameters.
2066          */
2067         mmc->ops = &sdhci_ops;
2068         if (host->ops->get_min_clock)
2069                 mmc->f_min = host->ops->get_min_clock(host);
2070         else if (host->version >= SDHCI_SPEC_300)
2071                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
2072         else
2073                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
2074
2075         mmc->f_max = host->max_clk;
2076         mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE;
2077
2078         /*
2079          * A controller may support 8-bit width, but the board itself
2080          * might not have the pins brought out.  Boards that support
2081          * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
2082          * their platform code before calling sdhci_add_host(), and we
2083          * won't assume 8-bit width for hosts without that CAP.
2084          */
2085         if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
2086                 mmc->caps |= MMC_CAP_4_BIT_DATA;
2087
2088         if (caps[0] & SDHCI_CAN_DO_HISPD)
2089                 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2090
2091         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2092             mmc_card_is_removable(mmc))
2093                 mmc->caps |= MMC_CAP_NEEDS_POLL;
2094
2095         /* UHS-I mode(s) supported by the host controller. */
2096         if (host->version >= SDHCI_SPEC_300)
2097                 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
2098
2099         /* SDR104 support also implies SDR50 support */
2100         if (caps[1] & SDHCI_SUPPORT_SDR104)
2101                 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
2102         else if (caps[1] & SDHCI_SUPPORT_SDR50)
2103                 mmc->caps |= MMC_CAP_UHS_SDR50;
2104
2105         if (caps[1] & SDHCI_SUPPORT_DDR50)
2106                 mmc->caps |= MMC_CAP_UHS_DDR50;
2107
2108         /* Driver Type(s) (A, C, D) supported by the host */
2109         if (caps[1] & SDHCI_DRIVER_TYPE_A)
2110                 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
2111         if (caps[1] & SDHCI_DRIVER_TYPE_C)
2112                 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
2113         if (caps[1] & SDHCI_DRIVER_TYPE_D)
2114                 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
2115
2116         ocr_avail = 0;
2117         /*
2118          * According to SD Host Controller spec v3.00, if the Host System
2119          * can supply more than 150mA, the Host Driver should set XPC to 1. Also
2120          * the value is meaningful only if Voltage Support in the Capabilities
2121          * register is set. The actual current value is 4 times the register
2122          * value.
2123          */
2124         max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
2125
2126         if (caps[0] & SDHCI_CAN_VDD_330) {
2127                 int max_current_330;
2128
2129                 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
2130
2131                 max_current_330 = ((max_current_caps &
2132                                    SDHCI_MAX_CURRENT_330_MASK) >>
2133                                    SDHCI_MAX_CURRENT_330_SHIFT) *
2134                                    SDHCI_MAX_CURRENT_MULTIPLIER;
2135
2136                 if (max_current_330 > 150)
2137                         mmc->caps |= MMC_CAP_SET_XPC_330;
2138         }
2139         if (caps[0] & SDHCI_CAN_VDD_300) {
2140                 int max_current_300;
2141
2142                 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
2143
2144                 max_current_300 = ((max_current_caps &
2145                                    SDHCI_MAX_CURRENT_300_MASK) >>
2146                                    SDHCI_MAX_CURRENT_300_SHIFT) *
2147                                    SDHCI_MAX_CURRENT_MULTIPLIER;
2148
2149                 if (max_current_300 > 150)
2150                         mmc->caps |= MMC_CAP_SET_XPC_300;
2151         }
2152         if (caps[0] & SDHCI_CAN_VDD_180) {
2153                 int max_current_180;
2154
2155                 ocr_avail |= MMC_VDD_165_195;
2156
2157                 max_current_180 = ((max_current_caps &
2158                                    SDHCI_MAX_CURRENT_180_MASK) >>
2159                                    SDHCI_MAX_CURRENT_180_SHIFT) *
2160                                    SDHCI_MAX_CURRENT_MULTIPLIER;
2161
2162                 if (max_current_180 > 150)
2163                         mmc->caps |= MMC_CAP_SET_XPC_180;
2164         }
2165
2166         mmc->ocr_avail = ocr_avail;
2167         mmc->ocr_avail_sdio = ocr_avail;
2168         if (host->ocr_avail_sdio)
2169                 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
2170         mmc->ocr_avail_sd = ocr_avail;
2171         if (host->ocr_avail_sd)
2172                 mmc->ocr_avail_sd &= host->ocr_avail_sd;
2173         else /* normal SD controllers don't support 1.8V */
2174                 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
2175         mmc->ocr_avail_mmc = ocr_avail;
2176         if (host->ocr_avail_mmc)
2177                 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
2178
2179         if (mmc->ocr_avail == 0) {
2180                 printk(KERN_ERR "%s: Hardware doesn't report any "
2181                         "supported voltages.\n", mmc_hostname(mmc));
2182                 return -ENODEV;
2183         }
2184
2185         spin_lock_init(&host->lock);
2186
2187         /*
2188          * Maximum number of segments. Depends on if the hardware
2189          * can do scatter/gather or not.
2190          */
2191         if (host->flags & SDHCI_USE_ADMA)
2192                 mmc->max_segs = 128;
2193         else if (host->flags & SDHCI_USE_SDMA)
2194                 mmc->max_segs = 1;
2195         else /* PIO */
2196                 mmc->max_segs = 128;
2197
2198         /*
2199          * Maximum request size in bytes. Limited by the DMA boundary
2200          * size (512 KiB).
2201          */
2202         mmc->max_req_size = 524288;
2203
2204         /*
2205          * Maximum segment size. Could be one segment with the maximum number
2206          * of bytes. When doing hardware scatter/gather, each entry cannot
2207          * be larger than 64 KiB though.
2208          */
2209         if (host->flags & SDHCI_USE_ADMA) {
2210                 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
2211                         mmc->max_seg_size = 65535;
2212                 else
2213                         mmc->max_seg_size = 65536;
2214         } else {
2215                 mmc->max_seg_size = mmc->max_req_size;
2216         }
2217
2218         /*
2219          * Maximum block size. This varies from controller to controller and
2220          * is specified in the capabilities register.
2221          */
2222         if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
2223                 mmc->max_blk_size = 2;
2224         } else {
2225                 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
2226                                 SDHCI_MAX_BLOCK_SHIFT;
2227                 if (mmc->max_blk_size >= 3) {
2228                         printk(KERN_WARNING "%s: Invalid maximum block size, "
2229                                 "assuming 512 bytes\n", mmc_hostname(mmc));
2230                         mmc->max_blk_size = 0;
2231                 }
2232         }
2233
2234         mmc->max_blk_size = 512 << mmc->max_blk_size;
2235
2236         /*
2237          * Maximum block count.
2238          */
2239         mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
2240
2241         /*
2242          * Init tasklets.
2243          */
2244         tasklet_init(&host->card_tasklet,
2245                 sdhci_tasklet_card, (unsigned long)host);
2246         tasklet_init(&host->finish_tasklet,
2247                 sdhci_tasklet_finish, (unsigned long)host);
2248
2249         setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
2250
2251         ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
2252                 mmc_hostname(mmc), host);
2253         if (ret)
2254                 goto untasklet;
2255
2256         host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
2257         if (IS_ERR(host->vmmc)) {
2258                 printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
2259                 host->vmmc = NULL;
2260         } else {
2261                 regulator_enable(host->vmmc);
2262         }
2263
2264         sdhci_init(host, 0);
2265
2266 #ifdef CONFIG_MMC_DEBUG
2267         sdhci_dumpregs(host);
2268 #endif
2269
2270 #ifdef SDHCI_USE_LEDS_CLASS
2271         snprintf(host->led_name, sizeof(host->led_name),
2272                 "%s::", mmc_hostname(mmc));
2273         host->led.name = host->led_name;
2274         host->led.brightness = LED_OFF;
2275         host->led.default_trigger = mmc_hostname(mmc);
2276         host->led.brightness_set = sdhci_led_control;
2277
2278         ret = led_classdev_register(mmc_dev(mmc), &host->led);
2279         if (ret)
2280                 goto reset;
2281 #endif
2282
2283         mmiowb();
2284
2285         mmc_add_host(mmc);
2286
2287         printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
2288                 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
2289                 (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
2290                 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
2291
2292         sdhci_enable_card_detection(host);
2293
2294         return 0;
2295
2296 #ifdef SDHCI_USE_LEDS_CLASS
2297 reset:
2298         sdhci_reset(host, SDHCI_RESET_ALL);
2299         free_irq(host->irq, host);
2300 #endif
2301 untasklet:
2302         tasklet_kill(&host->card_tasklet);
2303         tasklet_kill(&host->finish_tasklet);
2304
2305         return ret;
2306 }
2307
2308 EXPORT_SYMBOL_GPL(sdhci_add_host);
2309
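     /*
      * sdhci_remove_host - unregister the host.  @dead is non-zero when
      * the controller is no longer reachable (e.g. surprise removal); in
      * that case register accesses are avoided and any in-flight request
      * is failed with -ENOMEDIUM before tearing everything down.
      */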
2310 void sdhci_remove_host(struct sdhci_host *host, int dead)
2311 {
2312         unsigned long flags;
2313
2314         if (dead) {
2315                 spin_lock_irqsave(&host->lock, flags);
2316
2317                 host->flags |= SDHCI_DEVICE_DEAD;
2318
2319                 if (host->mrq) {
2320                         printk(KERN_ERR "%s: Controller removed during "
2321                                 "transfer!\n", mmc_hostname(host->mmc));
2322
2323                         host->mrq->cmd->error = -ENOMEDIUM;
2324                         tasklet_schedule(&host->finish_tasklet);
2325                 }
2326
2327                 spin_unlock_irqrestore(&host->lock, flags);
2328         }
2329
2330         sdhci_disable_card_detection(host);
2331
2332         mmc_remove_host(host->mmc);
2333
2334 #ifdef SDHCI_USE_LEDS_CLASS
2335         led_classdev_unregister(&host->led);
2336 #endif
2337
2338         if (!dead)
2339                 sdhci_reset(host, SDHCI_RESET_ALL);
2340
2341         free_irq(host->irq, host);
2342
2343         del_timer_sync(&host->timer);
2344
2345         tasklet_kill(&host->card_tasklet);
2346         tasklet_kill(&host->finish_tasklet);
2347
2348         if (host->vmmc) {
2349                 regulator_disable(host->vmmc);
2350                 regulator_put(host->vmmc);
2351         }
2352
2353         kfree(host->adma_desc);
2354         kfree(host->align_buffer);
2355
2356         host->adma_desc = NULL;
2357         host->align_buffer = NULL;
2358 }
2359
2360 EXPORT_SYMBOL_GPL(sdhci_remove_host);
2361
2362 void sdhci_free_host(struct sdhci_host *host)
2363 {
2364         mmc_free_host(host->mmc);
2365 }
2366
2367 EXPORT_SYMBOL_GPL(sdhci_free_host);
2368
2369 /*****************************************************************************\
2370  *                                                                           *
2371  * Driver init/exit                                                          *
2372  *                                                                           *
2373 \*****************************************************************************/
2374
2375 static int __init sdhci_drv_init(void)
2376 {
2377         printk(KERN_INFO DRIVER_NAME
2378                 ": Secure Digital Host Controller Interface driver\n");
2379         printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
2380
2381         return 0;
2382 }
2383
2384 static void __exit sdhci_drv_exit(void)
2385 {
2386 }
2387
2388 module_init(sdhci_drv_init);
2389 module_exit(sdhci_drv_exit);
2390
2391 module_param(debug_quirks, uint, 0444);
2392
2393 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
2394 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
2395 MODULE_LICENSE("GPL");
2396
2397 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");