2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 * Copyright 2009-2011 Freescale Semiconductor, Inc.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 #include <linux/kernel.h>
24 #include <linux/init.h>
25 #include <linux/ioport.h>
27 #include <linux/of_device.h>
28 #include <linux/of_gpio.h>
29 #include <linux/platform_device.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/dmaengine.h>
34 #include <linux/highmem.h>
35 #include <linux/clk.h>
36 #include <linux/err.h>
37 #include <linux/completion.h>
38 #include <linux/mmc/host.h>
39 #include <linux/mmc/mmc.h>
40 #include <linux/mmc/sdio.h>
41 #include <linux/gpio.h>
42 #include <linux/regulator/consumer.h>
43 #include <linux/module.h>
44 #include <linux/fsl/mxs-dma.h>
45 #include <linux/pinctrl/consumer.h>
46 #include <linux/stmp_device.h>
47 #include <linux/mmc/mxs-mmc.h>
48 #include <linux/spi/mxs-spi.h>
/* Name under which this platform driver registers. */
50 #define DRIVER_NAME "mxs-mmc"
/*
 * Mask of every SSP CTRL1 interrupt-status bit the IRQ handler services:
 * SDIO, response error/timeout, data timeout/CRC, receive timeout and
 * FIFO under/overrun.  Used to acknowledge (clear) pending bits in one
 * write.  (Backslash-continued macro; do not split.)
 */
52 #define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
53 BM_SSP_CTRL1_RESP_ERR_IRQ | \
54 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
55 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
56 BM_SSP_CTRL1_DATA_CRC_IRQ | \
57 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
58 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
59 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
61 /* card detect polling timeout */
62 #define MXS_MMC_DETECT_TIMEOUT (HZ/2)
/*
 * NOTE(review): these are fields of the driver's per-host private struct
 * (presumably struct mxs_mmc_host); the struct header and several members
 * are outside this view — confirm against the full file.
 */
/* Request/command/data currently in flight (NULL when idle). */
68 struct mmc_request *mrq;
69 struct mmc_command *cmd;
70 struct mmc_data *data;
/* DMA channel used for all SSP transfers, and its mxs-dma glue data. */
73 struct dma_chan *dmach;
74 struct mxs_dma_data dma_data;
/* Direction of the current slave DMA transfer (or DMA_TRANS_NONE). */
76 enum dma_transfer_direction slave_dirn;
/* PIO words (CTRL0/CMD0/CMD1) prepended to DMA descriptors. */
77 u32 ssp_pio_words[SSP_PIO_NUM];
/* Bus width value programmed into CTRL0 (see mxs_mmc_set_ios()). */
79 unsigned char bus_width;
/*
 * mxs_mmc_get_ro - mmc_host_ops .get_ro: report write-protect state.
 *
 * Reads the WP GPIO level and, when host->wp_inverted is set, inverts it.
 * NOTE(review): the fallback return when no valid WP GPIO is configured
 * and the final return are missing from this chunk — verify in full file.
 */
86 static int mxs_mmc_get_ro(struct mmc_host *mmc)
88 	struct mxs_mmc_host *host = mmc_priv(mmc);
/* No write-protect GPIO wired up: cannot determine RO state here. */
91 	if (!gpio_is_valid(host->wp_gpio))
94 	ret = gpio_get_value(host->wp_gpio);
/* Board may wire the switch active-low; flip the reading if so. */
96 	if (host->wp_inverted)
/*
 * mxs_mmc_get_cd - mmc_host_ops .get_cd: report card presence.
 *
 * The SSP STATUS register's CARD_DETECT bit is active-low (bit set means
 * no card), hence the logical negation.
 */
102 static int mxs_mmc_get_cd(struct mmc_host *mmc)
104 	struct mxs_mmc_host *host = mmc_priv(mmc);
105 	struct mxs_ssp *ssp = &host->ssp;
107 	return !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
108 			BM_SSP_STATUS_CARD_DETECT);
/*
 * mxs_mmc_reset - soft-reset the SSP block and program SD/MMC defaults.
 *
 * Performs the stmp-style block reset, then configures CTRL1 for SD/MMC
 * mode (mode 0x3), 8-bit word length, DMA, polarity and the error IRQ
 * enables; TIMING gets a maximum timeout with a divide-by-2 clock.
 * Re-arms the SDIO IRQ bits if they were enabled before the reset.
 */
111 static void mxs_mmc_reset(struct mxs_mmc_host *host)
113 	struct mxs_ssp *ssp = &host->ssp;
116 	stmp_reset_block(ssp->base);
118 	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
/* 0x3 = SD/MMC mode, 0x7 = 8-bit word length (register encodings). */
119 	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
120 		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
121 		BM_SSP_CTRL1_DMA_ENABLE |
122 		BM_SSP_CTRL1_POLARITY |
123 		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
124 		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
125 		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
126 		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
127 		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
/* Maximum hardware timeout; divide-by-2, rate 0 => conservative clock. */
129 	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
130 	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
131 	       BF_SSP(0, TIMING_CLOCK_RATE),
132 	       ssp->base + HW_SSP_TIMING(ssp));
/* Preserve SDIO interrupt state across the block reset. */
134 	if (host->sdio_irq_en) {
135 		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
136 		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
139 	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
140 	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
/* Forward declaration: needed by mxs_mmc_request_done() for stop cmds. */
143 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
144 			      struct mmc_command *cmd);
/*
 * mxs_mmc_request_done - finish the in-flight request (DMA completion path).
 *
 * Reads the command response from the SDRESP registers (reversed word
 * order for 136-bit responses), unmaps the data scatterlist, accounts
 * transferred bytes, issues a pending stop command if present, and
 * finally notifies the core via mmc_request_done().
 * NOTE(review): error-branch and host-state-clearing lines are missing
 * from this chunk — verify against the full file.
 */
146 static void mxs_mmc_request_done(struct mxs_mmc_host *host)
148 	struct mmc_command *cmd = host->cmd;
149 	struct mmc_data *data = host->data;
150 	struct mmc_request *mrq = host->mrq;
151 	struct mxs_ssp *ssp = &host->ssp;
153 	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
/* 136-bit response: hardware stores it low-word-first, core wants resp[0] high. */
154 		if (mmc_resp_type(cmd) & MMC_RSP_136) {
155 			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
156 			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
157 			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
158 			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
160 			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
165 		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
166 			     data->sg_len, host->dma_dir);
168 		 * If there was an error on any block, we mark all
169 		 * data blocks as being in error.
172 			data->bytes_xfered = data->blocks * data->blksz;
174 			data->bytes_xfered = 0;
/* Chain the stop command before reporting the request complete. */
178 		mxs_mmc_start_cmd(host, mrq->stop);
184 	mmc_request_done(host->mmc, mrq);
/*
 * mxs_mmc_dma_irq_callback - dmaengine completion callback.
 * Runs when the descriptor submitted in mxs_mmc_prep_dma() completes;
 * simply hands off to the common request-completion path.
 */
187 static void mxs_mmc_dma_irq_callback(void *param)
189 	struct mxs_mmc_host *host = param;
191 	mxs_mmc_request_done(host);
/*
 * mxs_mmc_irq_handler - SSP error/SDIO interrupt handler.
 *
 * Latches CTRL1 status, acknowledges all handled bits via the SCT clear
 * register, signals an SDIO card interrupt if raised, then translates
 * hardware error bits into cmd->error / data->error codes.
 * NOTE(review): some error assignments and the return statement are
 * missing from this chunk — verify against the full file.
 */
194 static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
196 	struct mxs_mmc_host *host = dev_id;
197 	struct mmc_command *cmd = host->cmd;
198 	struct mmc_data *data = host->data;
199 	struct mxs_ssp *ssp = &host->ssp;
202 	spin_lock(&host->lock);
204 	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
/* Ack only the bits we handle; write to the set/clear alias register. */
205 	writel(stat & MXS_MMC_IRQ_BITS,
206 	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
208 	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
209 		mmc_signal_sdio_irq(host->mmc);
211 	spin_unlock(&host->lock);
/* Map hardware status bits onto core error codes. */
213 	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
214 		cmd->error = -ETIMEDOUT;
215 	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
219 	if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
220 		    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
221 		data->error = -ETIMEDOUT;
222 	else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
223 		data->error = -EILSEQ;
224 	else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
225 			 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
/*
 * mxs_mmc_prep_dma - build a slave-DMA descriptor for the current phase.
 *
 * Two modes: when a data transfer is pending, maps data->sg and preps a
 * normal slave transfer; otherwise the ssp_pio_words[] array is passed
 * *as if* it were a scatterlist — an mxs-dma-specific convention for
 * sending PIO register writes (CTRL0/CMD0/CMD1) through the DMA chain.
 * On prep failure the mapped scatterlist is unmapped again.
 * NOTE(review): the branch structure and return statements are partially
 * missing from this chunk — verify against the full file.
 */
232 static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
233 	struct mxs_mmc_host *host, unsigned long flags)
235 	struct dma_async_tx_descriptor *desc;
236 	struct mmc_data *data = host->data;
237 	struct scatterlist * sgl;
242 		dma_map_sg(mmc_dev(host->mmc), data->sg,
243 			   data->sg_len, host->dma_dir);
245 		sg_len = data->sg_len;
/* PIO-words trick: mxs-dma interprets this pointer as register values. */
248 		sgl = (struct scatterlist *) host->ssp_pio_words;
249 		sg_len = SSP_PIO_NUM;
252 	desc = dmaengine_prep_slave_sg(host->dmach,
253 				sgl, sg_len, host->slave_dirn, flags);
255 		desc->callback = mxs_mmc_dma_irq_callback;
256 		desc->callback_param = host;
/* Prep failed: undo the mapping done above. */
259 		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
260 			     data->sg_len, host->dma_dir);
/*
 * mxs_mmc_bc - issue a broadcast (no-response) command, e.g. GO_IDLE.
 *
 * Loads CTRL0/CMD0/CMD1 into the PIO words, preps a PIO-only descriptor
 * (DMA_TRANS_NONE) and kicks the DMA channel.  APPEND_8CYC adds the
 * required 8 clock cycles after the command.
 */
266 static void mxs_mmc_bc(struct mxs_mmc_host *host)
268 	struct mmc_command *cmd = host->cmd;
269 	struct dma_async_tx_descriptor *desc;
270 	u32 ctrl0, cmd0, cmd1;
272 	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
273 	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
/* Keep the clock running between commands so SDIO IRQs can be seen. */
276 	if (host->sdio_irq_en) {
277 		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
278 		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
281 	host->ssp_pio_words[0] = ctrl0;
282 	host->ssp_pio_words[1] = cmd0;
283 	host->ssp_pio_words[2] = cmd1;
/* PIO-only phase: no data movement, descriptor carries register writes. */
284 	host->dma_dir = DMA_NONE;
285 	host->slave_dirn = DMA_TRANS_NONE;
286 	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
290 	dmaengine_submit(desc);
291 	dma_async_issue_pending(host->dmach);
295 	dev_warn(mmc_dev(host->mmc),
296 		 "%s: failed to prep dma\n", __func__);
/*
 * mxs_mmc_ac - issue an addressed command with response but no data.
 *
 * Derives the CTRL0 response/CRC flags from the core's response type,
 * then, like mxs_mmc_bc(), ships CTRL0/CMD0/CMD1 as PIO words through a
 * DMA_TRANS_NONE descriptor.
 */
299 static void mxs_mmc_ac(struct mxs_mmc_host *host)
301 	struct mmc_command *cmd = host->cmd;
302 	struct dma_async_tx_descriptor *desc;
303 	u32 ignore_crc, get_resp, long_resp;
304 	u32 ctrl0, cmd0, cmd1;
/* Translate MMC_RSP_* flags into the SSP CTRL0 bit encoding. */
306 	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
307 			0 : BM_SSP_CTRL0_IGNORE_CRC;
308 	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
309 			BM_SSP_CTRL0_GET_RESP : 0;
310 	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
311 			BM_SSP_CTRL0_LONG_RESP : 0;
313 	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
314 	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
/* Keep the clock running between commands so SDIO IRQs can be seen. */
317 	if (host->sdio_irq_en) {
318 		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
319 		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
322 	host->ssp_pio_words[0] = ctrl0;
323 	host->ssp_pio_words[1] = cmd0;
324 	host->ssp_pio_words[2] = cmd1;
325 	host->dma_dir = DMA_NONE;
326 	host->slave_dirn = DMA_TRANS_NONE;
327 	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
331 	dmaengine_submit(desc);
332 	dma_async_issue_pending(host->dmach);
336 	dev_warn(mmc_dev(host->mmc),
337 		 "%s: failed to prep dma\n", __func__);
/*
 * mxs_ns_to_ssp_ticks - convert a nanosecond timeout into SSP TIMING
 * "timeout" units (hardware counts in multiples of 4096 clock ticks).
 *
 * Works in milliseconds to keep the intermediate products within
 * unsigned int range.  WARN_ON flags a timeout that rounds down to zero
 * (would disable the hardware timeout entirely).
 * NOTE(review): the return statement is missing from this chunk.
 */
340 static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
342 	const unsigned int ssp_timeout_mul = 4096;
344 	 * Calculate ticks in ms since ns are large numbers
347 	const unsigned int clock_per_ms = clock_rate / 1000;
348 	const unsigned int ms = ns / 1000;
349 	const unsigned int ticks = ms * clock_per_ms;
350 	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
352 	WARN_ON(ssp_ticks == 0);
/*
 * mxs_mmc_adtc - issue an addressed data-transfer command (read/write).
 *
 * Builds CTRL0/CMD0/CMD1 (response flags, bus width, transfer size and
 * block geometry — programmed differently on old vs. new SSP silicon),
 * programs the data timeout, sends the register values as a PIO-words
 * descriptor, then chains the actual data descriptor and starts DMA.
 * NOTE(review): several lines (error paths, cmd1 setup, host->data
 * assignment) are missing from this chunk — verify against full file.
 */
356 static void mxs_mmc_adtc(struct mxs_mmc_host *host)
358 	struct mmc_command *cmd = host->cmd;
359 	struct mmc_data *data = cmd->data;
360 	struct dma_async_tx_descriptor *desc;
361 	struct scatterlist *sgl = data->sg, *sg;
362 	unsigned int sg_len = data->sg_len;
365 	unsigned short dma_data_dir, timeout;
366 	enum dma_transfer_direction slave_dirn;
367 	unsigned int data_size = 0, log2_blksz;
368 	unsigned int blocks = data->blocks;
370 	struct mxs_ssp *ssp = &host->ssp;
372 	u32 ignore_crc, get_resp, long_resp, read;
373 	u32 ctrl0, cmd0, cmd1, val;
/* Translate MMC_RSP_* flags into the SSP CTRL0 bit encoding. */
375 	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
376 			0 : BM_SSP_CTRL0_IGNORE_CRC;
377 	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
378 			BM_SSP_CTRL0_GET_RESP : 0;
379 	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
380 			BM_SSP_CTRL0_LONG_RESP : 0;
/* Pick DMA mapping and slave direction from the transfer direction. */
382 	if (data->flags & MMC_DATA_WRITE) {
383 		dma_data_dir = DMA_TO_DEVICE;
384 		slave_dirn = DMA_MEM_TO_DEV;
387 		dma_data_dir = DMA_FROM_DEVICE;
388 		slave_dirn = DMA_DEV_TO_MEM;
389 		read = BM_SSP_CTRL0_READ;
392 	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
393 		ignore_crc | get_resp | long_resp |
394 		BM_SSP_CTRL0_DATA_XFER | read |
395 		BM_SSP_CTRL0_WAIT_FOR_IRQ |
398 	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
400 	/* get logarithm to base 2 of block size for setting register */
401 	log2_blksz = ilog2(data->blksz);
404 	 * take special care of the case that data size from data->sg
405 	 * is not equal to blocks x blksz
407 	for_each_sg(sgl, sg, sg_len, i)
408 		data_size += sg->length;
410 	if (data_size != data->blocks * data->blksz)
413 	/* xfer count, block size and count need to be set differently */
/* Older SSPs encode geometry in CTRL0/CMD0; newer ones have dedicated regs. */
414 	if (ssp_is_old(ssp)) {
415 		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
416 		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
417 			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
419 		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
420 		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
421 		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
422 		       ssp->base + HW_SSP_BLOCK_SIZE);
/* Stop-transmission and SDIO extended R/W need 8 extra clock cycles. */
425 	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
426 	    (cmd->opcode == SD_IO_RW_EXTENDED))
427 		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
431 	if (host->sdio_irq_en) {
432 		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
433 		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
436 	/* set the timeout count */
437 	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
438 	val = readl(ssp->base + HW_SSP_TIMING(ssp));
439 	val &= ~(BM_SSP_TIMING_TIMEOUT);
440 	val |= BF_SSP(timeout, TIMING_TIMEOUT);
441 	writel(val, ssp->base + HW_SSP_TIMING(ssp));
/* Phase 1: ship register values as PIO words (no data movement yet). */
444 	host->ssp_pio_words[0] = ctrl0;
445 	host->ssp_pio_words[1] = cmd0;
446 	host->ssp_pio_words[2] = cmd1;
447 	host->dma_dir = DMA_NONE;
448 	host->slave_dirn = DMA_TRANS_NONE;
449 	desc = mxs_mmc_prep_dma(host, 0);
/* Phase 2: chain the real data transfer; host->data must be unset here. */
454 	WARN_ON(host->data != NULL);
456 	host->dma_dir = dma_data_dir;
457 	host->slave_dirn = slave_dirn;
458 	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
462 	dmaengine_submit(desc);
463 	dma_async_issue_pending(host->dmach);
466 	dev_warn(mmc_dev(host->mmc),
467 		 "%s: failed to prep dma\n", __func__);
/*
 * mxs_mmc_start_cmd - dispatch a command by its MMC command class.
 * Routes to mxs_mmc_bc/ac/adtc based on mmc_cmd_type(); unknown types
 * only get a warning.  NOTE(review): the switch case labels are missing
 * from this chunk — verify against the full file.
 */
470 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
471 			      struct mmc_command *cmd)
475 	switch (mmc_cmd_type(cmd)) {
489 		dev_warn(mmc_dev(host->mmc),
490 			 "%s: unknown MMC command\n", __func__);
/*
 * mxs_mmc_request - mmc_host_ops .request: start a new MMC request.
 * Only one request may be in flight at a time (WARN on overlap), then
 * the first command is dispatched.
 */
495 static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
497 	struct mxs_mmc_host *host = mmc_priv(mmc);
499 	WARN_ON(host->mrq != NULL);
501 	mxs_mmc_start_cmd(host, mrq->cmd);
/*
 * mxs_mmc_set_clk_rate - program the SSP clock divider for a target rate.
 *
 * Searches even dividers 2..254 for the first (divide, rate) pair whose
 * "rate" field fits in 8 bits; SCK = ssp_clk / divide / (rate + 1).
 * The achieved rate is cached in ssp->clk_rate for timeout calculations.
 */
504 static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
506 	struct mxs_ssp *ssp = &host->ssp;
507 	unsigned int ssp_clk, ssp_sck;
508 	u32 clock_divide, clock_rate;
511 	ssp_clk = clk_get_rate(ssp->clk);
/* Hardware requires an even CLOCK_DIVIDE; CLOCK_RATE must fit in 8 bits. */
513 	for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
514 		clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
515 		clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
516 		if (clock_rate <= 255)
520 	if (clock_divide > 254) {
521 		dev_err(mmc_dev(host->mmc),
522 			"%s: cannot set clock to %d\n", __func__, rate);
526 	ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
/* Read-modify-write TIMING so the timeout field is preserved. */
528 	val = readl(ssp->base + HW_SSP_TIMING(ssp));
529 	val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
530 	val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
531 	val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
532 	writel(val, ssp->base + HW_SSP_TIMING(ssp));
/* Remember the actual SCK for mxs_ns_to_ssp_ticks(). */
534 	ssp->clk_rate = ssp_sck;
536 	dev_dbg(mmc_dev(host->mmc),
537 		"%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
538 		__func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
/*
 * mxs_mmc_set_ios - mmc_host_ops .set_ios: apply bus width and clock.
 * NOTE(review): the branch bodies assigning host->bus_width and the
 * clock!=0 guard are missing from this chunk — verify full file.
 */
541 static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
543 	struct mxs_mmc_host *host = mmc_priv(mmc);
545 	if (ios->bus_width == MMC_BUS_WIDTH_8)
547 	else if (ios->bus_width == MMC_BUS_WIDTH_4)
553 		mxs_mmc_set_clk_rate(host, ios->clock);
/*
 * mxs_mmc_enable_sdio_irq - mmc_host_ops .enable_sdio_irq.
 *
 * Sets/clears the SDIO IRQ check and enable bits via the SCT set/clear
 * register aliases under host->lock.  When enabling, an already-pending
 * SDIO interrupt in STATUS is signalled immediately so it is not lost.
 */
556 static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
558 	struct mxs_mmc_host *host = mmc_priv(mmc);
559 	struct mxs_ssp *ssp = &host->ssp;
562 	spin_lock_irqsave(&host->lock, flags);
564 	host->sdio_irq_en = enable;
567 		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
568 		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
569 		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
570 		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
/* Don't miss an SDIO interrupt that fired while it was disabled. */
572 		if (readl(ssp->base + HW_SSP_STATUS(ssp)) &
573 		    BM_SSP_STATUS_SDIO_IRQ)
574 			mmc_signal_sdio_irq(host->mmc);
577 		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
578 		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
579 		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
580 		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
583 	spin_unlock_irqrestore(&host->lock, flags);
/* mmc core callback table for this host controller. */
586 static const struct mmc_host_ops mxs_mmc_ops = {
587 	.request = mxs_mmc_request,
588 	.get_ro = mxs_mmc_get_ro,
589 	.get_cd = mxs_mmc_get_cd,
590 	.set_ios = mxs_mmc_set_ios,
591 	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
/*
 * mxs_mmc_dma_filter - dma_request_channel() filter: accept only the
 * APBH channel whose id matches host->dma_channel, and attach our
 * mxs_dma_data as the channel's private glue data.
 * NOTE(review): the return statements are missing from this chunk.
 */
594 static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
596 	struct mxs_mmc_host *host = param;
598 	if (!mxs_dma_is_apbh(chan))
601 	if (chan->chan_id != host->dma_channel)
604 	chan->private = &host->dma_data;
/* Non-DT platform ids: map device name to SSP silicon variant. */
609 static struct platform_device_id mxs_ssp_ids[] = {
612 		.driver_data = IMX23_SSP,
615 		.driver_data = IMX28_SSP,
620 MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);
/* DT match table: compatible string carries the SSP variant as .data. */
622 static const struct of_device_id mxs_mmc_dt_ids[] = {
623 	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
624 	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
627 MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
/*
 * mxs_mmc_probe - platform driver probe.
 *
 * Gathers MMIO/DMA/IRQ resources, allocates the mmc_host, maps the SSP
 * registers, resolves the SSP variant and DMA channel (from DT or from
 * platform data/resources), enables the optional vmmc regulator, pinctrl
 * and clock, requests the DMA channel, fills in mmc core parameters
 * (caps, bus width, OCR, max transfer geometry), installs the error IRQ
 * handler and registers the host.
 * NOTE(review): many lines (labels, gotos, several assignments and the
 * DT/non-DT branch structure) are missing from this chunk — do not infer
 * control flow from what is visible; verify against the full file.
 */
629 static int mxs_mmc_probe(struct platform_device *pdev)
631 	const struct of_device_id *of_id =
632 			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
633 	struct device_node *np = pdev->dev.of_node;
634 	struct mxs_mmc_host *host;
635 	struct mmc_host *mmc;
636 	struct resource *iores, *dmares;
637 	struct mxs_mmc_platform_data *pdata;
638 	struct pinctrl *pinctrl;
639 	int ret = 0, irq_err, irq_dma;
641 	struct regulator *reg_vmmc;
642 	enum of_gpio_flags flags;
/* Mandatory resources: MMIO region plus error and DMA interrupts. */
645 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
646 	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
647 	irq_err = platform_get_irq(pdev, 0);
648 	irq_dma = platform_get_irq(pdev, 1);
649 	if (!iores || irq_err < 0 || irq_dma < 0)
652 	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
656 	host = mmc_priv(mmc);
658 	ssp->dev = &pdev->dev;
659 	ssp->base = devm_request_and_ioremap(&pdev->dev, iores);
661 		ret = -EADDRNOTAVAIL;
/* DT path: variant from the OF match, DMA channel from a DT property. */
666 		ssp->devid = (enum mxs_ssp_id) of_id->data;
668 		 * TODO: This is a temporary solution and should be changed
669 		 * to use generic DMA binding later when the helpers get in.
671 		ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
674 			dev_err(mmc_dev(host->mmc),
675 				"failed to get dma channel\n");
/* Non-DT path: variant from the id table, channel from the DMA resource. */
679 		ssp->devid = pdev->id_entry->driver_data;
680 		host->dma_channel = dmares->start;
684 	host->sdio_irq_en = 0;
/* vmmc regulator is optional; enable it only when present. */
686 	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
687 	if (!IS_ERR(reg_vmmc)) {
688 		ret = regulator_enable(reg_vmmc);
691 				"Failed to enable vmmc regulator: %d\n", ret);
696 	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
697 	if (IS_ERR(pinctrl)) {
698 		ret = PTR_ERR(pinctrl);
702 	ssp->clk = clk_get(&pdev->dev, NULL);
703 	if (IS_ERR(ssp->clk)) {
704 		ret = PTR_ERR(ssp->clk);
707 	clk_prepare_enable(ssp->clk);
712 	dma_cap_set(DMA_SLAVE, mask);
713 	host->dma_data.chan_irq = irq_dma;
714 	host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
716 		dev_err(mmc_dev(host->mmc),
717 			"%s: failed to request dma\n", __func__);
721 	/* set mmc core parameters */
722 	mmc->ops = &mxs_mmc_ops;
723 	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
724 		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
726 	pdata =	mmc_dev(host->mmc)->platform_data;
/* Bus width and WP GPIO come from DT when present, else platform data. */
729 		of_property_read_u32(np, "bus-width", &bus_width);
731 			mmc->caps |= MMC_CAP_4_BIT_DATA;
732 		else if (bus_width == 8)
733 			mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
734 		host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0,
736 		if (flags & OF_GPIO_ACTIVE_LOW)
737 			host->wp_inverted = 1;
739 		if (pdata->flags & SLOTF_8_BIT_CAPABLE)
740 			mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
741 		if (pdata->flags & SLOTF_4_BIT_CAPABLE)
742 			mmc->caps |= MMC_CAP_4_BIT_DATA;
743 		host->wp_gpio = pdata->wp_gpio;
747 	mmc->f_max = 288000000;
748 	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Transfer limits differ between old (imx23) and new (imx28) SSP. */
751 	mmc->max_blk_size = 1 << 0xf;
752 	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
753 	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
754 	mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
756 	platform_set_drvdata(pdev, mmc);
758 	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
763 	spin_lock_init(&host->lock);
765 	ret = mmc_add_host(mmc);
769 	dev_info(mmc_dev(host->mmc), "initialized\n");
/* Error unwind: release DMA channel and disable the clock. */
775 	dma_release_channel(host->dmach);
777 	clk_disable_unprepare(ssp->clk);
/*
 * mxs_mmc_remove - platform driver remove: unregister the host and
 * release the DMA channel and clock acquired in probe.
 * NOTE(review): mmc_free_host/clk_put lines appear truncated here.
 */
784 static int mxs_mmc_remove(struct platform_device *pdev)
786 	struct mmc_host *mmc = platform_get_drvdata(pdev);
787 	struct mxs_mmc_host *host = mmc_priv(mmc);
788 	struct mxs_ssp *ssp = &host->ssp;
790 	mmc_remove_host(mmc);
792 	platform_set_drvdata(pdev, NULL);
795 	dma_release_channel(host->dmach);
797 	clk_disable_unprepare(ssp->clk);
/*
 * mxs_mmc_suspend - PM: suspend the mmc host, then gate the SSP clock.
 */
806 static int mxs_mmc_suspend(struct device *dev)
808 	struct mmc_host *mmc = dev_get_drvdata(dev);
809 	struct mxs_mmc_host *host = mmc_priv(mmc);
810 	struct mxs_ssp *ssp = &host->ssp;
813 	ret = mmc_suspend_host(mmc);
815 	clk_disable_unprepare(ssp->clk);
/*
 * mxs_mmc_resume - PM: re-enable the SSP clock, then resume the host
 * (mirror order of mxs_mmc_suspend()).
 */
820 static int mxs_mmc_resume(struct device *dev)
822 	struct mmc_host *mmc = dev_get_drvdata(dev);
823 	struct mxs_mmc_host *host = mmc_priv(mmc);
824 	struct mxs_ssp *ssp = &host->ssp;
827 	clk_prepare_enable(ssp->clk);
829 	ret = mmc_resume_host(mmc);
/* Legacy (non-runtime) PM callbacks. */
834 static const struct dev_pm_ops mxs_mmc_pm_ops = {
835 	.suspend	= mxs_mmc_suspend,
836 	.resume		= mxs_mmc_resume,
/* Platform driver glue: supports both platform-id and OF matching. */
840 static struct platform_driver mxs_mmc_driver = {
841 	.probe		= mxs_mmc_probe,
842 	.remove		= mxs_mmc_remove,
843 	.id_table	= mxs_ssp_ids,
846 		.owner	= THIS_MODULE,
848 		.pm	= &mxs_mmc_pm_ops,
850 		.of_match_table = mxs_mmc_dt_ids,
854 module_platform_driver(mxs_mmc_driver);
856 MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
857 MODULE_AUTHOR("Freescale Semiconductor");
858 MODULE_LICENSE("GPL");