Merge branch 'develop' of /home/rockchip/kernel into develop
[firefly-linux-kernel-4.4.55.git] / drivers / mmc / card / block.c
1 /*
2  * Block driver for media (i.e., flash cards)
3  *
4  * Copyright 2002 Hewlett-Packard Company
5  * Copyright 2005-2008 Pierre Ossman
6  *
7  * Use consistent with the GNU GPL is permitted,
8  * provided that this copyright notice is
9  * preserved in its entirety in all copies and derived works.
10  *
11  * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12  * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13  * FITNESS FOR ANY PARTICULAR PURPOSE.
14  *
15  * Many thanks to Alessandro Rubini and Jonathan Corbet!
16  *
17  * Author:  Andrew Christian
18  *          28 May 2002
19  */
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/errno.h>
27 #include <linux/hdreg.h>
28 #include <linux/kdev_t.h>
29 #include <linux/blkdev.h>
30 #include <linux/mutex.h>
31 #include <linux/scatterlist.h>
32 #include <linux/string_helpers.h>
33
34 #include <linux/mmc/card.h>
35 #include <linux/mmc/host.h>
36 #include <linux/mmc/mmc.h>
37 #include <linux/mmc/sd.h>
38
39 #include <asm/system.h>
40 #include <asm/uaccess.h>
41
42 #include "queue.h"
43
MODULE_ALIAS("mmc:block");

#if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
/* Rockchip RK29 vendor addition: serializes block-device open/release. */
static DEFINE_MUTEX(block_mutex); /* added by xbw at 2011-04-21 */
#endif
49
/*
 * max 8 partitions per card: the 256 available minors are split into
 * ranges of (1 << MMC_SHIFT) minors, one range per card.
 */
#define MMC_SHIFT	3
#define MMC_NUM_MINORS	(256 >> MMC_SHIFT)

/* One bit per minor range; set while a card owns that range. */
static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS);
57
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;		/* protects request completion below */
	struct gendisk	*disk;		/* block-layer disk for this card */
	struct mmc_queue queue;		/* per-card request queue + thread */

	unsigned int	usage;		/* refcount, guarded by open_lock */
	unsigned int	read_only;	/* nonzero if writes must be refused */
};

/* Guards mmc_blk_data::usage and the get/put lifetime of the struct. */
static DEFINE_MUTEX(open_lock);
71
72 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
73 {
74         struct mmc_blk_data *md;
75
76         mutex_lock(&open_lock);
77         md = disk->private_data;
78         if (md && md->usage == 0)
79                 md = NULL;
80         if (md)
81                 md->usage++;
82         mutex_unlock(&open_lock);
83
84         return md;
85 }
86
/*
 * Drop a reference taken by mmc_blk_get()/mmc_blk_alloc().  On the last
 * put, tear down the queue, release the card's minor range and free the
 * mmc_blk_data.
 */
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devmaj = MAJOR(disk_devt(md->disk));
		int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;

		/*
		 * If the disk was never registered, disk_devt() is 0;
		 * recover the index from first_minor instead.
		 */
		if (!devmaj)
			devidx = md->disk->first_minor >> MMC_SHIFT;

		blk_cleanup_queue(md->queue.queue);

		/* Release the minor range for reuse by future cards. */
		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
107
/*
 * Block-device open.  Takes a reference on the per-card data; refuses
 * write opens of read-only cards with -EROFS, and returns -ENXIO if the
 * card is already gone.
 */
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

#if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
	mutex_lock(&block_mutex); /* added by xbw at 2011-04-21 */
#endif

	if (md) {
		/* usage == 2: first opener (1 ref from alloc + this one) —
		 * revalidate in case media changed. */
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			/* Drop the reference we just took before failing. */
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
#if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
	mutex_unlock(&block_mutex);
#endif

	return ret;
}
133
/*
 * Block-device release: drop the reference taken in mmc_blk_open().
 */
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

#if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
	mutex_lock(&block_mutex); /* added by xbw at 2011-04-21 */
#endif

	mmc_blk_put(md);

#if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
	mutex_unlock(&block_mutex);
#endif

	return 0;
}
150
151 static int
152 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
153 {
154         geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
155         geo->heads = 4;
156         geo->sectors = 16;
157         return 0;
158 }
159
/* Block-device operations exposed to the block layer. */
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};
166
/*
 * Everything needed to describe one block-layer request to the MMC
 * core: the request wrapper plus the data command, optional stop
 * command (CMD12) and the data transfer descriptor.
 */
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};
173
/*
 * Ask an SD card how many blocks of the last write transfer completed
 * successfully (APP_CMD + SD_APP_SEND_NUM_WR_BLKS).  Used after a write
 * error to acknowledge the known-good leading part of a request.
 *
 * Returns the block count, or (u32)-1 on any failure.
 */
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	unsigned int timeout_us;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));

	/* CMD55: announce that the next command is an app command. */
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	/* Non-SPI hosts must see APP_CMD acknowledged in the R1 status. */
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

	/* Be generous: 100x the CSD access time... */
	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	/* ...but never more than 100ms total. */
	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	/* The answer is a single 4-byte data block. */
	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;

	/* Heap buffer so the host controller can DMA into it. */
	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	/* The card reports the count big-endian. */
	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
246
247 #if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD) //Deleted by xbw@2011-03-21
248 //static u32 get_card_status(struct mmc_card *card, struct request *req)
249 //{
250 //   return 0;
251 //}
252
253 #else
254 static u32 get_card_status(struct mmc_card *card, struct request *req)
255 {
256         struct mmc_command cmd;
257         int err;
258
259         memset(&cmd, 0, sizeof(struct mmc_command));
260         cmd.opcode = MMC_SEND_STATUS;
261         if (!mmc_host_is_spi(card->host))
262                 cmd.arg = card->rca << 16;
263         cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
264         err = mmc_wait_for_cmd(card->host, &cmd, 0);
265         if (err)
266                 printk(KERN_DEBUG "%s: error %d sending status comand",
267                        req->rq_disk->disk_name, err);
268         return cmd.resp[0];
269 }
270 #endif
271
/*
 * Program the card's block length to 512 bytes (MMC_SET_BLOCKLEN).
 * Block-addressed (high-capacity) cards have a fixed 512-byte block
 * size and ignore this command, so it is skipped for them.
 *
 * Returns 0 on success, -EINVAL if the card refuses the command.
 */
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	struct mmc_command cmd;
	int err;

	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
	if (mmc_card_blockaddr(card))
		return 0;

	mmc_claim_host(card->host);
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = 512;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	/* Retry up to 5 times; mmc_wait_for_cmd() initializes the rest
	 * of cmd (resp, data, retries), so no memset is needed here. */
	err = mmc_wait_for_cmd(card->host, &cmd, 5);
	mmc_release_host(card->host);

	if (err) {
		printk(KERN_ERR "%s: unable to set block size to %d: %d\n",
			md->disk->disk_name, cmd.arg, err);
		return -EINVAL;
	}

	return 0;
}
297
298
/*
 * Issue one block-layer request to the card.
 *
 * Builds an mmc_blk_request describing a single- or multi-block
 * read/write, submits it synchronously, and completes the request in
 * (possibly partial) chunks until all sectors are done.  After a read
 * error the transfer is retried one sector at a time (disable_multi)
 * so the exact failing sector can be isolated.
 *
 * Returns 1 when the request completed successfully, 0 when it had to
 * be failed via the cmd_err path.
 *
 * NOTE(review): with CONFIG_SDMMC_RK29 (and not _OLD) set, the vendor
 * tree compiles out the local 'cmd', the error/status recovery and the
 * post-write busy-wait below; recovery then relies solely on
 * brq.cmd.retries = 2.
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, disable_multi = 0;

#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	/* Deferred resume: wake the bus lazily on first I/O after suspend. */
	if (mmc_bus_needs_resume(card->host)) {
		mmc_resume_bus(card->host);
		mmc_blk_set_blksize(md, card);
	}
#endif

	mmc_claim_host(card->host);

	do {
	    #if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
		//struct mmc_command cmd;//Deleted by xbw@2011-03-21
		#else
		struct mmc_command cmd;
		#endif

		u32 readcmd, writecmd, status = 0;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		#if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
		brq.cmd.retries = 2; /* support read-write retry; added by xbw@2011-03-21 */
	#endif

		/* Byte address for standard cards, sector address for
		 * block-addressed (high capacity) cards. */
		brq.cmd.arg = blk_rq_pos(req);
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 512;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = blk_rq_sectors(req);

		/*
		 * The block layer doesn't support all sector count
		 * restrictions, so we need to be prepared for too big
		 * requests.
		 */
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		/*
		 * After a read error, we redo the request one sector at a time
		 * in order to accurately determine which sectors can be read
		 * successfully.
		 */
		if (disable_multi && brq.data.blocks > 1)
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request.
			 */
			if (!mmc_host_is_spi(card->host)
					|| rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		/*
		 * Adjust the sg list so it is the same size as the
		 * request.
		 */
		if (brq.data.blocks != blk_rq_sectors(req)) {
			int i, data_size = brq.data.blocks << 9;
			struct scatterlist *sg;

			/* Truncate the sg list at data_size bytes; the last
			 * entry may be shortened to land exactly on the
			 * boundary. */
			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

		mmc_queue_bounce_pre(mq);

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);

	#if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
	/* not turn CMD18 to CMD17. deleted by xbw at 2011-04-21 */

	#else

		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
		if (brq.cmd.error || brq.data.error || brq.stop.error) {
			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
				/* Redo read one sector at a time */
				printk(KERN_DEBUG "%s: retrying using single "
				       "block read\n", req->rq_disk->disk_name);
				disable_multi = 1;
				continue;
			}
			status = get_card_status(card, req);
		} else if (disable_multi == 1) {
			disable_multi = 0;
		}
	#endif

		if (brq.cmd.error) {
			printk(KERN_DEBUG "%s: error %d sending read/write "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.cmd.error,
			       brq.cmd.resp[0], status);
		}

		if (brq.data.error) {
			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
				/* 'Stop' response contains card status */
				status = brq.mrq.stop->resp[0];
			printk(KERN_DEBUG "%s: error %d transferring data,"
			       " sector %u, nr %u, card status %#x\n",
			       req->rq_disk->disk_name, brq.data.error,
			       (unsigned)blk_rq_pos(req),
			       (unsigned)blk_rq_sectors(req), status);
		}

		if (brq.stop.error) {
			printk(KERN_DEBUG "%s: error %d sending stop command, "
			       "response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.stop.error,
			       brq.stop.resp[0], status);
		}

 #if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)
 /* Deleted by xbw@2011-03-21 */

 #else
		/* After a write, poll CMD13 until the card has left the
		 * programming state (state 7) and is ready for data. */
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_DEBUG "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				(R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}
#endif

		if (brq.cmd.error || brq.stop.error || brq.data.error) {
			if (rq_data_dir(req) == READ) {
				/*
				 * After an error, we redo I/O one sector at a
				 * time, so we only reach here after trying to
				 * read a single sector.
				 */
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, -EIO, brq.data.blksz);
				spin_unlock_irq(&md->lock);
				continue;
			}
			goto cmd_err;
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_release_host(card->host);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	mmc_release_host(card->host);

	/* Fail whatever is left of the request. */
	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}
555
556
557 static inline int mmc_blk_readonly(struct mmc_card *card)
558 {
559         return mmc_card_readonly(card) ||
560                !(card->csd.cmdclass & CCC_BLOCK_WRITE);
561 }
562
563 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
564 {
565         struct mmc_blk_data *md;
566         int devidx, ret;
567
568         devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
569         if (devidx >= MMC_NUM_MINORS)
570                 return ERR_PTR(-ENOSPC);
571         __set_bit(devidx, dev_use);
572
573         md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
574         if (!md) {
575                 ret = -ENOMEM;
576                 goto out;
577         }
578
579
580         /*
581          * Set the read-only status based on the supported commands
582          * and the write protect switch.
583          */
584         md->read_only = mmc_blk_readonly(card);
585
586         md->disk = alloc_disk(1 << MMC_SHIFT);
587         if (md->disk == NULL) {
588                 ret = -ENOMEM;
589                 goto err_kfree;
590         }
591
592         spin_lock_init(&md->lock);
593         md->usage = 1;
594
595         ret = mmc_init_queue(&md->queue, card, &md->lock);
596         if (ret)
597                 goto err_putdisk;
598
599         md->queue.issue_fn = mmc_blk_issue_rq;
600         md->queue.data = md;
601
602         md->disk->major = MMC_BLOCK_MAJOR;
603         md->disk->first_minor = devidx << MMC_SHIFT;
604         md->disk->fops = &mmc_bdops;
605         md->disk->private_data = md;
606         md->disk->queue = md->queue.queue;
607         md->disk->driverfs_dev = &card->dev;
608
609         /*
610          * As discussed on lkml, GENHD_FL_REMOVABLE should:
611          *
612          * - be set for removable media with permanent block devices
613          * - be unset for removable block devices with permanent media
614          *
615          * Since MMC block devices clearly fall under the second
616          * case, we do not set GENHD_FL_REMOVABLE.  Userspace
617          * should use the block device creation/destruction hotplug
618          * messages to tell when the card is present.
619          */
620
621         sprintf(md->disk->disk_name, "mmcblk%d", devidx);
622
623 #if defined(CONFIG_SDMMC_RK29) && !defined(CONFIG_SDMMC_RK29_OLD)  
624         printk("%s..%d **** devidx=%d, dev_use[0]=%lu, disk_name=%s *** ==xbw[%s]==\n",\
625             __FUNCTION__,__LINE__, devidx, dev_use[0], md->disk->disk_name,mmc_hostname(card->host));
626 #endif
627     
628         blk_queue_logical_block_size(md->queue.queue, 512);
629
630         if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
631                 /*
632                  * The EXT_CSD sector count is in number or 512 byte
633                  * sectors.
634                  */
635                 set_capacity(md->disk, card->ext_csd.sectors);
636         } else {
637                 /*
638                  * The CSD capacity field is in units of read_blkbits.
639                  * set_capacity takes units of 512 bytes.
640                  */
641                 set_capacity(md->disk,
642                         card->csd.capacity << (card->csd.read_blkbits - 9));
643         }
644         return md;
645
646  err_putdisk:
647         put_disk(md->disk);
648  err_kfree:
649         kfree(md);
650  out:
651         return ERR_PTR(ret);
652 }
653
/*
 * Driver probe: called when the MMC core has detected a card.  Checks
 * the card can do block reads, allocates the block device, programs the
 * block size and registers the disk with the block layer.
 */
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int err;

	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	/* Human-readable capacity for the boot log, e.g. "3.79 GiB". */
	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	mmc_set_drvdata(card, md);
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	mmc_set_bus_resume_policy(card->host, 1);
#endif
	add_disk(md->disk);
	return 0;

 out:
	/* Stop the queue thread first, then drop the alloc reference. */
	mmc_cleanup_queue(&md->queue);
	mmc_blk_put(md);

	return err;
}
694
/*
 * Driver remove: unregister the disk, drain the queue, and drop the
 * reference held since mmc_blk_alloc() (frees on last put).
 */
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		/* Stop new requests from getting into the queue */
		del_gendisk(md->disk);

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);

		mmc_blk_put(md);
	}
	mmc_set_drvdata(card, NULL);
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
	mmc_set_bus_resume_policy(card->host, 0);
#endif
}
713
714 #ifdef CONFIG_PM
715 static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
716 {
717         struct mmc_blk_data *md = mmc_get_drvdata(card);
718
719         if (md) {
720                 mmc_queue_suspend(&md->queue);
721         }
722         return 0;
723 }
724
/*
 * PM resume hook: re-program the card's block size (unless deferred
 * resume will do it lazily on first I/O) and restart the queue.
 */
static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
		mmc_blk_set_blksize(md, card);
#endif
		mmc_queue_resume(&md->queue);
	}
	return 0;
}
737 #else
738 #define mmc_blk_suspend NULL
739 #define mmc_blk_resume  NULL
740 #endif
741
/* MMC bus driver: binds this block driver to detected cards. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
751
752 static int __init mmc_blk_init(void)
753 {
754         int res;
755
756         res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
757         if (res)
758                 goto out;
759
760         res = mmc_register_driver(&mmc_driver);
761         if (res)
762                 goto out2;
763
764         return 0;
765  out2:
766         unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
767  out:
768         return res;
769 }
770
/* Module exit: unregister in reverse order of mmc_blk_init(). */
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
776
777 module_init(mmc_blk_init);
778 module_exit(mmc_blk_exit);
779
780 MODULE_LICENSE("GPL");
781 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
782