MTD: merge read and write into one request (max size 1024 KB). ZYF
firefly-linux-kernel-4.4.55.git: drivers/mtd/mtd_blkdevs.c
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
21
22 #include <linux/kernel.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/list.h>
26 #include <linux/fs.h>
27 #include <linux/mtd/blktrans.h>
28 #include <linux/mtd/mtd.h>
29 #include <linux/blkdev.h>
30 #include <linux/blkpg.h>
31 #include <linux/spinlock.h>
32 #include <linux/hdreg.h>
33 #include <linux/init.h>
34 #include <linux/mutex.h>
35 #include <linux/kthread.h>
36 #include <asm/uaccess.h>
37
38 #include "mtdcore.h"
39
40 static LIST_HEAD(blktrans_majors);
41 static DEFINE_MUTEX(blktrans_ref_mutex);
42
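/*
 * MTD_MERGE selects the vendor (zyf) request handling below: when non-zero,
 * mtd_blktrans_thread() passes each block request to the translation layer as
 * a small number of large, contiguous transfers (up to MTD_RW_SECTORS sectors,
 * i.e. 2048 * 512 bytes = 1024 KB per request, matching the commit title
 * above) instead of one block at a time. Define it as 0 to fall back to the
 * original per-segment do_blktrans_request() path.
 */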
#define MTD_MERGE                       1
static void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        dev->disk->private_data = NULL;
        blk_cleanup_queue(dev->rq);
        put_disk(dev->disk);
        list_del(&dev->list);
        kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
        struct mtd_blktrans_dev *dev;

        mutex_lock(&blktrans_ref_mutex);
        dev = disk->private_data;

        if (!dev)
                goto unlock;
        kref_get(&dev->ref);
unlock:
        mutex_unlock(&blktrans_ref_mutex);
        return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        mutex_lock(&blktrans_ref_mutex);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&blktrans_ref_mutex);
}

#if (MTD_MERGE == 0)
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;
#if 0
        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
#else   /* modified by zyf for capacities >= 4GB, 20110120 */
        block = blk_rq_pos(req);
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
        if (tr->blkshift != 9) {
                if (tr->blkshift > 9)
                        block = blk_rq_pos(req) >> (tr->blkshift - 9);
                else
                        block = blk_rq_pos(req) << (9 - tr->blkshift);
        }
#endif

        buf = req->buffer;

        if (req->cmd_type != REQ_TYPE_FS)
                return -EIO;

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return -EIO;

        if (req->cmd_flags & REQ_DISCARD)
                return tr->discard(dev, block, nsect);

        switch (rq_data_dir(req)) {
        case READ:
                /* for (; nsect > 0; nsect--, block++, buf += tr->blksize) */
                if (tr->readsect(dev, block, nsect, buf))
                        return -EIO;
                rq_flush_dcache_pages(req);
                return 0;
        case WRITE:
                if (!tr->writesect)
                        return -EIO;

                rq_flush_dcache_pages(req);
                /* for (; nsect > 0; nsect--, block++, buf += tr->blksize) */
                if (tr->writesect(dev, block, nsect, buf))
                        return -EIO;
                return 0;
        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return -EIO;
        }
}

int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        if (kthread_should_stop())
                return 1;

        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

static int mtd_blktrans_thread(void *arg)
{
        struct mtd_blktrans_dev *dev = arg;
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request_queue *rq = dev->rq;
        struct request *req = NULL;
        int background_done = 0;

        spin_lock_irq(rq->queue_lock);

        while (!kthread_should_stop()) {
                int res;

                dev->bg_stop = false;
                if (!req && !(req = blk_fetch_request(rq))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(rq->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(rq->queue_lock);
                                /*
                                 * Do background processing just once per idle
                                 * period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (kthread_should_stop())
                                set_current_state(TASK_RUNNING);

                        spin_unlock_irq(rq->queue_lock);
                        schedule();
                        spin_lock_irq(rq->queue_lock);
                        continue;
                }

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                if (!__blk_end_request_cur(req, res))
                        req = NULL;

                background_done = 0;
        }

        if (req)
                __blk_end_request_all(req, -EIO);

        spin_unlock_irq(rq->queue_lock);

        return 0;
}
#else

#define MTD_RW_SECTORS  (2048)          /* 2048 = BLK_SAFE_MAX_SECTORS + 1 */
static char *mtd_rw_buffer;             /* [MTD_RW_SECTORS*512] __attribute__((aligned(4096))); */
struct mutex mtd_rw_buffer_lock;
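/*
 * mtd_rw_buffer is a shared 1 MB (MTD_RW_SECTORS * 512 byte) bounce buffer,
 * allocated in register_mtd_blktrans(). The merged read path in
 * mtd_blktrans_thread() reads a whole request into it with a single
 * readsect() call whenever the request's bio segments are not contiguous in
 * memory, then copies the data out segment by segment; mtd_rw_buffer_lock
 * serialises that use of the shared buffer. If the allocation fails, the
 * buffer stays NULL and reads take the per-run path used for writes.
 */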
/*
 * Returns 1 and sets *pbuf to the first segment's address when every bio
 * segment of @req is virtually contiguous with the previous one (so the
 * request can be serviced with a single transfer); returns 0 otherwise.
 */
static int req_check_buffer_align(struct request *req, char **pbuf)
{
        int nr_vec = 0;
        struct bio_vec *bv;
        struct req_iterator iter;
        char *buffer;
        char *firstbuf = NULL;
        char *nextbuffer = NULL;
        unsigned long block, nsect;

        block = blk_rq_pos(req);
        nsect = blk_rq_cur_bytes(req) >> 9;

        rq_for_each_segment(bv, req, iter) {
                buffer = page_address(bv->bv_page) + bv->bv_offset;
                if (firstbuf == NULL)
                        firstbuf = buffer;
                nr_vec++;
                if (nextbuffer != NULL && nextbuffer != buffer)
                        return 0;
                nextbuffer = buffer + bv->bv_len;
        }
        *pbuf = firstbuf;
        return 1;
}

int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        if (kthread_should_stop())
                return 1;

        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

static int mtd_blktrans_thread(void *arg)
{
        struct mtd_blktrans_dev *dev = arg;
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request_queue *rq = dev->rq;
        struct request *req = NULL;
        int background_done = 0;

        unsigned long block, data_len;
        char *buf = NULL;
        struct req_iterator rq_iter;
        struct bio_vec *bvec;
        int cmd_flag;

        set_user_nice(current, -20);
        spin_lock_irq(rq->queue_lock);

        while (!kthread_should_stop()) {
                int res;

                dev->bg_stop = false;
                if (!req && !(req = blk_fetch_request(rq))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(rq->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(rq->queue_lock);
                                /*
                                 * Do background processing just once per idle
                                 * period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (kthread_should_stop())
                                set_current_state(TASK_RUNNING);

                        spin_unlock_irq(rq->queue_lock);
                        schedule();
                        spin_lock_irq(rq->queue_lock);
                        continue;
                }

                if ((req->cmd_type != REQ_TYPE_FS) ||
                    (blk_rq_pos(req) + blk_rq_sectors(req) >
                     get_capacity(req->rq_disk))) {
                        __blk_end_request_all(req, -EIO);
                        req = NULL;
                        background_done = 0;
                        continue;
                }

                spin_unlock_irq(rq->queue_lock);
                mutex_lock(&dev->lock);

                block = blk_rq_pos(req);
                data_len = 0;
                buf = NULL;
                res = 0;
                cmd_flag = rq_data_dir(req);

                if (cmd_flag == READ && mtd_rw_buffer) {
                        /*
                         * Read path: issue the whole request as one readsect()
                         * call, bouncing through mtd_rw_buffer when the bio
                         * segments are not contiguous in memory.
                         */
                        unsigned long nsect;

                        buf = mtd_rw_buffer;
                        req_check_buffer_align(req, &buf);
                        nsect = req->__data_len >> 9;   /* blk_rq_bytes(req) in sectors */
                        if (nsect > MTD_RW_SECTORS) {
                                printk(KERN_ERR "%s..%d::nsect=%lu, too large, may be an error!\n",
                                       __FILE__, __LINE__, nsect);
                                nsect = MTD_RW_SECTORS;
                                /* deliberately hang so the oversized request is noticed */
                                while (1)
                                        ;
                        }
                        if (buf == mtd_rw_buffer)
                                mutex_lock(&mtd_rw_buffer_lock);
                        if (tr->readsect(dev, block, nsect, buf))
                                res = -EIO;
                        if (buf == mtd_rw_buffer) {
                                /* copy the bounced data back into the request's pages */
                                char *p = buf;

                                rq_for_each_segment(bvec, req, rq_iter) {
                                        memcpy(page_address(bvec->bv_page) + bvec->bv_offset,
                                               p, bvec->bv_len);
                                        flush_dcache_page(bvec->bv_page); /* zyf: was rq_flush_dcache_pages(req) */
                                        p += bvec->bv_len;
                                }
                                mutex_unlock(&mtd_rw_buffer_lock);
                        }
                } else {
                        /*
                         * Write path (and reads without a bounce buffer):
                         * coalesce contiguous bio segments into runs and issue
                         * one readsect()/writesect() call per run.
                         */
                        rq_for_each_segment(bvec, req, rq_iter) {
                                flush_dcache_page(bvec->bv_page); /* zyf: was rq_flush_dcache_pages(req) */
                                if ((page_address(bvec->bv_page) + bvec->bv_offset) ==
                                    (buf + data_len)) {
                                        /* segment extends the current run */
                                        data_len += bvec->bv_len;
                                } else {
                                        /* flush the finished run, then start a new one */
                                        if (data_len) {
                                                switch (cmd_flag) {
                                                case READ:
                                                        if (tr->readsect(dev, block, data_len >> 9, buf))
                                                                res = -EIO;
                                                        break;
                                                case WRITE:
                                                        if (tr->writesect(dev, block, data_len >> 9, buf))
                                                                res = -EIO;
                                                        break;
                                                default:
                                                        res = -EIO;
                                                        break;
                                                }
                                        }
                                        block += data_len >> 9;
                                        buf = page_address(bvec->bv_page) + bvec->bv_offset;
                                        data_len = bvec->bv_len;
                                }
                        }

                        /* flush the final run */
                        if (data_len) {
                                switch (cmd_flag) {
                                case READ:
                                        if (tr->readsect(dev, block, data_len >> 9, buf))
                                                res = -EIO;
                                        break;
                                case WRITE:
                                        if (tr->writesect(dev, block, data_len >> 9, buf))
                                                res = -EIO;
                                        break;
                                default:
                                        res = -EIO;
                                        break;
                                }
                        }
                }

                mutex_unlock(&dev->lock);
                spin_lock_irq(rq->queue_lock);
                __blk_end_request_all(req, res);
                req = NULL;
                background_done = 0;
        }

        if (req)
                __blk_end_request_all(req, -EIO);

        spin_unlock_irq(rq->queue_lock);

        return 0;
}
#endif

static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_dev *dev;
        struct request *req = NULL;

        dev = rq->queuedata;

        if (!dev)
                while ((req = blk_fetch_request(rq)) != NULL)
                        __blk_end_request_all(req, -ENODEV);
        else {
                dev->bg_stop = true;
                wake_up_process(dev->thread);
        }
}

static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = 0;

        if (!dev)
                return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/

        mutex_lock(&dev->lock);

        if (dev->open)
                goto unlock;

        kref_get(&dev->ref);
        __module_get(dev->tr->owner);

        if (!dev->mtd)
                goto unlock;

        if (dev->tr->open) {
                ret = dev->tr->open(dev);
                if (ret)
                        goto error_put;
        }

        ret = __get_mtd_device(dev->mtd);
        if (ret)
                goto error_release;

unlock:
        dev->open++;
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;

error_release:
        if (dev->tr->release)
                dev->tr->release(dev);
error_put:
        module_put(dev->tr->owner);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
        int ret = 0;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (--dev->open)
                goto unlock;

        kref_put(&dev->ref, blktrans_dev_release);
        module_put(dev->tr->owner);

        if (dev->mtd) {
                ret = dev->tr->release ? dev->tr->release(dev) : 0;
                __put_mtd_device(dev->mtd);
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        switch (cmd) {
        case BLKFLSBUF:
                ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
                break;
        default:
                ret = -ENOTTY;
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static const struct block_device_operations mtd_blktrans_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        mutex_lock(&blktrans_ref_mutex);
        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        mutex_unlock(&blktrans_ref_mutex);
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number. */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26)) {
                mutex_unlock(&blktrans_ref_mutex);
                goto error1;
        }

        list_add_tail(&new->list, &tr->devs);
 added:
        mutex_unlock(&blktrans_ref_mutex);

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        /* Create gendisk */
        ret = -ENOMEM;
        gd = alloc_disk(1 << tr->part_bits);

        if (!gd)
                goto error2;

        new->disk = gd;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_blktrans_ops;

        if (tr->part_bits)
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        else
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);

        /* 2.5 has capacity in units of 512 bytes while still
           having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
        /* set_capacity(gd, (new->size * tr->blksize) >> 9); */
        set_capacity(gd, (new->size >> 9) * tr->blksize);       /* modified by zyf for capacities >= 4GB, 20110120 */

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

        if (!new->rq)
                goto error3;

        new->rq->queuedata = new;
        blk_queue_logical_block_size(new->rq, tr->blksize);

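        /*
         * With request merging enabled, the queue limits below let the block
         * layer build requests as large as the bounce buffer (MTD_RW_SECTORS
         * sectors = 1024 KB) and with as many segments, so that
         * mtd_blktrans_thread() can service each request with very few
         * readsect()/writesect() calls.
         */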
#if (MTD_MERGE == 1)
        blk_queue_max_hw_sectors(new->rq, MTD_RW_SECTORS);
        /* blk_queue_max_segment_size(new->rq, MTD_RW_SECTORS); */
        blk_queue_max_segments(new->rq, MTD_RW_SECTORS);       /* / PAGE_CACHE_SIZE */
#endif

        if (tr->discard) {
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
                new->rq->limits.max_discard_sectors = UINT_MAX;
        }

        gd->queue = new->rq;

        /* Create processing thread */
        /* TODO: workqueue ? */
        new->thread = kthread_run(mtd_blktrans_thread, new,
                        "%s%d", tr->name, new->mtd->index);
        if (IS_ERR(new->thread)) {
                ret = PTR_ERR(new->thread);
                goto error4;
        }
        gd->driverfs_dev = &new->mtd->dev;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);

        if (new->disk_attributes) {
                ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                                        new->disk_attributes);
                WARN_ON(ret);
        }
        return 0;
error4:
        blk_cleanup_queue(new->rq);
error3:
        put_disk(new->disk);
error2:
        list_del(&new->list);
error1:
        return ret;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        if (old->disk_attributes)
                sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
                                                old->disk_attributes);

        /* Stop new requests from arriving */
        del_gendisk(old->disk);

        /* Stop the thread */
        kthread_stop(old->thread);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        blk_start_queue(old->rq);
        spin_unlock_irqrestore(&old->queue_lock, flags);

        /* If the device is currently open, tell the trans driver to close it,
           then put the mtd device, and don't touch it again */
        mutex_lock(&old->lock);
        if (old->open) {
                if (old->tr->release)
                        old->tr->release(old);
                __put_mtd_device(old->mtd);
        }

        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;
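        /*
         * With request merging enabled, set up the shared bounce buffer used
         * by the merged read path: 1 MB (MTD_RW_SECTORS * 512 bytes) from the
         * DMA-capable zone. A failed allocation is apparently not treated as
         * fatal here; mtd_rw_buffer simply stays NULL and
         * mtd_blktrans_thread() falls back to issuing reads per contiguous
         * run of segments.
         */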
#if (MTD_MERGE != 0)
        mutex_init(&mtd_rw_buffer_lock);
        mtd_rw_buffer = kmalloc(MTD_RW_SECTORS * 512, GFP_KERNEL | GFP_DMA);
#endif
        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from fucking
           us over. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }

        if (ret)
                tr->major = ret;

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);

        mutex_unlock(&mtd_table_mutex);
        return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");