mtd: merge read and write segments within one request
author Zhaoyifeng <zyf@rock-chips.com>
Wed, 17 Apr 2013 07:59:10 +0000 (15:59 +0800)
committer Zhaoyifeng <zyf@rock-chips.com>
Wed, 17 Apr 2013 07:59:10 +0000 (15:59 +0800)
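
When MTD_MERGE is enabled, the MTD block transfer thread handles each
request as a whole rather than one 512-byte sector at a time: runs of
contiguous bio segments are coalesced into single multi-sector
tr->readsect()/tr->writesect() calls, and reads whose segments are not
contiguous are bounced through a shared 4KB-aligned buffer guarded by
mtd_rw_buffer_lock.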
drivers/mtd/mtd_blkdevs.c

index 444d0bd8f23d0193fb11ad22534a57fcb543c659..f647b046bad331b260154cfa6762d864a43e6aff 100755 (executable)
@@ -40,6 +40,9 @@
 static LIST_HEAD(blktrans_majors);
 static DEFINE_MUTEX(blktrans_ref_mutex);
 
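+/*
+ * Build-time switch: when non-zero, block requests are handled by the
+ * merging transfer thread below instead of the per-sector
+ * do_blktrans_request() path.
+ */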
+#define MTD_MERGE	1
+
 static void blktrans_dev_release(struct kref *kref)
 {
        struct mtd_blktrans_dev *dev =
@@ -74,7 +77,7 @@ static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
        mutex_unlock(&blktrans_ref_mutex);
 }
 
-
+#if (MTD_MERGE == 0)
 static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
@@ -203,7 +206,224 @@ static int mtd_blktrans_thread(void *arg)
 
        return 0;
 }
+#else
+
+#define MTD_RW_SECTORS	(BLK_SAFE_MAX_SECTORS + 1)
+/* Shared bounce buffer for requests whose bio segments are not
+ * contiguous in memory; serialised by mtd_rw_buffer_lock. */
+static char mtd_rw_buffer[MTD_RW_SECTORS * 512] __attribute__((aligned(4096)));
+static struct mutex mtd_rw_buffer_lock;
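+/*
+ * Check whether the bio segments of @req form one contiguous range of
+ * kernel virtual addresses.  Returns 1 and stores the address of the
+ * first segment in *@pbuf if so; returns 0 (leaving *@pbuf untouched)
+ * otherwise.
+ */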
+static int req_check_buffer_align(struct request *req, char **pbuf)
+{
+	struct bio_vec *bv;
+	struct req_iterator iter;
+	char *buffer;
+	char *firstbuf = NULL;
+	char *nextbuffer = NULL;
+
+	rq_for_each_segment(bv, req, iter) {
+		buffer = page_address(bv->bv_page) + bv->bv_offset;
+		if (!firstbuf)
+			firstbuf = buffer;
+		/* Each segment must start where the previous one ended. */
+		if (nextbuffer && nextbuffer != buffer)
+			return 0;
+		nextbuffer = buffer + bv->bv_len;
+	}
+	*pbuf = firstbuf;
+	return 1;
+}
+
+int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
+{
+       if (kthread_should_stop())
+               return 1;
+
+       return dev->bg_stop;
+}
+EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
+
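+/*
+ * Merged-request variant of the transfer thread: rather than handing
+ * the translation layer one sector at a time, it issues a single
+ * readsect/writesect call per run of contiguous segments.
+ */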
+static int mtd_blktrans_thread(void *arg)
+{
+       struct mtd_blktrans_dev *dev = arg;
+       struct mtd_blktrans_ops *tr = dev->tr;
+       struct request_queue *rq = dev->rq;
+       struct request *req = NULL;
+       int background_done = 0;
+
+	unsigned long block, data_len;
+	char *buf = NULL;
+	struct req_iterator rq_iter;
+	struct bio_vec *bvec;
+	int cmd_flag;
+
+	/* Transfers are latency-sensitive: run at the highest non-RT priority. */
+	set_user_nice(current, -20);
+       spin_lock_irq(rq->queue_lock);
+
+       while (!kthread_should_stop()) {
+               int res;
+
+               dev->bg_stop = false;
+               if (!req && !(req = blk_fetch_request(rq))) {
+                       if (tr->background && !background_done) {
+                               spin_unlock_irq(rq->queue_lock);
+                               mutex_lock(&dev->lock);
+                               tr->background(dev);
+                               mutex_unlock(&dev->lock);
+                               spin_lock_irq(rq->queue_lock);
+                               /*
+                                * Do background processing just once per idle
+                                * period.
+                                */
+                               background_done = !dev->bg_stop;
+                               continue;
+                       }
+                       set_current_state(TASK_INTERRUPTIBLE);
+
+                       if (kthread_should_stop())
+                               set_current_state(TASK_RUNNING);
+
+                       spin_unlock_irq(rq->queue_lock);
+                       schedule();
+                       spin_lock_irq(rq->queue_lock);
+                       continue;
+               }
+		if (req->cmd_type != REQ_TYPE_FS ||
+		    blk_rq_pos(req) + blk_rq_sectors(req) >
+		    get_capacity(req->rq_disk)) {
+			__blk_end_request_all(req, -EIO);
+			req = NULL;
+			background_done = 0;
+			continue;
+		}
+		spin_unlock_irq(rq->queue_lock);
+		mutex_lock(&dev->lock);
+
+		block = blk_rq_pos(req);
+		data_len = 0;
+		buf = NULL;
+		res = 0;
+		cmd_flag = rq_data_dir(req);
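+		/*
+		 * Reads transfer the whole request with one readsect call,
+		 * via the bounce buffer when the bio segments are not
+		 * contiguous; writes are merged per contiguous run below.
+		 */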
+		if (cmd_flag == READ) {
+			unsigned long nsect;
+
+			buf = mtd_rw_buffer;
+			/* On success buf points at the first bio segment;
+			 * otherwise it stays on the bounce buffer. */
+			req_check_buffer_align(req, &buf);
+			nsect = blk_rq_bytes(req) >> 9;
+			if (nsect > MTD_RW_SECTORS) {
+				printk(KERN_ERR "%s..%d::nsect=%lu, too large, may be error!\n",
+				       __FILE__, __LINE__, nsect);
+				nsect = MTD_RW_SECTORS;
+				/* Deliberate hang: a request this large would
+				 * overflow the bounce buffer. */
+				while (1)
+					;
+			}
+			if (buf == mtd_rw_buffer)
+				mutex_lock(&mtd_rw_buffer_lock);
+			if (tr->readsect(dev, block, nsect, buf))
+				res = -EIO;
+			if (buf == mtd_rw_buffer) {
+				/* Scatter the bounce buffer out to the
+				 * request's bio segments. */
+				char *p = buf;
+
+				rq_for_each_segment(bvec, req, rq_iter) {
+					memcpy(page_address(bvec->bv_page) +
+					       bvec->bv_offset, p, bvec->bv_len);
+					flush_dcache_page(bvec->bv_page);
+					p += bvec->bv_len;
+				}
+				mutex_unlock(&mtd_rw_buffer_lock);
+			}
+		} else {
+			/*
+			 * Walk the bio segments, accumulating each run of
+			 * contiguous buffers, and issue one writesect call
+			 * per run.
+			 */
+			rq_for_each_segment(bvec, req, rq_iter) {
+				flush_dcache_page(bvec->bv_page);
+				if ((page_address(bvec->bv_page) +
+				     bvec->bv_offset) == (buf + data_len)) {
+					/* Segment extends the current run. */
+					data_len += bvec->bv_len;
+				} else {
+					/* Gap found: flush the finished run. */
+					if (data_len &&
+					    tr->writesect(dev, block,
+							  data_len >> 9, buf))
+						res = -EIO;
+					block += data_len >> 9;
+					buf = page_address(bvec->bv_page) +
+					      bvec->bv_offset;
+					data_len = bvec->bv_len;
+				}
+			}
+
+			/* Flush the final run. */
+			if (data_len &&
+			    tr->writesect(dev, block, data_len >> 9, buf))
+				res = -EIO;
+		}
+		mutex_unlock(&dev->lock);
+		spin_lock_irq(rq->queue_lock);
+               __blk_end_request_all(req, res);
+               req = NULL;
+               background_done = 0;
+       }
+
+       if (req)
+               __blk_end_request_all(req, -EIO);
+
+       spin_unlock_irq(rq->queue_lock);
+
+       return 0;
+}
+#endif
+
 static void mtd_blktrans_request(struct request_queue *rq)
 {
        struct mtd_blktrans_dev *dev;
@@ -556,7 +776,9 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
 {
        struct mtd_info *mtd;
        int ret;
-
+#if (MTD_MERGE != 0)
+	mutex_init(&mtd_rw_buffer_lock);
+#endif
+
        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from fucking
           us over. */