drivers/mtd/nand/omap2.c
1 /*
2  * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
3  * Copyright © 2004 Micron Technology Inc.
4  * Copyright © 2004 David Brownell
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10
11 #include <linux/platform_device.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/delay.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/jiffies.h>
18 #include <linux/sched.h>
19 #include <linux/mtd/mtd.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/omap-dma.h>
23 #include <linux/io.h>
24 #include <linux/slab.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27
28 #ifdef CONFIG_MTD_NAND_OMAP_BCH
29 #include <linux/bch.h>
30 #include <linux/platform_data/elm.h>
31 #endif
32
33 #include <linux/platform_data/mtd-nand-omap2.h>
34
35 #define DRIVER_NAME     "omap2-nand"
36 #define OMAP_NAND_TIMEOUT_MS    5000
37
38 #define NAND_Ecc_P1e            (1 << 0)
39 #define NAND_Ecc_P2e            (1 << 1)
40 #define NAND_Ecc_P4e            (1 << 2)
41 #define NAND_Ecc_P8e            (1 << 3)
42 #define NAND_Ecc_P16e           (1 << 4)
43 #define NAND_Ecc_P32e           (1 << 5)
44 #define NAND_Ecc_P64e           (1 << 6)
45 #define NAND_Ecc_P128e          (1 << 7)
46 #define NAND_Ecc_P256e          (1 << 8)
47 #define NAND_Ecc_P512e          (1 << 9)
48 #define NAND_Ecc_P1024e         (1 << 10)
49 #define NAND_Ecc_P2048e         (1 << 11)
50
51 #define NAND_Ecc_P1o            (1 << 16)
52 #define NAND_Ecc_P2o            (1 << 17)
53 #define NAND_Ecc_P4o            (1 << 18)
54 #define NAND_Ecc_P8o            (1 << 19)
55 #define NAND_Ecc_P16o           (1 << 20)
56 #define NAND_Ecc_P32o           (1 << 21)
57 #define NAND_Ecc_P64o           (1 << 22)
58 #define NAND_Ecc_P128o          (1 << 23)
59 #define NAND_Ecc_P256o          (1 << 24)
60 #define NAND_Ecc_P512o          (1 << 25)
61 #define NAND_Ecc_P1024o         (1 << 26)
62 #define NAND_Ecc_P2048o         (1 << 27)
63
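/*
 * The TF() and P*e()/P*o() helpers below pick individual even/odd parity
 * bits out of a 32-bit word laid out like the GPMC ECC1_RESULT register
 * (the NAND_Ecc_* masks above give the bit positions) and regroup them
 * into the conventional 3-byte software ECC ordering; gen_true_ecc()
 * relies on them to rebuild the "true" ECC from the raw register layout.
 */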
64 #define TF(value)       (value ? 1 : 0)
65
66 #define P2048e(a)       (TF(a & NAND_Ecc_P2048e)        << 0)
67 #define P2048o(a)       (TF(a & NAND_Ecc_P2048o)        << 1)
68 #define P1e(a)          (TF(a & NAND_Ecc_P1e)           << 2)
69 #define P1o(a)          (TF(a & NAND_Ecc_P1o)           << 3)
70 #define P2e(a)          (TF(a & NAND_Ecc_P2e)           << 4)
71 #define P2o(a)          (TF(a & NAND_Ecc_P2o)           << 5)
72 #define P4e(a)          (TF(a & NAND_Ecc_P4e)           << 6)
73 #define P4o(a)          (TF(a & NAND_Ecc_P4o)           << 7)
74
75 #define P8e(a)          (TF(a & NAND_Ecc_P8e)           << 0)
76 #define P8o(a)          (TF(a & NAND_Ecc_P8o)           << 1)
77 #define P16e(a)         (TF(a & NAND_Ecc_P16e)          << 2)
78 #define P16o(a)         (TF(a & NAND_Ecc_P16o)          << 3)
79 #define P32e(a)         (TF(a & NAND_Ecc_P32e)          << 4)
80 #define P32o(a)         (TF(a & NAND_Ecc_P32o)          << 5)
81 #define P64e(a)         (TF(a & NAND_Ecc_P64e)          << 6)
82 #define P64o(a)         (TF(a & NAND_Ecc_P64o)          << 7)
83
84 #define P128e(a)        (TF(a & NAND_Ecc_P128e)         << 0)
85 #define P128o(a)        (TF(a & NAND_Ecc_P128o)         << 1)
86 #define P256e(a)        (TF(a & NAND_Ecc_P256e)         << 2)
87 #define P256o(a)        (TF(a & NAND_Ecc_P256o)         << 3)
88 #define P512e(a)        (TF(a & NAND_Ecc_P512e)         << 4)
89 #define P512o(a)        (TF(a & NAND_Ecc_P512o)         << 5)
90 #define P1024e(a)       (TF(a & NAND_Ecc_P1024e)        << 6)
91 #define P1024o(a)       (TF(a & NAND_Ecc_P1024o)        << 7)
92
93 #define P8e_s(a)        (TF(a & NAND_Ecc_P8e)           << 0)
94 #define P8o_s(a)        (TF(a & NAND_Ecc_P8o)           << 1)
95 #define P16e_s(a)       (TF(a & NAND_Ecc_P16e)          << 2)
96 #define P16o_s(a)       (TF(a & NAND_Ecc_P16o)          << 3)
97 #define P1e_s(a)        (TF(a & NAND_Ecc_P1e)           << 4)
98 #define P1o_s(a)        (TF(a & NAND_Ecc_P1o)           << 5)
99 #define P2e_s(a)        (TF(a & NAND_Ecc_P2e)           << 6)
100 #define P2o_s(a)        (TF(a & NAND_Ecc_P2o)           << 7)
101
102 #define P4e_s(a)        (TF(a & NAND_Ecc_P4e)           << 0)
103 #define P4o_s(a)        (TF(a & NAND_Ecc_P4o)           << 1)
104
105 #define PREFETCH_CONFIG1_CS_SHIFT       24
106 #define ECC_CONFIG_CS_SHIFT             1
107 #define CS_MASK                         0x7
108 #define ENABLE_PREFETCH                 (0x1 << 7)
109 #define DMA_MPU_MODE_SHIFT              2
110 #define ECCSIZE0_SHIFT                  12
111 #define ECCSIZE1_SHIFT                  22
112 #define ECC1RESULTSIZE                  0x1
113 #define ECCCLEAR                        0x100
114 #define ECC1                            0x1
115 #define PREFETCH_FIFOTHRESHOLD_MAX      0x40
116 #define PREFETCH_FIFOTHRESHOLD(val)     ((val) << 8)
117 #define PREFETCH_STATUS_COUNT(val)      (val & 0x00003fff)
118 #define PREFETCH_STATUS_FIFO_CNT(val)   ((val >> 24) & 0x7F)
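/*
 * GPMC_PREFETCH_STATUS fields as used below: bits 13:0 give the number of
 * bytes the engine still has to transfer, and bits 30:24 give the FIFO
 * pointer, i.e. how many bytes can currently be read from (or written to)
 * the FIFO.
 */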
119 #define STATUS_BUFF_EMPTY               0x00000001
120
121 #define OMAP24XX_DMA_GPMC               4
122
123 #define BCH8_MAX_ERROR          8       /* up to 8 bits correctable */
124 #define BCH4_MAX_ERROR          4       /* up to 4 bits correctable */
125
126 #define SECTOR_BYTES            512
127 /* 4 bit padding to make byte aligned, 56 = 52 + 4 */
128 #define BCH4_BIT_PAD            4
129 #define BCH8_ECC_MAX            ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)
130 #define BCH4_ECC_MAX            ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)
131
132 /* GPMC ecc engine settings for read */
133 #define BCH_WRAPMODE_1          1       /* BCH wrap mode 1 */
134 #define BCH8R_ECC_SIZE0         0x1a    /* ecc_size0 = 26 */
135 #define BCH8R_ECC_SIZE1         0x2     /* ecc_size1 = 2 */
136 #define BCH4R_ECC_SIZE0         0xd     /* ecc_size0 = 13 */
137 #define BCH4R_ECC_SIZE1         0x3     /* ecc_size1 = 3 */
138
139 /* GPMC ecc engine settings for write */
140 #define BCH_WRAPMODE_6          6       /* BCH wrap mode 6 */
141 #define BCH_ECC_SIZE0           0x0     /* ecc_size0 = 0, no oob protection */
142 #define BCH_ECC_SIZE1           0x20    /* ecc_size1 = 32 */
143
144 #ifdef CONFIG_MTD_NAND_OMAP_BCH
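/*
 * Expected ECC of an erased (all-0xFF) sector as computed by the BCH
 * engine; omap_elm_correct_data() compares the calculated ECC against
 * these vectors to tell erased pages from corrupted ones.
 */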
145 static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
146         0xac, 0x6b, 0xff, 0x99, 0x7b};
147 static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
148 #endif
149
150 /* oob info generated at runtime depending on ecc algorithm and layout selected */
151 static struct nand_ecclayout omap_oobinfo;
152 /* Define a generic bad/good block scan pattern which is used
153  * while scanning a device for factory-marked good/bad blocks
154  */
155 static uint8_t scan_ff_pattern[] = { 0xff };
156 static struct nand_bbt_descr bb_descrip_flashbased = {
157         .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
158         .offs = 0,
159         .len = 1,
160         .pattern = scan_ff_pattern,
161 };
162
163
164 struct omap_nand_info {
165         struct nand_hw_control          controller;
166         struct omap_nand_platform_data  *pdata;
167         struct mtd_info                 mtd;
168         struct nand_chip                nand;
169         struct platform_device          *pdev;
170
171         int                             gpmc_cs;
172         unsigned long                   phys_base;
173         unsigned long                   mem_size;
174         struct completion               comp;
175         struct dma_chan                 *dma;
176         int                             gpmc_irq_fifo;
177         int                             gpmc_irq_count;
178         enum {
179                 OMAP_NAND_IO_READ = 0,  /* read */
180                 OMAP_NAND_IO_WRITE,     /* write */
181         } iomode;
182         u_char                          *buf;
183         int                                     buf_len;
184         struct gpmc_nand_regs           reg;
185
186 #ifdef CONFIG_MTD_NAND_OMAP_BCH
187         struct bch_control             *bch;
188         struct nand_ecclayout           ecclayout;
189         bool                            is_elm_used;
190         struct device                   *elm_dev;
191         struct device_node              *of_node;
192 #endif
193 };
194
195 /**
196  * omap_prefetch_enable - configures and starts prefetch transfer
197  * @cs: cs (chip select) number
198  * @fifo_th: fifo threshold to be used for read/ write
199  * @dma_mode: dma mode enable (1) or disable (0)
200  * @u32_count: number of bytes to be transferred
201  * @is_write: prefetch read(0) or write post(1) mode
202  */
203 static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
204         unsigned int u32_count, int is_write, struct omap_nand_info *info)
205 {
206         u32 val;
207
208         if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
209                 return -1;
210
211         if (readl(info->reg.gpmc_prefetch_control))
212                 return -EBUSY;
213
214         /* Set the number of bytes to be prefetched */
215         writel(u32_count, info->reg.gpmc_prefetch_config2);
216
217         /* Select dma/mpu mode and prefetch read / post-write, enable the
218          * engine, and set which chip select the transfer is for.
219          */
220         val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
221                 PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
222                 (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
223         writel(val, info->reg.gpmc_prefetch_config1);
224
225         /*  Start the prefetch engine */
226         writel(0x1, info->reg.gpmc_prefetch_control);
227
228         return 0;
229 }
230
231 /**
232  * omap_prefetch_reset - disables and stops the prefetch engine
233  */
234 static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
235 {
236         u32 config1;
237
238         /* check if the same module/cs is trying to reset */
239         config1 = readl(info->reg.gpmc_prefetch_config1);
240         if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
241                 return -EINVAL;
242
243         /* Stop the PFPW engine */
244         writel(0x0, info->reg.gpmc_prefetch_control);
245
246         /* Reset/disable the PFPW engine */
247         writel(0x0, info->reg.gpmc_prefetch_config1);
248
249         return 0;
250 }
251
252 /**
253  * omap_hwcontrol - hardware specific access to control-lines
254  * @mtd: MTD device structure
255  * @cmd: command to device
256  * @ctrl:
257  * NAND_NCE: bit 0 -> don't care
258  * NAND_CLE: bit 1 -> Command Latch
259  * NAND_ALE: bit 2 -> Address Latch
260  *
261  * NOTE: boards may use different bits for these!!
262  */
263 static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
264 {
265         struct omap_nand_info *info = container_of(mtd,
266                                         struct omap_nand_info, mtd);
267
268         if (cmd != NAND_CMD_NONE) {
269                 if (ctrl & NAND_CLE)
270                         writeb(cmd, info->reg.gpmc_nand_command);
271
272                 else if (ctrl & NAND_ALE)
273                         writeb(cmd, info->reg.gpmc_nand_address);
274
275                 else /* NAND_NCE */
276                         writeb(cmd, info->reg.gpmc_nand_data);
277         }
278 }
279
280 /**
281  * omap_read_buf8 - read data from NAND controller into buffer
282  * @mtd: MTD device structure
283  * @buf: buffer to store data
284  * @len: number of bytes to read
285  */
286 static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
287 {
288         struct nand_chip *nand = mtd->priv;
289
290         ioread8_rep(nand->IO_ADDR_R, buf, len);
291 }
292
293 /**
294  * omap_write_buf8 - write buffer to NAND controller
295  * @mtd: MTD device structure
296  * @buf: data buffer
297  * @len: number of bytes to write
298  */
299 static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
300 {
301         struct omap_nand_info *info = container_of(mtd,
302                                                 struct omap_nand_info, mtd);
303         u_char *p = (u_char *)buf;
304         u32     status = 0;
305
306         while (len--) {
307                 iowrite8(*p++, info->nand.IO_ADDR_W);
308                 /* wait until buffer is available for write */
309                 do {
310                         status = readl(info->reg.gpmc_status) &
311                                         STATUS_BUFF_EMPTY;
312                 } while (!status);
313         }
314 }
315
316 /**
317  * omap_read_buf16 - read data from NAND controller into buffer
318  * @mtd: MTD device structure
319  * @buf: buffer to store data
320  * @len: number of bytes to read
321  */
322 static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
323 {
324         struct nand_chip *nand = mtd->priv;
325
326         ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
327 }
328
329 /**
330  * omap_write_buf16 - write buffer to NAND controller
331  * @mtd: MTD device structure
332  * @buf: data buffer
333  * @len: number of bytes to write
334  */
335 static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
336 {
337         struct omap_nand_info *info = container_of(mtd,
338                                                 struct omap_nand_info, mtd);
339         u16 *p = (u16 *) buf;
340         u32     status = 0;
341         /* FIXME try bursts of writesw() or DMA ... */
342         len >>= 1;
343
344         while (len--) {
345                 iowrite16(*p++, info->nand.IO_ADDR_W);
346                 /* wait until buffer is available for write */
347                 do {
348                         status = readl(info->reg.gpmc_status) &
349                                         STATUS_BUFF_EMPTY;
350                 } while (!status);
351         }
352 }
353
354 /**
355  * omap_read_buf_pref - read data from NAND controller into buffer
356  * @mtd: MTD device structure
357  * @buf: buffer to store data
358  * @len: number of bytes to read
359  */
360 static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
361 {
362         struct omap_nand_info *info = container_of(mtd,
363                                                 struct omap_nand_info, mtd);
364         uint32_t r_count = 0;
365         int ret = 0;
366         u32 *p = (u32 *)buf;
367
368         /* take care of subpage reads */
369         if (len % 4) {
370                 if (info->nand.options & NAND_BUSWIDTH_16)
371                         omap_read_buf16(mtd, buf, len % 4);
372                 else
373                         omap_read_buf8(mtd, buf, len % 4);
374                 p = (u32 *) (buf + len % 4);
375                 len -= len % 4;
376         }
377
378         /* configure and start prefetch transfer */
379         ret = omap_prefetch_enable(info->gpmc_cs,
380                         PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
381         if (ret) {
382                 /* PFPW engine is busy, use cpu copy method */
383                 if (info->nand.options & NAND_BUSWIDTH_16)
384                         omap_read_buf16(mtd, (u_char *)p, len);
385                 else
386                         omap_read_buf8(mtd, (u_char *)p, len);
387         } else {
388                 do {
389                         r_count = readl(info->reg.gpmc_prefetch_status);
390                         r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
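			/* the FIFO count is in bytes; convert to 32-bit words */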
391                         r_count = r_count >> 2;
392                         ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
393                         p += r_count;
394                         len -= r_count << 2;
395                 } while (len);
396                 /* disable and stop the PFPW engine */
397                 omap_prefetch_reset(info->gpmc_cs, info);
398         }
399 }
400
401 /**
402  * omap_write_buf_pref - write buffer to NAND controller
403  * @mtd: MTD device structure
404  * @buf: data buffer
405  * @len: number of bytes to write
406  */
407 static void omap_write_buf_pref(struct mtd_info *mtd,
408                                         const u_char *buf, int len)
409 {
410         struct omap_nand_info *info = container_of(mtd,
411                                                 struct omap_nand_info, mtd);
412         uint32_t w_count = 0;
413         int i = 0, ret = 0;
414         u16 *p = (u16 *)buf;
415         unsigned long tim, limit;
416         u32 val;
417
418         /* take care of subpage writes */
419         if (len % 2 != 0) {
420                 writeb(*buf, info->nand.IO_ADDR_W);
421                 p = (u16 *)(buf + 1);
422                 len--;
423         }
424
425         /*  configure and start prefetch transfer */
426         ret = omap_prefetch_enable(info->gpmc_cs,
427                         PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
428         if (ret) {
429                 /* PFPW engine is busy, use cpu copy method */
430                 if (info->nand.options & NAND_BUSWIDTH_16)
431                         omap_write_buf16(mtd, (u_char *)p, len);
432                 else
433                         omap_write_buf8(mtd, (u_char *)p, len);
434         } else {
435                 while (len) {
436                         w_count = readl(info->reg.gpmc_prefetch_status);
437                         w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
438                         w_count = w_count >> 1;
439                         for (i = 0; (i < w_count) && len; i++, len -= 2)
440                                 iowrite16(*p++, info->nand.IO_ADDR_W);
441                 }
442                 /* wait for the data to be flushed out before resetting the prefetch */
443                 tim = 0;
444                 limit = (loops_per_jiffy *
445                                         msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
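		/*
		 * Poll until the remaining byte count drops to zero; the limit
		 * is a rough bound on loop iterations derived from
		 * loops_per_jiffy and the millisecond timeout rather than a
		 * jiffies-based deadline.
		 */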
446                 do {
447                         cpu_relax();
448                         val = readl(info->reg.gpmc_prefetch_status);
449                         val = PREFETCH_STATUS_COUNT(val);
450                 } while (val && (tim++ < limit));
451
452                 /* disable and stop the PFPW engine */
453                 omap_prefetch_reset(info->gpmc_cs, info);
454         }
455 }
456
457 /*
458  * omap_nand_dma_callback: callback on the completion of dma transfer
459  * @data: pointer to completion data structure
460  */
461 static void omap_nand_dma_callback(void *data)
462 {
463         complete((struct completion *) data);
464 }
465
466 /*
467  * omap_nand_dma_transfer: configure and start dma transfer
468  * @mtd: MTD device structure
469  * @addr: virtual address in RAM of source/destination
470  * @len: number of data bytes to be transferred
471  * @is_write: flag for read/write operation
472  */
473 static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
474                                         unsigned int len, int is_write)
475 {
476         struct omap_nand_info *info = container_of(mtd,
477                                         struct omap_nand_info, mtd);
478         struct dma_async_tx_descriptor *tx;
479         enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
480                                                         DMA_FROM_DEVICE;
481         struct scatterlist sg;
482         unsigned long tim, limit;
483         unsigned n;
484         int ret;
485         u32 val;
486
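	/*
	 * Buffers above high_memory (e.g. vmalloc'ed) are not part of the
	 * kernel's direct mapping; only attempt DMA if the transfer fits in
	 * a single page, otherwise fall back to the CPU copy path.
	 */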
487         if (addr >= high_memory) {
488                 struct page *p1;
489
490                 if (((size_t)addr & PAGE_MASK) !=
491                         ((size_t)(addr + len - 1) & PAGE_MASK))
492                         goto out_copy;
493                 p1 = vmalloc_to_page(addr);
494                 if (!p1)
495                         goto out_copy;
496                 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
497         }
498
499         sg_init_one(&sg, addr, len);
500         n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
501         if (n == 0) {
502                 dev_err(&info->pdev->dev,
503                         "Couldn't DMA map a %d byte buffer\n", len);
504                 goto out_copy;
505         }
506
507         tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
508                 is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
509                 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
510         if (!tx)
511                 goto out_copy_unmap;
512
513         tx->callback = omap_nand_dma_callback;
514         tx->callback_param = &info->comp;
515         dmaengine_submit(tx);
516
517         /*  configure and start prefetch transfer */
518         ret = omap_prefetch_enable(info->gpmc_cs,
519                 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
520         if (ret)
521                 /* PFPW engine is busy, use cpu copy method */
522                 goto out_copy_unmap;
523
524         init_completion(&info->comp);
525         dma_async_issue_pending(info->dma);
526
527         /* wait for the DMA transfer to complete */
528         wait_for_completion(&info->comp);
529         tim = 0;
530         limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
531
532         do {
533                 cpu_relax();
534                 val = readl(info->reg.gpmc_prefetch_status);
535                 val = PREFETCH_STATUS_COUNT(val);
536         } while (val && (tim++ < limit));
537
538         /* disable and stop the PFPW engine */
539         omap_prefetch_reset(info->gpmc_cs, info);
540
541         dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
542         return 0;
543
544 out_copy_unmap:
545         dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
546 out_copy:
547         if (info->nand.options & NAND_BUSWIDTH_16)
548                 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
549                         : omap_write_buf16(mtd, (u_char *) addr, len);
550         else
551                 is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
552                         : omap_write_buf8(mtd, (u_char *) addr, len);
553         return 0;
554 }
555
556 /**
557  * omap_read_buf_dma_pref - read data from NAND controller into buffer
558  * @mtd: MTD device structure
559  * @buf: buffer to store data
560  * @len: number of bytes to read
561  */
562 static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
563 {
564         if (len <= mtd->oobsize)
565                 omap_read_buf_pref(mtd, buf, len);
566         else
567                 /* start transfer in DMA mode */
568                 omap_nand_dma_transfer(mtd, buf, len, 0x0);
569 }
570
571 /**
572  * omap_write_buf_dma_pref - write buffer to NAND controller
573  * @mtd: MTD device structure
574  * @buf: data buffer
575  * @len: number of bytes to write
576  */
577 static void omap_write_buf_dma_pref(struct mtd_info *mtd,
578                                         const u_char *buf, int len)
579 {
580         if (len <= mtd->oobsize)
581                 omap_write_buf_pref(mtd, buf, len);
582         else
583                 /* start transfer in DMA mode */
584                 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
585 }
586
587 /*
588  * omap_nand_irq - GPMC irq handler
589  * @this_irq: gpmc irq number
590  * @dev: omap_nand_info structure pointer is passed here
591  */
592 static irqreturn_t omap_nand_irq(int this_irq, void *dev)
593 {
594         struct omap_nand_info *info = (struct omap_nand_info *) dev;
595         u32 bytes;
596
597         bytes = readl(info->reg.gpmc_prefetch_status);
598         bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
599         bytes = bytes  & 0xFFFC; /* io in multiple of 4 bytes */
600         if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
601                 if (this_irq == info->gpmc_irq_count)
602                         goto done;
603
604                 if (info->buf_len && (info->buf_len < bytes))
605                         bytes = info->buf_len;
606                 else if (!info->buf_len)
607                         bytes = 0;
608                 iowrite32_rep(info->nand.IO_ADDR_W,
609                                                 (u32 *)info->buf, bytes >> 2);
610                 info->buf = info->buf + bytes;
611                 info->buf_len -= bytes;
612
613         } else {
614                 ioread32_rep(info->nand.IO_ADDR_R,
615                                                 (u32 *)info->buf, bytes >> 2);
616                 info->buf = info->buf + bytes;
617
618                 if (this_irq == info->gpmc_irq_count)
619                         goto done;
620         }
621
622         return IRQ_HANDLED;
623
624 done:
625         complete(&info->comp);
626
627         disable_irq_nosync(info->gpmc_irq_fifo);
628         disable_irq_nosync(info->gpmc_irq_count);
629
630         return IRQ_HANDLED;
631 }
632
633 /*
634  * omap_read_buf_irq_pref - read data from NAND controller into buffer
635  * @mtd: MTD device structure
636  * @buf: buffer to store data
637  * @len: number of bytes to read
638  */
639 static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
640 {
641         struct omap_nand_info *info = container_of(mtd,
642                                                 struct omap_nand_info, mtd);
643         int ret = 0;
644
645         if (len <= mtd->oobsize) {
646                 omap_read_buf_pref(mtd, buf, len);
647                 return;
648         }
649
650         info->iomode = OMAP_NAND_IO_READ;
651         info->buf = buf;
652         init_completion(&info->comp);
653
654         /*  configure and start prefetch transfer */
655         ret = omap_prefetch_enable(info->gpmc_cs,
656                         PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
657         if (ret)
658                 /* PFPW engine is busy, use cpu copy method */
659                 goto out_copy;
660
661         info->buf_len = len;
662
663         enable_irq(info->gpmc_irq_count);
664         enable_irq(info->gpmc_irq_fifo);
665
666         /* waiting for read to complete */
667         wait_for_completion(&info->comp);
668
669         /* disable and stop the PFPW engine */
670         omap_prefetch_reset(info->gpmc_cs, info);
671         return;
672
673 out_copy:
674         if (info->nand.options & NAND_BUSWIDTH_16)
675                 omap_read_buf16(mtd, buf, len);
676         else
677                 omap_read_buf8(mtd, buf, len);
678 }
679
680 /*
681  * omap_write_buf_irq_pref - write buffer to NAND controller
682  * @mtd: MTD device structure
683  * @buf: data buffer
684  * @len: number of bytes to write
685  */
686 static void omap_write_buf_irq_pref(struct mtd_info *mtd,
687                                         const u_char *buf, int len)
688 {
689         struct omap_nand_info *info = container_of(mtd,
690                                                 struct omap_nand_info, mtd);
691         int ret = 0;
692         unsigned long tim, limit;
693         u32 val;
694
695         if (len <= mtd->oobsize) {
696                 omap_write_buf_pref(mtd, buf, len);
697                 return;
698         }
699
700         info->iomode = OMAP_NAND_IO_WRITE;
701         info->buf = (u_char *) buf;
702         init_completion(&info->comp);
703
704         /* configure and start prefetch transfer : size=24 */
705         ret = omap_prefetch_enable(info->gpmc_cs,
706                 (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
707         if (ret)
708                 /* PFPW engine is busy, use cpu copy method */
709                 goto out_copy;
710
711         info->buf_len = len;
712
713         enable_irq(info->gpmc_irq_count);
714         enable_irq(info->gpmc_irq_fifo);
715
716         /* waiting for write to complete */
717         wait_for_completion(&info->comp);
718
719         /* wait for the data to be flushed out before resetting the prefetch */
720         tim = 0;
721         limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
722         do {
723                 val = readl(info->reg.gpmc_prefetch_status);
724                 val = PREFETCH_STATUS_COUNT(val);
725                 cpu_relax();
726         } while (val && (tim++ < limit));
727
728         /* disable and stop the PFPW engine */
729         omap_prefetch_reset(info->gpmc_cs, info);
730         return;
731
732 out_copy:
733         if (info->nand.options & NAND_BUSWIDTH_16)
734                 omap_write_buf16(mtd, buf, len);
735         else
736                 omap_write_buf8(mtd, buf, len);
737 }
738
739 /**
740  * gen_true_ecc - This function will generate true ECC value
741  * @ecc_buf: buffer to store ecc code
742  *
743  * This generated true ECC value can be used when correcting
744  * data read from NAND flash memory core
745  */
746 static void gen_true_ecc(u8 *ecc_buf)
747 {
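	/*
	 * Repack the three ECC bytes into the ECC1_RESULT register bit layout
	 * (the inverse of the packing done in omap_calculate_ecc()) so that
	 * the P* parity-extraction macros can be applied to it.
	 */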
748         u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
749                 ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
750
751         ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
752                         P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
753         ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
754                         P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
755         ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
756                         P1e(tmp) | P2048o(tmp) | P2048e(tmp));
757 }
758
759 /**
760  * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
761  * @ecc_data1:  ecc code from nand spare area
762  * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
763  * @page_data:  page data
764  *
765  * This function compares two ECC's and indicates if there is an error.
766  * If the error can be corrected it will be corrected to the buffer.
767  * If there is no error, %0 is returned. If there is an error but it
768  * was corrected, %1 is returned. Otherwise, %-1 is returned.
769  */
770 static int omap_compare_ecc(u8 *ecc_data1,      /* read from NAND memory */
771                             u8 *ecc_data2,      /* read from register */
772                             u8 *page_data)
773 {
774         uint    i;
775         u8      tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
776         u8      comp0_bit[8], comp1_bit[8], comp2_bit[8];
777         u8      ecc_bit[24];
778         u8      ecc_sum = 0;
779         u8      find_bit = 0;
780         uint    find_byte = 0;
781         int     isEccFF;
782
783         isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
784
785         gen_true_ecc(ecc_data1);
786         gen_true_ecc(ecc_data2);
787
788         for (i = 0; i <= 2; i++) {
789                 *(ecc_data1 + i) = ~(*(ecc_data1 + i));
790                 *(ecc_data2 + i) = ~(*(ecc_data2 + i));
791         }
792
793         for (i = 0; i < 8; i++) {
794                 tmp0_bit[i]     = *ecc_data1 % 2;
795                 *ecc_data1      = *ecc_data1 / 2;
796         }
797
798         for (i = 0; i < 8; i++) {
799                 tmp1_bit[i]      = *(ecc_data1 + 1) % 2;
800                 *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
801         }
802
803         for (i = 0; i < 8; i++) {
804                 tmp2_bit[i]      = *(ecc_data1 + 2) % 2;
805                 *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
806         }
807
808         for (i = 0; i < 8; i++) {
809                 comp0_bit[i]     = *ecc_data2 % 2;
810                 *ecc_data2       = *ecc_data2 / 2;
811         }
812
813         for (i = 0; i < 8; i++) {
814                 comp1_bit[i]     = *(ecc_data2 + 1) % 2;
815                 *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
816         }
817
818         for (i = 0; i < 8; i++) {
819                 comp2_bit[i]     = *(ecc_data2 + 2) % 2;
820                 *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
821         }
822
823         for (i = 0; i < 6; i++)
824                 ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
825
826         for (i = 0; i < 8; i++)
827                 ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
828
829         for (i = 0; i < 8; i++)
830                 ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
831
832         ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
833         ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
834
835         for (i = 0; i < 24; i++)
836                 ecc_sum += ecc_bit[i];
837
838         switch (ecc_sum) {
839         case 0:
840                 /* Not reached because this function is not called if
841                  *  ECC values are equal
842                  */
843                 return 0;
844
845         case 1:
846                 /* Uncorrectable error */
847                 pr_debug("ECC UNCORRECTED_ERROR 1\n");
848                 return -1;
849
850         case 11:
851                 /* Uncorrectable error */
852                 pr_debug("ECC UNCORRECTED_ERROR B\n");
853                 return -1;
854
855         case 12:
856                 /* Correctable error */
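		/*
		 * A syndrome weight of 12 (half of the 24 bits) means exactly
		 * one bit of each complementary parity pair differs, i.e. a
		 * single-bit error; the odd-indexed syndrome bits encode the
		 * failing byte offset and the bit position within that byte.
		 */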
857                 find_byte = (ecc_bit[23] << 8) +
858                             (ecc_bit[21] << 7) +
859                             (ecc_bit[19] << 6) +
860                             (ecc_bit[17] << 5) +
861                             (ecc_bit[15] << 4) +
862                             (ecc_bit[13] << 3) +
863                             (ecc_bit[11] << 2) +
864                             (ecc_bit[9]  << 1) +
865                             ecc_bit[7];
866
867                 find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
868
869                 pr_debug("Correcting single bit ECC error at offset: "
870                                 "%d, bit: %d\n", find_byte, find_bit);
871
872                 page_data[find_byte] ^= (1 << find_bit);
873
874                 return 1;
875         default:
876                 if (isEccFF) {
877                         if (ecc_data2[0] == 0 &&
878                             ecc_data2[1] == 0 &&
879                             ecc_data2[2] == 0)
880                                 return 0;
881                 }
882                 pr_debug("UNCORRECTED_ERROR default\n");
883                 return -1;
884         }
885 }
886
887 /**
888  * omap_correct_data - Compares the ECC read with HW generated ECC
889  * @mtd: MTD device structure
890  * @dat: page data
891  * @read_ecc: ecc read from nand flash
892  * @calc_ecc: ecc read from HW ECC registers
893  *
894  * Compares the ECC read from the NAND spare area with the ECC register
895  * values and, if they mismatch, calls 'omap_compare_ecc' for error
896  * detection and correction. If there are no errors, %0 is returned. If
897  * there were errors and all of the errors were corrected, the number of
898  * corrected errors is returned. If uncorrectable errors exist, %-1 is
899  * returned.
900  */
901 static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
902                                 u_char *read_ecc, u_char *calc_ecc)
903 {
904         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
905                                                         mtd);
906         int blockCnt = 0, i = 0, ret = 0;
907         int stat = 0;
908
909         /* e.g. NAND_ECC_HW12_2048 */
910         if ((info->nand.ecc.mode == NAND_ECC_HW) &&
911                         (info->nand.ecc.size  == 2048))
912                 blockCnt = 4;
913         else
914                 blockCnt = 1;
915
916         for (i = 0; i < blockCnt; i++) {
917                 if (memcmp(read_ecc, calc_ecc, 3) != 0) {
918                         ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
919                         if (ret < 0)
920                                 return ret;
921                         /* keep track of the number of corrected errors */
922                         stat += ret;
923                 }
924                 read_ecc += 3;
925                 calc_ecc += 3;
926                 dat      += 512;
927         }
928         return stat;
929 }
930
931 /**
932  * omap_calculate_ecc - Generate non-inverted ECC bytes.
933  * @mtd: MTD device structure
934  * @dat: The pointer to data on which ecc is computed
935  * @ecc_code: The ecc_code buffer
936  *
937  * Using non-inverted ECC can be considered ugly since writing a blank
938  * page, i.e. padding, will clear the ECC bytes. This is no problem as long
939  * as nobody is trying to write data on the seemingly unused page. Reading
940  * an erased page will produce an ECC mismatch between generated and read
941  * ECC bytes that has to be dealt with separately.
942  */
943 static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
944                                 u_char *ecc_code)
945 {
946         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
947                                                         mtd);
948         u32 val;
949
950         val = readl(info->reg.gpmc_ecc_config);
951         if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
952                 return -EINVAL;
953
954         /* read ecc result */
955         val = readl(info->reg.gpmc_ecc1_result);
956         *ecc_code++ = val;          /* P128e, ..., P1e */
957         *ecc_code++ = val >> 16;    /* P128o, ..., P1o */
958         /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
959         *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
960
961         return 0;
962 }
963
964 /**
965  * omap_enable_hwecc - This function enables the hardware ecc functionality
966  * @mtd: MTD device structure
967  * @mode: Read/Write mode
968  */
969 static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
970 {
971         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
972                                                         mtd);
973         struct nand_chip *chip = mtd->priv;
974         unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
975         u32 val;
976
977         /* clear ecc and enable bits */
978         val = ECCCLEAR | ECC1;
979         writel(val, info->reg.gpmc_ecc_control);
980
981         /* program ecc and result sizes */
982         val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
983                          ECC1RESULTSIZE);
984         writel(val, info->reg.gpmc_ecc_size_config);
985
986         switch (mode) {
987         case NAND_ECC_READ:
988         case NAND_ECC_WRITE:
989                 writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
990                 break;
991         case NAND_ECC_READSYN:
992                 writel(ECCCLEAR, info->reg.gpmc_ecc_control);
993                 break;
994         default:
995                 dev_info(&info->pdev->dev,
996                         "error: unrecognized Mode[%d]!\n", mode);
997                 break;
998         }
999
1000         /* (ECC 16 or 8 bit col) | ( CS  )  | ECC Enable */
1001         val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
1002         writel(val, info->reg.gpmc_ecc_config);
1003 }
1004
1005 /**
1006  * omap_wait - wait until the command is done
1007  * @mtd: MTD device structure
1008  * @chip: NAND Chip structure
1009  *
1010  * The wait function is called during program and erase operations; given
1011  * the way it is called from the MTD layer, we should wait till the NAND
1012  * chip is ready after the program/erase operation has completed.
1013  *
1014  * Erase can take up to 400ms and program up to 20ms according to
1015  * general NAND and SmartMedia specs
1016  */
1017 static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
1018 {
1019         struct nand_chip *this = mtd->priv;
1020         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1021                                                         mtd);
1022         unsigned long timeo = jiffies;
1023         int status, state = this->state;
1024
1025         if (state == FL_ERASING)
1026                 timeo += msecs_to_jiffies(400);
1027         else
1028                 timeo += msecs_to_jiffies(20);
1029
1030         writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
1031         while (time_before(jiffies, timeo)) {
1032                 status = readb(info->reg.gpmc_nand_data);
1033                 if (status & NAND_STATUS_READY)
1034                         break;
1035                 cond_resched();
1036         }
1037
1038         status = readb(info->reg.gpmc_nand_data);
1039         return status;
1040 }
1041
1042 /**
1043  * omap_dev_ready - checks device ready/busy status via the GPMC status register
1044  * @mtd: MTD device structure
1045  */
1046 static int omap_dev_ready(struct mtd_info *mtd)
1047 {
1048         unsigned int val = 0;
1049         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1050                                                         mtd);
1051
1052         val = readl(info->reg.gpmc_status);
1053
1054         if ((val & 0x100) == 0x100) {
1055                 return 1;
1056         } else {
1057                 return 0;
1058         }
1059 }
1060
1061 #ifdef CONFIG_MTD_NAND_OMAP_BCH
1062
1063 /**
1064  * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
1065  * @mtd: MTD device structure
1066  * @mode: Read/Write mode
1067  *
1068  * When using BCH, sector size is hardcoded to 512 bytes.
1069  * Wrapping mode 6 is used for both reading and writing if the ELM module
1070  * is not used for error correction.
1071  * On writing,
1072  * eccsize0 = 0  (no additional protected byte in spare area)
1073  * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
1074  */
1075 static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1076 {
1077         int nerrors;
1078         unsigned int dev_width, nsectors;
1079         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1080                                                    mtd);
1081         struct nand_chip *chip = mtd->priv;
1082         u32 val, wr_mode;
1083         unsigned int ecc_size1, ecc_size0;
1084
1085         /* Using wrapping mode 6 for writing */
1086         wr_mode = BCH_WRAPMODE_6;
1087
1088         /*
1089          * ECC engine enabled for valid ecc_size0 nibbles
1090          * and disabled for ecc_size1 nibbles.
1091          */
1092         ecc_size0 = BCH_ECC_SIZE0;
1093         ecc_size1 = BCH_ECC_SIZE1;
1094
1095         /* Perform ecc calculation on 512-byte sector */
1096         nsectors = 1;
1097
1098         /* Update the number of correctable errors */
1099         nerrors = info->nand.ecc.strength;
1100
1101         /* Multi-sector reading/writing for NAND flash with page size <= 4096 */
1102         if (info->is_elm_used && (mtd->writesize <= 4096)) {
1103                 if (mode == NAND_ECC_READ) {
1104                         /* Using wrapping mode 1 for reading */
1105                         wr_mode = BCH_WRAPMODE_1;
1106
1107                         /*
1108                          * ECC engine enabled for ecc_size0 nibbles
1109                          * and disabled for ecc_size1 nibbles.
1110                          */
1111                         ecc_size0 = (nerrors == 8) ?
1112                                 BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0;
1113                         ecc_size1 = (nerrors == 8) ?
1114                                 BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1;
1115                 }
1116
1117                 /* Perform ecc calculation for one page (<= 4096) */
1118                 nsectors = info->nand.ecc.steps;
1119         }
1120
1121         writel(ECC1, info->reg.gpmc_ecc_control);
1122
1123         /* Configure ecc size for BCH */
1124         val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
1125         writel(val, info->reg.gpmc_ecc_size_config);
1126
1127         dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
1128
1129         /* BCH configuration */
1130         val = ((1                        << 16) | /* enable BCH */
1131                (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
1132                (wr_mode                  <<  8) | /* wrap mode */
1133                (dev_width                <<  7) | /* bus width */
1134                (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
1135                (info->gpmc_cs            <<  1) | /* ECC CS */
1136                (0x1));                            /* enable ECC */
1137
1138         writel(val, info->reg.gpmc_ecc_config);
1139
1140         /* Clear ecc and enable bits */
1141         writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
1142 }
1143
1144 /**
1145  * omap3_calculate_ecc_bch4 - Generate 7 bytes of BCH4 ECC per sector
1146  * @mtd: MTD device structure
1147  * @dat: The pointer to data on which ecc is computed
1148  * @ecc_code: The ecc_code buffer
1149  */
1150 static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
1151                                     u_char *ecc_code)
1152 {
1153         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1154                                                    mtd);
1155         unsigned long nsectors, val1, val2;
1156         int i;
1157
1158         nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1159
1160         for (i = 0; i < nsectors; i++) {
1161
1162                 /* Read hw-computed remainder */
1163                 val1 = readl(info->reg.gpmc_bch_result0[i]);
1164                 val2 = readl(info->reg.gpmc_bch_result1[i]);
1165
1166                 /*
1167                  * Add constant polynomial to remainder, in order to get an ecc
1168                  * sequence of 0xFFs for a buffer filled with 0xFFs; and
1169                  * left-justify the resulting polynomial.
1170                  */
1171                 *ecc_code++ = 0x28 ^ ((val2 >> 12) & 0xFF);
1172                 *ecc_code++ = 0x13 ^ ((val2 >>  4) & 0xFF);
1173                 *ecc_code++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
1174                 *ecc_code++ = 0x39 ^ ((val1 >> 20) & 0xFF);
1175                 *ecc_code++ = 0x96 ^ ((val1 >> 12) & 0xFF);
1176                 *ecc_code++ = 0xac ^ ((val1 >> 4) & 0xFF);
1177                 *ecc_code++ = 0x7f ^ ((val1 & 0xF) << 4);
1178         }
1179
1180         return 0;
1181 }
1182
1183 /**
1184  * omap3_calculate_ecc_bch8 - Generate 13 bytes of BCH8 ECC per sector
1185  * @mtd: MTD device structure
1186  * @dat: The pointer to data on which ecc is computed
1187  * @ecc_code: The ecc_code buffer
1188  */
1189 static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
1190                                     u_char *ecc_code)
1191 {
1192         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1193                                                    mtd);
1194         unsigned long nsectors, val1, val2, val3, val4;
1195         int i;
1196
1197         nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1198
1199         for (i = 0; i < nsectors; i++) {
1200
1201                 /* Read hw-computed remainder */
1202                 val1 = readl(info->reg.gpmc_bch_result0[i]);
1203                 val2 = readl(info->reg.gpmc_bch_result1[i]);
1204                 val3 = readl(info->reg.gpmc_bch_result2[i]);
1205                 val4 = readl(info->reg.gpmc_bch_result3[i]);
1206
1207                 /*
1208                  * Add constant polynomial to remainder, in order to get an ecc
1209                  * sequence of 0xFFs for a buffer filled with 0xFFs.
1210                  */
1211                 *ecc_code++ = 0xef ^ (val4 & 0xFF);
1212                 *ecc_code++ = 0x51 ^ ((val3 >> 24) & 0xFF);
1213                 *ecc_code++ = 0x2e ^ ((val3 >> 16) & 0xFF);
1214                 *ecc_code++ = 0x09 ^ ((val3 >> 8) & 0xFF);
1215                 *ecc_code++ = 0xed ^ (val3 & 0xFF);
1216                 *ecc_code++ = 0x93 ^ ((val2 >> 24) & 0xFF);
1217                 *ecc_code++ = 0x9a ^ ((val2 >> 16) & 0xFF);
1218                 *ecc_code++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
1219                 *ecc_code++ = 0x97 ^ (val2 & 0xFF);
1220                 *ecc_code++ = 0x79 ^ ((val1 >> 24) & 0xFF);
1221                 *ecc_code++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
1222                 *ecc_code++ = 0x24 ^ ((val1 >> 8) & 0xFF);
1223                 *ecc_code++ = 0xb5 ^ (val1 & 0xFF);
1224         }
1225
1226         return 0;
1227 }
1228
1229 /**
1230  * omap3_calculate_ecc_bch - Generate BCH4/BCH8 ECC bytes
1231  * @mtd:        MTD device structure
1232  * @dat:        The pointer to data on which ecc is computed
1233  * @ecc_code:   The ecc_code buffer
1234  *
1235  * Supports calculating BCH4/BCH8 ecc vectors for the page
1236  */
1237 static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
1238                                     u_char *ecc_code)
1239 {
1240         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1241                                                    mtd);
1242         unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
1243         int i, eccbchtsel;
1244
1245         nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1246         /*
1247          * find BCH scheme used
1248          * 0 -> BCH4
1249          * 1 -> BCH8
1250          */
1251         eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3);
1252
1253         for (i = 0; i < nsectors; i++) {
1254
1255                 /* Read hw-computed remainder */
1256                 bch_val1 = readl(info->reg.gpmc_bch_result0[i]);
1257                 bch_val2 = readl(info->reg.gpmc_bch_result1[i]);
1258                 if (eccbchtsel) {
1259                         bch_val3 = readl(info->reg.gpmc_bch_result2[i]);
1260                         bch_val4 = readl(info->reg.gpmc_bch_result3[i]);
1261                 }
1262
1263                 if (eccbchtsel) {
1264                         /* BCH8 ecc scheme */
1265                         *ecc_code++ = (bch_val4 & 0xFF);
1266                         *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1267                         *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1268                         *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1269                         *ecc_code++ = (bch_val3 & 0xFF);
1270                         *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1271                         *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1272                         *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1273                         *ecc_code++ = (bch_val2 & 0xFF);
1274                         *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1275                         *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1276                         *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1277                         *ecc_code++ = (bch_val1 & 0xFF);
1278                         /*
1279                          * Setting 14th byte to zero to handle
1280                          * erased page & maintain compatibility
1281                          * with RBL
1282                          */
1283                         *ecc_code++ = 0x0;
1284                 } else {
1285                         /* BCH4 ecc scheme */
1286                         *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1287                         *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1288                         *ecc_code++ = ((bch_val2 & 0xF) << 4) |
1289                                 ((bch_val1 >> 28) & 0xF);
1290                         *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1291                         *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1292                         *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1293                         *ecc_code++ = ((bch_val1 & 0xF) << 4);
1294                         /*
1295                          * Setting 8th byte to zero to handle
1296                          * erased page
1297                          */
1298                         *ecc_code++ = 0x0;
1299                 }
1300         }
1301
1302         return 0;
1303 }
1304
1305 /**
1306  * erased_sector_bitflips - count bit flips
1307  * @data:       data sector buffer
1308  * @oob:        oob buffer
1309  * @info:       omap_nand_info
1310  *
1311  * Check whether the number of bit flips in an erased page falls below
1312  * the correctable level. If it does, report the page as erased with
1313  * correctable bit flips, else report it as an uncorrectable page.
1314  */
1315 static int erased_sector_bitflips(u_char *data, u_char *oob,
1316                 struct omap_nand_info *info)
1317 {
1318         int flip_bits = 0, i;
1319
1320         for (i = 0; i < info->nand.ecc.size; i++) {
1321                 flip_bits += hweight8(~data[i]);
1322                 if (flip_bits > info->nand.ecc.strength)
1323                         return 0;
1324         }
1325
1326         for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
1327                 flip_bits += hweight8(~oob[i]);
1328                 if (flip_bits > info->nand.ecc.strength)
1329                         return 0;
1330         }
1331
1332         /*
1333          * Bit flips fall within the correctable level.
1334          * Fill the data area with 0xFF.
1335          */
1336         if (flip_bits) {
1337                 memset(data, 0xFF, info->nand.ecc.size);
1338                 memset(oob, 0xFF, info->nand.ecc.bytes);
1339         }
1340
1341         return flip_bits;
1342 }
1343
1344 /**
1345  * omap_elm_correct_data - corrects page data area in case error reported
1346  * @mtd:        MTD device structure
1347  * @data:       page data
1348  * @read_ecc:   ecc read from nand flash
1349  * @calc_ecc:   ecc read from HW ECC registers
1350  *
1351  * The calculated ecc vector is reported as zero for error-free pages.
1352  * For pages with errors (or erased pages) a non-zero vector is reported.
1353  * In that case, read_ecc is checked at a fixed offset x (x = 13 for BCH8,
1354  * x = 7 for BCH4) to find out whether the page is programmed or erased:
1355  * the number of 0 bits in read_ecc[x] is counted; if it reaches the
1356  * threshold the page is treated as programmed, otherwise it is treated
1357  * as erased.
1358  *
1359  * 1. If the page is erased, check against the standard ecc vector for an
1360  * erased page to find any bit flips. If the check fails, bit flips are
1361  * present in the erased page. Count them and, if they fall under the
1362  * correctable level, report the page as 0xFF and update the
1363  * correctable-bit information.
1364  * 2. If an error is reported on a programmed page, update the elm error
1365  * vector and correct the page with the ELM error correction routine.
1366  *
1367  */
1368 static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
1369                                 u_char *read_ecc, u_char *calc_ecc)
1370 {
1371         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1372                         mtd);
1373         int eccsteps = info->nand.ecc.steps;
1374         int i , j, stat = 0;
1375         int eccsize, eccflag, ecc_vector_size;
1376         struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
1377         u_char *ecc_vec = calc_ecc;
1378         u_char *spare_ecc = read_ecc;
1379         u_char *erased_ecc_vec;
1380         enum bch_ecc type;
1381         bool is_error_reported = false;
1382
1383         /* Initialize elm error vector to zero */
1384         memset(err_vec, 0, sizeof(err_vec));
1385
1386         if (info->nand.ecc.strength == BCH8_MAX_ERROR) {
1387                 type = BCH8_ECC;
1388                 erased_ecc_vec = bch8_vector;
1389         } else {
1390                 type = BCH4_ECC;
1391                 erased_ecc_vec = bch4_vector;
1392         }
1393
1394         ecc_vector_size = info->nand.ecc.bytes;
1395
1396         /*
1397          * Remove extra byte padding for BCH8 RBL
1398          * compatibility and erased page handling
1399          */
1400         eccsize = ecc_vector_size - 1;
1401
1402         for (i = 0; i < eccsteps ; i++) {
1403                 eccflag = 0;    /* initialize eccflag */
1404
1405                 /*
1406                  * Check whether any error is reported; in case of an
1407                  * error, a non-zero ecc is reported.
1408                  */
1409
1410                 for (j = 0; (j < eccsize); j++) {
1411                         if (calc_ecc[j] != 0) {
1412                                 eccflag = 1; /* non zero ecc, error present */
1413                                 break;
1414                         }
1415                 }
1416
1417                 if (eccflag == 1) {
1418                         /*
1419                          * Set the threshold to min(4, ecc.strength / 2),
1420                          * i.e. allow at most 4 bit flips in the byte.
1421                          */
1422                         unsigned int threshold = min_t(unsigned int, 4,
1423                                         info->nand.ecc.strength / 2);
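                        /*
                         * Worked example: with BCH8 (strength 8) the threshold
                         * is min(4, 8 / 2) = 4.  An erased page reads back
                         * 0xFF here (zero 0-bits), while the heuristic assumes
                         * the last ecc byte of a programmed page contains at
                         * least 4 zero bits.
                         */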
1424
1425                         /*
1426                          * Decide whether the data area is programmed by
1427                          * counting the 0 bits at a fixed offset in the
1428                          * spare area and comparing the count against the
1429                          * threshold: a programmed page is expected to have
1430                          * at least 'threshold' zero bits in that byte.
1431                          * Fewer zeros means the page is treated as erased,
1432                          * and any flips found there beyond the correctable
1433                          * level make the page uncorrectable.
1434                          */
1435                         if (hweight8(~read_ecc[eccsize]) >= threshold) {
1436                                 /*
1437                                  * Update elm error vector as
1438                                  * data area is programmed
1439                                  */
1440                                 err_vec[i].error_reported = true;
1441                                 is_error_reported = true;
1442                         } else {
1443                                 /* Error reported in erased page */
1444                                 int bitflip_count;
1445                                 u_char *buf = &data[info->nand.ecc.size * i];
1446
1447                                 if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) {
1448                                         bitflip_count = erased_sector_bitflips(
1449                                                         buf, read_ecc, info);
1450
1451                                         if (bitflip_count)
1452                                                 stat += bitflip_count;
1453                                         else
1454                                                 return -EINVAL;
1455                                 }
1456                         }
1457                 }
1458
1459                 /* Update the ecc vector */
1460                 calc_ecc += ecc_vector_size;
1461                 read_ecc += ecc_vector_size;
1462         }
1463
1464         /* Check if any error reported */
1465         if (!is_error_reported)
1466                 return 0;
1467
1468         /* Decode BCH error using ELM module */
1469         elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
1470
1471         for (i = 0; i < eccsteps; i++) {
1472                 if (err_vec[i].error_reported) {
1473                         for (j = 0; j < err_vec[i].error_count; j++) {
1474                                 u32 bit_pos, byte_pos, error_max, pos;
1475
1476                                 if (type == BCH8_ECC)
1477                                         error_max = BCH8_ECC_MAX;
1478                                 else
1479                                         error_max = BCH4_ECC_MAX;
1480
1481                                 if (info->nand.ecc.strength == BCH8_MAX_ERROR)
1482                                         pos = err_vec[i].error_loc[j];
1483                                 else
1484                                         /* Add 4 to account for the 4-bit padding */
1485                                         pos = err_vec[i].error_loc[j] +
1486                                                 BCH4_BIT_PAD;
1487
1488                                 /* Calculate bit position of error */
1489                                 bit_pos = pos % 8;
1490
1491                                 /* Calculate byte position of error */
1492                                 byte_pos = (error_max - pos - 1) / 8;
1493
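                                /*
                                 * The reported position is mirrored around
                                 * error_max: byte offsets below 512 fall in
                                 * the page data, the remainder in the ecc
                                 * bytes held in spare_ecc.
                                 */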
1494                                 if (pos < error_max) {
1495                                         if (byte_pos < 512)
1496                                                 data[byte_pos] ^= 1 << bit_pos;
1497                                         else
1498                                                 spare_ecc[byte_pos - 512] ^=
1499                                                         1 << bit_pos;
1500                                 }
1501                                 /* else, the error is past error_max and needs no correction */
1502                         }
1503                 }
1504
1505                 /* Update number of correctable errors */
1506                 stat += err_vec[i].error_count;
1507
1508                 /* Update page data with sector size */
1509                 data += info->nand.ecc.size;
1510                 spare_ecc += ecc_vector_size;
1511         }
1512
1513         for (i = 0; i < eccsteps; i++)
1514                 /* Return error if uncorrectable error present */
1515                 if (err_vec[i].error_uncorrectable)
1516                         return -EINVAL;
1517
1518         return stat;
1519 }
1520
1521 /**
1522  * omap3_correct_data_bch - Decode received data and correct errors
1523  * @mtd: MTD device structure
1524  * @data: page data
1525  * @read_ecc: ecc read from nand flash
1526  * @calc_ecc: ecc read from HW ECC registers
1527  */
1528 static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
1529                                   u_char *read_ecc, u_char *calc_ecc)
1530 {
1531         int i, count;
1532         /* cannot correct more than 8 errors */
1533         unsigned int errloc[8];
1534         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1535                                                    mtd);
1536
1537         count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
1538                            errloc);
1539         if (count > 0) {
1540                 /* correct errors */
1541                 for (i = 0; i < count; i++) {
1542                         /* correct data only, not ecc bytes */
1543                         if (errloc[i] < 8*512)
1544                                 data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
1545                         pr_debug("corrected bitflip %u\n", errloc[i]);
1546                 }
1547         } else if (count < 0) {
1548                 pr_err("ecc unrecoverable error\n");
1549         }
1550         return count;
1551 }
1552
1553 /**
1554  * omap_write_page_bch - BCH ecc based write page function for entire page
1555  * @mtd:                mtd info structure
1556  * @chip:               nand chip info structure
1557  * @buf:                data buffer
1558  * @oob_required:       must write chip->oob_poi to OOB
1559  *
1560  * Custom write-page method developed to support writing multiple sectors in one shot
1561  */
1562 static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1563                                   const uint8_t *buf, int oob_required)
1564 {
1565         int i;
1566         uint8_t *ecc_calc = chip->buffers->ecccalc;
1567         uint32_t *eccpos = chip->ecc.layout->eccpos;
1568
1569         /* Enable GPMC ecc engine */
1570         chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
1571
1572         /* Write data */
1573         chip->write_buf(mtd, buf, mtd->writesize);
1574
1575         /* Update ecc vector from GPMC result registers */
1576         chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
1577
1578         for (i = 0; i < chip->ecc.total; i++)
1579                 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1580
1581         /* Write ecc vector to OOB area */
1582         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1583         return 0;
1584 }
1585
1586 /**
1587  * omap_read_page_bch - BCH ecc based page read function for entire page
1588  * @mtd:                mtd info structure
1589  * @chip:               nand chip info structure
1590  * @buf:                buffer to store read data
1591  * @oob_required:       caller requires OOB data read to chip->oob_poi
1592  * @page:               page number to read
1593  *
1594  * For the BCH ecc scheme the GPMC is used for syndrome calculation and
1595  * the ELM module for error correction.
1596  * This custom method was developed to support ELM error correction and
1597  * multi-sector reads: the page data area is read together with the OOB
1598  * data while the ecc engine is enabled, and the ecc vector is updated
1599  * after the OOB read.  For error-free pages the vector reads back as zero.
1600  */
1601 static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1602                                 uint8_t *buf, int oob_required, int page)
1603 {
1604         uint8_t *ecc_calc = chip->buffers->ecccalc;
1605         uint8_t *ecc_code = chip->buffers->ecccode;
1606         uint32_t *eccpos = chip->ecc.layout->eccpos;
1607         uint8_t *oob = &chip->oob_poi[eccpos[0]];
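        /* column address of the first ecc byte within (page data + OOB) */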
1608         uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
1609         int stat;
1610         unsigned int max_bitflips = 0;
1611
1612         /* Enable GPMC ecc engine */
1613         chip->ecc.hwctl(mtd, NAND_ECC_READ);
1614
1615         /* Read data */
1616         chip->read_buf(mtd, buf, mtd->writesize);
1617
1618         /* Read oob bytes */
1619         chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
1620         chip->read_buf(mtd, oob, chip->ecc.total);
1621
1622         /* Calculate ecc bytes */
1623         chip->ecc.calculate(mtd, buf, ecc_calc);
1624
1625         memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
1626
1627         stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
1628
1629         if (stat < 0) {
1630                 mtd->ecc_stats.failed++;
1631         } else {
1632                 mtd->ecc_stats.corrected += stat;
1633                 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1634         }
1635
1636         return max_bitflips;
1637 }
1638
1639 /**
1640  * omap3_free_bch - Release BCH ecc resources
1641  * @mtd: MTD device structure
1642  */
1643 static void omap3_free_bch(struct mtd_info *mtd)
1644 {
1645         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1646                                                    mtd);
1647         if (info->bch) {
1648                 free_bch(info->bch);
1649                 info->bch = NULL;
1650         }
1651 }
1652
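/*
 * Illustrative only: the ELM hookup below expects the NAND node to carry an
 * "elm_id" phandle pointing at the ELM instance, along the lines of
 *
 *	nand@0,0 {
 *		...
 *		elm_id = <&elm>;
 *	};
 *
 * where &elm is the board's ELM node.  The label name is an assumption made
 * for the example; only the "elm_id" property name is taken from the code.
 */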
1653 /**
1654  * omap3_init_bch - Initialize BCH ECC
1655  * @mtd: MTD device structure
1656  * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
1657  */
1658 static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1659 {
1660         int max_errors;
1661         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1662                                                    mtd);
1663 #ifdef CONFIG_MTD_NAND_OMAP_BCH8
1664         const int hw_errors = BCH8_MAX_ERROR;
1665 #else
1666         const int hw_errors = BCH4_MAX_ERROR;
1667 #endif
1668         enum bch_ecc bch_type;
1669         const __be32 *parp;
1670         int lenp;
1671         struct device_node *elm_node;
1672
1673         info->bch = NULL;
1674
1675         max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
1676                 BCH8_MAX_ERROR : BCH4_MAX_ERROR;
1677         if (max_errors != hw_errors) {
1678                 pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported\n",
1679                        max_errors, hw_errors);
1680                 goto fail;
1681         }
1682
1683         info->nand.ecc.size = 512;
1684         info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
1685         info->nand.ecc.mode = NAND_ECC_HW;
1686         info->nand.ecc.strength = max_errors;
1687
1688         if (hw_errors == BCH8_MAX_ERROR)
1689                 bch_type = BCH8_ECC;
1690         else
1691                 bch_type = BCH4_ECC;
1692
1693         /* Detect availability of ELM module */
1694         parp = of_get_property(info->of_node, "elm_id", &lenp);
1695         if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
1696                 pr_err("Missing elm_id property, falling back to software BCH\n");
1697                 info->is_elm_used = false;
1698         } else {
1699                 struct platform_device *pdev;
1700
1701                 elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
1702                 pdev = of_find_device_by_node(elm_node);
1703                 info->elm_dev = &pdev->dev;
1704
1705                 if (elm_config(info->elm_dev, bch_type) == 0)
1706                         info->is_elm_used = true;
1707         }
1708
1709         if (info->is_elm_used && (mtd->writesize <= 4096)) {
1710
1711                 if (hw_errors == BCH8_MAX_ERROR)
1712                         info->nand.ecc.bytes = BCH8_SIZE;
1713                 else
1714                         info->nand.ecc.bytes = BCH4_SIZE;
1715
1716                 info->nand.ecc.correct = omap_elm_correct_data;
1717                 info->nand.ecc.calculate = omap3_calculate_ecc_bch;
1718                 info->nand.ecc.read_page = omap_read_page_bch;
1719                 info->nand.ecc.write_page = omap_write_page_bch;
1720         } else {
1721                 /*
1722                  * software bch library is only used to detect and
1723                  * locate errors
1724                  */
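                /*
                 * m = 13: a 512-byte sector is 4096 data bits, so data plus
                 * ecc fit within a 2^13 - 1 bit BCH codeword; 0x201b is the
                 * polynomial used by the GPMC hardware, as noted below.
                 */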
1725                 info->bch = init_bch(13, max_errors,
1726                                 0x201b /* hw polynomial */);
1727                 if (!info->bch)
1728                         goto fail;
1729
1730                 info->nand.ecc.correct = omap3_correct_data_bch;
1731
1732                 /*
1733                  * The number of corrected errors in an ecc block that will
1734                  * trigger block scrubbing defaults to the ecc strength (4 or 8).
1735                  * Set mtd->bitflip_threshold here to define a custom threshold.
1736                  */
1737
1738                 if (max_errors == 8) {
1739                         info->nand.ecc.bytes = 13;
1740                         info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
1741                 } else {
1742                         info->nand.ecc.bytes = 7;
1743                         info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
1744                 }
1745         }
1746
1747         pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
1748         return 0;
1749 fail:
1750         omap3_free_bch(mtd);
1751         return -1;
1752 }
1753
1754 /**
1755  * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
1756  * @mtd: MTD device structure
1757  */
1758 static int omap3_init_bch_tail(struct mtd_info *mtd)
1759 {
1760         int i, steps, offset;
1761         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1762                                                    mtd);
1763         struct nand_ecclayout *layout = &info->ecclayout;
1764
1765         /* build oob layout */
1766         steps = mtd->writesize/info->nand.ecc.size;
1767         layout->eccbytes = steps*info->nand.ecc.bytes;
1768
1769         /* do not bother creating special oob layouts for small page devices */
1770         if (mtd->oobsize < 64) {
1771                 pr_err("BCH ecc is not supported on small page devices\n");
1772                 goto fail;
1773         }
1774
1775         /* reserve 2 bytes for bad block marker */
1776         if (layout->eccbytes+2 > mtd->oobsize) {
1777                 pr_err("no oob layout available for oobsize %d eccbytes %u\n",
1778                        mtd->oobsize, layout->eccbytes);
1779                 goto fail;
1780         }
1781
1782         /* ECC layout compatible with RBL for BCH8 */
1783         if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
1784                 offset = 2;
1785         else
1786                 offset = mtd->oobsize - layout->eccbytes;
1787
1788         /* place the ecc bytes at the offset chosen above */
1789         for (i = 0; i < layout->eccbytes; i++)
1790                 layout->eccpos[i] = offset + i;
1791
1792         if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
1793                 layout->oobfree[0].offset = 2 + layout->eccbytes;
1794         else
1795                 layout->oobfree[0].offset = 2;
1796
1797         layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
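        /*
         * Example (assumed geometry, for illustration only): a 2 KiB page
         * with a 64-byte OOB and 14 ecc bytes per 512-byte step gives
         * steps = 4 and eccbytes = 56; the free area is 64 - 2 - 56 = 6
         * bytes, placed either after the ecc bytes (RBL-compatible BCH8
         * layout) or right after the bad block marker.
         */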
1798         info->nand.ecc.layout = layout;
1799
1800         if (!(info->nand.options & NAND_BUSWIDTH_16))
1801                 info->nand.badblock_pattern = &bb_descrip_flashbased;
1802         return 0;
1803 fail:
1804         omap3_free_bch(mtd);
1805         return -1;
1806 }
1807
1808 #else
1809 static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1810 {
1811         pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
1812         return -1;
1813 }
1814 static int omap3_init_bch_tail(struct mtd_info *mtd)
1815 {
1816         return -1;
1817 }
1818 static void omap3_free_bch(struct mtd_info *mtd)
1819 {
1820 }
1821 #endif /* CONFIG_MTD_NAND_OMAP_BCH */
1822
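/*
 * Sketch of the platform data this probe routine consumes (field values are
 * illustrative assumptions, not a reference configuration):
 *
 *	static struct omap_nand_platform_data board_nand_data = {
 *		.cs		= 0,
 *		.devsize	= NAND_BUSWIDTH_16,
 *		.xfer_type	= NAND_OMAP_PREFETCH_DMA,
 *		.ecc_opt	= OMAP_ECC_BCH8_CODE_HW,
 *	};
 *
 * The GPMC NAND glue code typically fills in .reg, .of_node, .parts and
 * .nr_parts before registering the platform device this driver binds to.
 */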
1823 static int omap_nand_probe(struct platform_device *pdev)
1824 {
1825         struct omap_nand_info           *info;
1826         struct omap_nand_platform_data  *pdata;
1827         int                             err;
1828         int                             i, offset;
1829         dma_cap_mask_t mask;
1830         unsigned sig;
1831         struct resource                 *res;
1832         struct mtd_part_parser_data     ppdata = {};
1833
1834         pdata = pdev->dev.platform_data;
1835         if (pdata == NULL) {
1836                 dev_err(&pdev->dev, "platform data missing\n");
1837                 return -ENODEV;
1838         }
1839
1840         info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
1841         if (!info)
1842                 return -ENOMEM;
1843
1844         platform_set_drvdata(pdev, info);
1845
1846         spin_lock_init(&info->controller.lock);
1847         init_waitqueue_head(&info->controller.wq);
1848
1849         info->pdev = pdev;
1850
1851         info->gpmc_cs           = pdata->cs;
1852         info->reg               = pdata->reg;
1853
1854         info->mtd.priv          = &info->nand;
1855         info->mtd.name          = dev_name(&pdev->dev);
1856         info->mtd.owner         = THIS_MODULE;
1857
1858         info->nand.options      = pdata->devsize;
1859         info->nand.options      |= NAND_SKIP_BBTSCAN;
1860 #ifdef CONFIG_MTD_NAND_OMAP_BCH
1861         info->of_node           = pdata->of_node;
1862 #endif
1863
1864         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1865         if (res == NULL) {
1866                 err = -EINVAL;
1867                 dev_err(&pdev->dev, "error getting memory resource\n");
1868                 goto out_free_info;
1869         }
1870
1871         info->phys_base = res->start;
1872         info->mem_size = resource_size(res);
1873
1874         if (!request_mem_region(info->phys_base, info->mem_size,
1875                                 pdev->dev.driver->name)) {
1876                 err = -EBUSY;
1877                 goto out_free_info;
1878         }
1879
1880         info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
1881         if (!info->nand.IO_ADDR_R) {
1882                 err = -ENOMEM;
1883                 goto out_release_mem_region;
1884         }
1885
1886         info->nand.controller = &info->controller;
1887
1888         info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
1889         info->nand.cmd_ctrl  = omap_hwcontrol;
1890
1891         /*
1892          * If the RDY/BSY line is connected to the OMAP, use the omap ready
1893          * function together with the generic nand_wait function, which reads
1894          * the status register after monitoring the RDY/BSY line.  Otherwise
1895          * use a fixed chip delay slightly longer than the NAND device's tR
1896          * (AC timing) and poll the status register until success or failure.
1897          */
1898         if (pdata->dev_ready) {
1899                 info->nand.dev_ready = omap_dev_ready;
1900                 info->nand.chip_delay = 0;
1901         } else {
1902                 info->nand.waitfunc = omap_wait;
1903                 info->nand.chip_delay = 50;
1904         }
1905
1906         switch (pdata->xfer_type) {
1907         case NAND_OMAP_PREFETCH_POLLED:
1908                 info->nand.read_buf   = omap_read_buf_pref;
1909                 info->nand.write_buf  = omap_write_buf_pref;
1910                 break;
1911
1912         case NAND_OMAP_POLLED:
1913                 if (info->nand.options & NAND_BUSWIDTH_16) {
1914                         info->nand.read_buf   = omap_read_buf16;
1915                         info->nand.write_buf  = omap_write_buf16;
1916                 } else {
1917                         info->nand.read_buf   = omap_read_buf8;
1918                         info->nand.write_buf  = omap_write_buf8;
1919                 }
1920                 break;
1921
1922         case NAND_OMAP_PREFETCH_DMA:
1923                 dma_cap_zero(mask);
1924                 dma_cap_set(DMA_SLAVE, mask);
1925                 sig = OMAP24XX_DMA_GPMC;
1926                 info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1927                 if (!info->dma) {
1928                         dev_err(&pdev->dev, "DMA engine request failed\n");
1929                         err = -ENXIO;
1930                         goto out_release_mem_region;
1931                 } else {
1932                         struct dma_slave_config cfg;
1933
1934                         memset(&cfg, 0, sizeof(cfg));
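                        /*
                         * Both directions target the GPMC chip-select window;
                         * 32-bit accesses, sixteen words per burst.
                         */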
1935                         cfg.src_addr = info->phys_base;
1936                         cfg.dst_addr = info->phys_base;
1937                         cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1938                         cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1939                         cfg.src_maxburst = 16;
1940                         cfg.dst_maxburst = 16;
1941                         err = dmaengine_slave_config(info->dma, &cfg);
1942                         if (err) {
1943                                 dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
1944                                         err);
1945                                 goto out_release_mem_region;
1946                         }
1947                         info->nand.read_buf   = omap_read_buf_dma_pref;
1948                         info->nand.write_buf  = omap_write_buf_dma_pref;
1949                 }
1950                 break;
1951
1952         case NAND_OMAP_PREFETCH_IRQ:
1953                 info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
1954                 if (info->gpmc_irq_fifo <= 0) {
1955                         dev_err(&pdev->dev, "error getting fifo irq\n");
1956                         err = -ENODEV;
1957                         goto out_release_mem_region;
1958                 }
1959                 err = request_irq(info->gpmc_irq_fifo,  omap_nand_irq,
1960                                         IRQF_SHARED, "gpmc-nand-fifo", info);
1961                 if (err) {
1962                         dev_err(&pdev->dev, "requesting irq(%d) error: %d\n",
1963                                                 info->gpmc_irq_fifo, err);
1964                         info->gpmc_irq_fifo = 0;
1965                         goto out_release_mem_region;
1966                 }
1967
1968                 info->gpmc_irq_count = platform_get_irq(pdev, 1);
1969                 if (info->gpmc_irq_count <= 0) {
1970                         dev_err(&pdev->dev, "error getting count irq\n");
1971                         err = -ENODEV;
1972                         goto out_release_mem_region;
1973                 }
1974                 err = request_irq(info->gpmc_irq_count, omap_nand_irq,
1975                                         IRQF_SHARED, "gpmc-nand-count", info);
1976                 if (err) {
1977                         dev_err(&pdev->dev, "requesting irq(%d) error: %d\n",
1978                                                 info->gpmc_irq_count, err);
1979                         info->gpmc_irq_count = 0;
1980                         goto out_release_mem_region;
1981                 }
1982
1983                 info->nand.read_buf  = omap_read_buf_irq_pref;
1984                 info->nand.write_buf = omap_write_buf_irq_pref;
1985
1986                 break;
1987
1988         default:
1989                 dev_err(&pdev->dev,
1990                         "xfer_type(%d) not supported!\n", pdata->xfer_type);
1991                 err = -EINVAL;
1992                 goto out_release_mem_region;
1993         }
1994
1995         /* select the ecc type */
1996         if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
1997                 info->nand.ecc.mode = NAND_ECC_SOFT;
1998         else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
1999                 (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
2000                 info->nand.ecc.bytes            = 3;
2001                 info->nand.ecc.size             = 512;
2002                 info->nand.ecc.strength         = 1;
2003                 info->nand.ecc.calculate        = omap_calculate_ecc;
2004                 info->nand.ecc.hwctl            = omap_enable_hwecc;
2005                 info->nand.ecc.correct          = omap_correct_data;
2006                 info->nand.ecc.mode             = NAND_ECC_HW;
2007         } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
2008                    (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
2009                 err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
2010                 if (err) {
2011                         err = -EINVAL;
2012                         goto out_release_mem_region;
2013                 }
2014         }
2015
2016         /* DIP switches on some boards change between 8 and 16 bit
2017          * bus widths for flash.  Try the other width if the first try fails.
2018          */
2019         if (nand_scan_ident(&info->mtd, 1, NULL)) {
2020                 info->nand.options ^= NAND_BUSWIDTH_16;
2021                 if (nand_scan_ident(&info->mtd, 1, NULL)) {
2022                         err = -ENXIO;
2023                         goto out_release_mem_region;
2024                 }
2025         }
2026
2027         /* ROM code compatible ecc layout */
2028         if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
2029
2030                 if (info->nand.options & NAND_BUSWIDTH_16)
2031                         offset = 2;
2032                 else {
2033                         offset = 1;
2034                         info->nand.badblock_pattern = &bb_descrip_flashbased;
2035                 }
2036                 omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
2037                 for (i = 0; i < omap_oobinfo.eccbytes; i++)
2038                         omap_oobinfo.eccpos[i] = i+offset;
2039
2040                 omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
2041                 omap_oobinfo.oobfree->length = info->mtd.oobsize -
2042                                         (offset + omap_oobinfo.eccbytes);
2043
2044                 info->nand.ecc.layout = &omap_oobinfo;
2045         } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
2046                    (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
2047                 /* build OOB layout for BCH ECC correction */
2048                 err = omap3_init_bch_tail(&info->mtd);
2049                 if (err) {
2050                         err = -EINVAL;
2051                         goto out_release_mem_region;
2052                 }
2053         }
2054
2055         /* second phase scan */
2056         if (nand_scan_tail(&info->mtd)) {
2057                 err = -ENXIO;
2058                 goto out_release_mem_region;
2059         }
2060
2061         ppdata.of_node = pdata->of_node;
2062         mtd_device_parse_register(&info->mtd, NULL, &ppdata, pdata->parts,
2063                                   pdata->nr_parts);
2064
2065         platform_set_drvdata(pdev, &info->mtd);
2066
2067         return 0;
2068
2069 out_release_mem_region:
2070         if (info->dma)
2071                 dma_release_channel(info->dma);
2072         if (info->gpmc_irq_count > 0)
2073                 free_irq(info->gpmc_irq_count, info);
2074         if (info->gpmc_irq_fifo > 0)
2075                 free_irq(info->gpmc_irq_fifo, info);
        /* undo the ioremap() done above (NULL if it never succeeded) */
        if (info->nand.IO_ADDR_R)
                iounmap(info->nand.IO_ADDR_R);
2076         release_mem_region(info->phys_base, info->mem_size);
2077 out_free_info:
2078         kfree(info);
2079
2080         return err;
2081 }
2082
2083 static int omap_nand_remove(struct platform_device *pdev)
2084 {
2085         struct mtd_info *mtd = platform_get_drvdata(pdev);
2086         struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
2087                                                         mtd);
2088         omap3_free_bch(&info->mtd);
2089
2090         platform_set_drvdata(pdev, NULL);
2091         if (info->dma)
2092                 dma_release_channel(info->dma);
2093
2094         if (info->gpmc_irq_count > 0)
2095                 free_irq(info->gpmc_irq_count, info);
2096         if (info->gpmc_irq_fifo > 0)
2097                 free_irq(info->gpmc_irq_fifo, info);
2098
2099         /* Release NAND device, its internal structures and partitions */
2100         nand_release(&info->mtd);
2101         iounmap(info->nand.IO_ADDR_R);
2102         release_mem_region(info->phys_base, info->mem_size);
2103         kfree(info);
2104         return 0;
2105 }
2106
2107 static struct platform_driver omap_nand_driver = {
2108         .probe          = omap_nand_probe,
2109         .remove         = omap_nand_remove,
2110         .driver         = {
2111                 .name   = DRIVER_NAME,
2112                 .owner  = THIS_MODULE,
2113         },
2114 };
2115
2116 module_platform_driver(omap_nand_driver);
2117
2118 MODULE_ALIAS("platform:" DRIVER_NAME);
2119 MODULE_LICENSE("GPL");
2120 MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");