GNU Linux-libre 4.14.290-gnu1
[releases.git] / drivers / mtd / nand / sh_flctl.c
1 /*
2  * SuperH FLCTL nand controller
3  *
4  * Copyright (c) 2008 Renesas Solutions Corp.
5  * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
6  *
7  * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/completion.h>
27 #include <linux/delay.h>
28 #include <linux/dmaengine.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/interrupt.h>
31 #include <linux/io.h>
32 #include <linux/of.h>
33 #include <linux/of_device.h>
34 #include <linux/platform_device.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/sh_dma.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39
40 #include <linux/mtd/mtd.h>
41 #include <linux/mtd/rawnand.h>
42 #include <linux/mtd/partitions.h>
43 #include <linux/mtd/sh_flctl.h>
44
45 static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
46                                         struct mtd_oob_region *oobregion)
47 {
48         struct nand_chip *chip = mtd_to_nand(mtd);
49
50         if (section)
51                 return -ERANGE;
52
53         oobregion->offset = 0;
54         oobregion->length = chip->ecc.bytes;
55
56         return 0;
57 }
58
59 static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
60                                          struct mtd_oob_region *oobregion)
61 {
62         if (section)
63                 return -ERANGE;
64
65         oobregion->offset = 12;
66         oobregion->length = 4;
67
68         return 0;
69 }
70
/* OOB layout callbacks for small-page devices with 4-symbol ECC. */
static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
	.ecc = flctl_4secc_ooblayout_sp_ecc,
	.free = flctl_4secc_ooblayout_sp_free,
};
75
76 static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
77                                         struct mtd_oob_region *oobregion)
78 {
79         struct nand_chip *chip = mtd_to_nand(mtd);
80
81         if (section >= chip->ecc.steps)
82                 return -ERANGE;
83
84         oobregion->offset = (section * 16) + 6;
85         oobregion->length = chip->ecc.bytes;
86
87         return 0;
88 }
89
90 static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
91                                          struct mtd_oob_region *oobregion)
92 {
93         struct nand_chip *chip = mtd_to_nand(mtd);
94
95         if (section >= chip->ecc.steps)
96                 return -ERANGE;
97
98         oobregion->offset = section * 16;
99         oobregion->length = 6;
100
101         if (!section) {
102                 oobregion->offset += 2;
103                 oobregion->length -= 2;
104         }
105
106         return 0;
107 }
108
/* OOB layout callbacks for large-page devices with 4-symbol ECC. */
static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
	.ecc = flctl_4secc_ooblayout_lp_ecc,
	.free = flctl_4secc_ooblayout_lp_free,
};
113
/* All-0xff pattern a factory-good block shows at the scanned OOB bytes. */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

/* Bad-block scan descriptor, small-page: 1 marker byte at OOB offset 11. */
static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

/* Bad-block scan descriptor, large-page: 2 marker bytes at OOB offset 0. */
static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};
129
/* Flush both controller FIFOs by pulsing the AC1CLR/AC0CLR clear bits. */
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}
135
/* Kick off a command translation by setting TRSTRT in FLTRCR. */
static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}
140
/* Log a poll timeout; @str is normally the __func__ of the caller. */
static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}
145
146 static void wait_completion(struct sh_flctl *flctl)
147 {
148         uint32_t timeout = LOOP_TIMEOUT_MAX;
149
150         while (timeout--) {
151                 if (readb(FLTRCR(flctl)) & TREND) {
152                         writeb(0x0, FLTRCR(flctl));
153                         return;
154                 }
155                 udelay(1);
156         }
157
158         timeout_error(flctl, __func__);
159         writeb(0x0, FLTRCR(flctl));
160 }
161
/* dmaengine completion callback: wake up flctl_dma_fifo0_transfer(). */
static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}
168
169 static void flctl_release_dma(struct sh_flctl *flctl)
170 {
171         if (flctl->chan_fifo0_rx) {
172                 dma_release_channel(flctl->chan_fifo0_rx);
173                 flctl->chan_fifo0_rx = NULL;
174         }
175         if (flctl->chan_fifo0_tx) {
176                 dma_release_channel(flctl->chan_fifo0_tx);
177                 flctl->chan_fifo0_tx = NULL;
178         }
179 }
180
/*
 * Request and configure the FIFO0 Tx and Rx DMA channels.
 *
 * DMA is only attempted when platform data provides positive slave IDs
 * for both directions; any failure releases whatever was acquired via
 * flctl_release_dma() and leaves the driver in PIO-only mode.
 */
static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	/* Tx: memory -> FLCTL data FIFO */
	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = flctl->fifo;
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	/* Rx: reuse cfg with source and destination swapped */
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = flctl->fifo;
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}
237
/*
 * Pack @column and @page_addr into the FLADR (and, for devices larger
 * than 128MB, FLADR2) address registers.
 *
 * column == -1 selects the erase form (page address only); otherwise the
 * encoding depends on page size: large-page parts use a 12-bit column
 * plus the page address in the upper bytes, small-page parts shift the
 * page address in above an 8-bit column.
 */
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;	/* byte offset -> word offset */
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* big than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t	addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}
268
269 static void wait_rfifo_ready(struct sh_flctl *flctl)
270 {
271         uint32_t timeout = LOOP_TIMEOUT_MAX;
272
273         while (timeout--) {
274                 uint32_t val;
275                 /* check FIFO */
276                 val = readl(FLDTCNTR(flctl)) >> 16;
277                 if (val & 0xFF)
278                         return;
279                 udelay(1);
280         }
281         timeout_error(flctl, __func__);
282 }
283
284 static void wait_wfifo_ready(struct sh_flctl *flctl)
285 {
286         uint32_t len, timeout = LOOP_TIMEOUT_MAX;
287
288         while (timeout--) {
289                 /* check FIFO */
290                 len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
291                 if (len >= 4)
292                         return;
293                 udelay(1);
294         }
295         timeout_error(flctl, __func__);
296 }
297
/*
 * Wait for the ECC FIFO of @sector_number to become readable, applying
 * hardware-computed ECC corrections to flctl->done_buff on the way.
 *
 * Returns FL_SUCCESS when no correction was needed (or the sector was
 * blank), FL_REPAIRABLE when corrections were applied, FL_ERROR for an
 * uncorrectable non-blank sector, and FL_TIMEOUT on poll exhaustion.
 */
static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loops checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors or
	 * if the bottom part of the loop corrected the errors or marked them as
	 * uncorrectable and the controller is given time to push the data into
	 * the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			/* blank (all-0xff) sectors legitimately fail ECC */
			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
				"reading empty sector %d, ecc error ignored\n",
				sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		/*
		 * NOTE(review): only the first three result registers are
		 * consumed although four are mapped above — presumably the
		 * hardware reports at most three corrections per pass; this
		 * matches the upstream driver, confirm against the datasheet.
		 */
		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			/* upper half: byte position; lower byte: XOR mask */
			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}
377
378 static void wait_wecfifo_ready(struct sh_flctl *flctl)
379 {
380         uint32_t timeout = LOOP_TIMEOUT_MAX;
381         uint32_t len;
382
383         while (timeout--) {
384                 /* check FLECFIFO */
385                 len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
386                 if (len >= 4)
387                         return;
388                 udelay(1);
389         }
390         timeout_error(flctl, __func__);
391 }
392
/*
 * Perform one DMA transfer of @len bytes between @buf and the FIFO0
 * data register, in the direction given by @dir.
 *
 * Returns 0 on success; a negative error code if mapping, descriptor
 * preparation, submission, or completion (3s timeout) fails, in which
 * case the caller is expected to fall back to PIO.
 */
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie;
	uint32_t reg;
	int ret = 0;
	unsigned long time_left;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (!dma_mapping_error(chan->device->dev, dma_addr))
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		/* let the controller drive the DMA request line */
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie)) {
			ret = dma_submit_error(cookie);
			dev_warn(&flctl->pdev->dev,
				 "DMA submit failed, falling back to PIO\n");
			goto out;
		}

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	time_left =
	wait_for_completion_timeout(&flctl->dma_complete,
				msecs_to_jiffies(3000));

	if (time_left == 0) {
		dmaengine_terminate_all(chan);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
	}

out:
	/* always drop the DMA request enable and unmap the buffer */
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret == 0 is success */
	return ret;
}
464
/*
 * Wait for the running translation to finish, then read one 32-bit word
 * from the data register into done_buff at @offset (byte offset; must
 * be suitably aligned for the unsigned long store).
 */
static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}
475
/*
 * Read @rlen bytes (rounded up to whole 32-bit words) from the data
 * FIFO into done_buff at @offset, then convert the words from the
 * controller's big-endian order in place.
 *
 * DMA is tried first for transfers of 32 bytes or more when an Rx
 * channel is available; on any DMA failure the transfer is redone by
 * polled PIO.
 */
static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
		!flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
			goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}
498
499 static enum flctl_ecc_res_t read_ecfiforeg
500                 (struct sh_flctl *flctl, uint8_t *buff, int sector)
501 {
502         int i;
503         enum flctl_ecc_res_t res;
504         unsigned long *ecc_buf = (unsigned long *)buff;
505
506         res = wait_recfifo_ready(flctl , sector);
507
508         if (res != FL_ERROR) {
509                 for (i = 0; i < 4; i++) {
510                         ecc_buf[i] = readl(FLECFIFO(flctl));
511                         ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
512                 }
513         }
514
515         return res;
516 }
517
518 static void write_fiforeg(struct sh_flctl *flctl, int rlen,
519                                                 unsigned int offset)
520 {
521         int i, len_4align;
522         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
523
524         len_4align = (rlen + 3) / 4;
525         for (i = 0; i < len_4align; i++) {
526                 wait_wfifo_ready(flctl);
527                 writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
528         }
529 }
530
531 static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
532                                                 unsigned int offset)
533 {
534         int i, len_4align;
535         unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
536
537         len_4align = (rlen + 3) / 4;
538
539         for (i = 0; i < len_4align; i++)
540                 buf[i] = cpu_to_be32(buf[i]);
541
542         /* initiate DMA transfer */
543         if (flctl->chan_fifo0_tx && rlen >= 32 &&
544                 !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
545                         return; /* DMA success */
546
547         /* do polling transfer */
548         for (i = 0; i < len_4align; i++) {
549                 wait_wecfifo_ready(flctl);
550                 writel(buf[i], FLECFIFO(flctl));
551         }
552 }
553
/*
 * Program FLCMNCR/FLCMDCR/FLCMCDR for the NAND command @cmd.
 *
 * @flcmcdr_val holds the raw command byte(s) for FLCMCDR (a second
 * command, e.g. READSTART, may be packed into bits 15:8 by the caller).
 * The per-command switch selects address byte count, data source/sink
 * direction and bus width before the three registers are written.
 */
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048byte */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;	/* ERASE needs the 2nd command byte */
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;		/* data comes from the chip */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* This case is that cmd is READ0 or READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;	/* write direction */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;	/* READID is always 8-bit/small */
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);	/* no address phase */
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}
615
/*
 * ecc.read_page hook: the controller has already transferred and
 * ECC-processed the page in the cmdfunc path, so just copy the buffered
 * data (and OOB when requested) out.  Always returns 0.
 */
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	chip->read_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
624
/*
 * ecc.write_page hook: stage page data and OOB into the driver buffer;
 * the actual programming happens when PAGEPROG is issued via cmdfunc.
 * Always returns 0.
 */
static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				  const uint8_t *buf, int oob_required,
				  int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
633
/*
 * Read a full page at @page_addr sector-by-sector in hardware-ECC mode.
 *
 * Each 512-byte sector's data lands at the start of done_buff and its
 * 16 OOB/ECC bytes after mtd->writesize; correction results update
 * mtd->ecc_stats.  Large-page devices have 4 sectors, small-page 1.
 */
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	/* sector-access mode with 4-symbol ECC correction enabled */
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x", page_addr);
			mtd->ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			mtd->ecc_stats.failed++;
			break;
		default:
			;
		}
	}

	wait_completion(flctl);

	/* restore normal access mode */
	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}
682
/*
 * Read only the OOB area of the page at @page_addr in hardware-ECC
 * mode: one 16-byte spare chunk per sector, issued as individual
 * translations, accumulated at the start of done_buff.
 */
static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		/* spare area of sector i sits after its 512 data bytes */
		set_addr(mtd, (512 + 16) * i + 512 , page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}
703
/*
 * Program the full page staged in done_buff (address captured earlier
 * by SEQIN) in hardware-ECC sector-access mode: 512 data bytes plus 16
 * OOB bytes per sector.
 */
static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	/* leave sector-access mode again */
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}
729
/*
 * Program only the OOB area of the page captured by SEQIN: one 16-byte
 * spare chunk per sector, each written as its own translation from the
 * start of done_buff.
 */
static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		/* spare area of this sector: 528-byte stride, data skipped */
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set read size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}
751
752 static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
753                         int column, int page_addr)
754 {
755         struct sh_flctl *flctl = mtd_to_flctl(mtd);
756         uint32_t read_cmd = 0;
757
758         pm_runtime_get_sync(&flctl->pdev->dev);
759
760         flctl->read_bytes = 0;
761         if (command != NAND_CMD_PAGEPROG)
762                 flctl->index = 0;
763
764         switch (command) {
765         case NAND_CMD_READ1:
766         case NAND_CMD_READ0:
767                 if (flctl->hwecc) {
768                         /* read page with hwecc */
769                         execmd_read_page_sector(mtd, page_addr);
770                         break;
771                 }
772                 if (flctl->page_size)
773                         set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
774                                 | command);
775                 else
776                         set_cmd_regs(mtd, command, command);
777
778                 set_addr(mtd, 0, page_addr);
779
780                 flctl->read_bytes = mtd->writesize + mtd->oobsize;
781                 if (flctl->chip.options & NAND_BUSWIDTH_16)
782                         column >>= 1;
783                 flctl->index += column;
784                 goto read_normal_exit;
785
786         case NAND_CMD_READOOB:
787                 if (flctl->hwecc) {
788                         /* read page with hwecc */
789                         execmd_read_oob(mtd, page_addr);
790                         break;
791                 }
792
793                 if (flctl->page_size) {
794                         set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
795                                 | NAND_CMD_READ0);
796                         set_addr(mtd, mtd->writesize, page_addr);
797                 } else {
798                         set_cmd_regs(mtd, command, command);
799                         set_addr(mtd, 0, page_addr);
800                 }
801                 flctl->read_bytes = mtd->oobsize;
802                 goto read_normal_exit;
803
804         case NAND_CMD_RNDOUT:
805                 if (flctl->hwecc)
806                         break;
807
808                 if (flctl->page_size)
809                         set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
810                                 | command);
811                 else
812                         set_cmd_regs(mtd, command, command);
813
814                 set_addr(mtd, column, 0);
815
816                 flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
817                 goto read_normal_exit;
818
819         case NAND_CMD_READID:
820                 set_cmd_regs(mtd, command, command);
821
822                 /* READID is always performed using an 8-bit bus */
823                 if (flctl->chip.options & NAND_BUSWIDTH_16)
824                         column <<= 1;
825                 set_addr(mtd, column, 0);
826
827                 flctl->read_bytes = 8;
828                 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
829                 empty_fifo(flctl);
830                 start_translation(flctl);
831                 read_fiforeg(flctl, flctl->read_bytes, 0);
832                 wait_completion(flctl);
833                 break;
834
835         case NAND_CMD_ERASE1:
836                 flctl->erase1_page_addr = page_addr;
837                 break;
838
839         case NAND_CMD_ERASE2:
840                 set_cmd_regs(mtd, NAND_CMD_ERASE1,
841                         (command << 8) | NAND_CMD_ERASE1);
842                 set_addr(mtd, -1, flctl->erase1_page_addr);
843                 start_translation(flctl);
844                 wait_completion(flctl);
845                 break;
846
847         case NAND_CMD_SEQIN:
848                 if (!flctl->page_size) {
849                         /* output read command */
850                         if (column >= mtd->writesize) {
851                                 column -= mtd->writesize;
852                                 read_cmd = NAND_CMD_READOOB;
853                         } else if (column < 256) {
854                                 read_cmd = NAND_CMD_READ0;
855                         } else {
856                                 column -= 256;
857                                 read_cmd = NAND_CMD_READ1;
858                         }
859                 }
860                 flctl->seqin_column = column;
861                 flctl->seqin_page_addr = page_addr;
862                 flctl->seqin_read_cmd = read_cmd;
863                 break;
864
865         case NAND_CMD_PAGEPROG:
866                 empty_fifo(flctl);
867                 if (!flctl->page_size) {
868                         set_cmd_regs(mtd, NAND_CMD_SEQIN,
869                                         flctl->seqin_read_cmd);
870                         set_addr(mtd, -1, -1);
871                         writel(0, FLDTCNTR(flctl));     /* set 0 size */
872                         start_translation(flctl);
873                         wait_completion(flctl);
874                 }
875                 if (flctl->hwecc) {
876                         /* write page with hwecc */
877                         if (flctl->seqin_column == mtd->writesize)
878                                 execmd_write_oob(mtd);
879                         else if (!flctl->seqin_column)
880                                 execmd_write_page_sector(mtd);
881                         else
882                                 printk(KERN_ERR "Invalid address !?\n");
883                         break;
884                 }
885                 set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
886                 set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
887                 writel(flctl->index, FLDTCNTR(flctl));  /* set write size */
888                 start_translation(flctl);
889                 write_fiforeg(flctl, flctl->index, 0);
890                 wait_completion(flctl);
891                 break;
892
893         case NAND_CMD_STATUS:
894                 set_cmd_regs(mtd, command, command);
895                 set_addr(mtd, -1, -1);
896
897                 flctl->read_bytes = 1;
898                 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
899                 start_translation(flctl);
900                 read_datareg(flctl, 0); /* read and end */
901                 break;
902
903         case NAND_CMD_RESET:
904                 set_cmd_regs(mtd, command, command);
905                 set_addr(mtd, -1, -1);
906
907                 writel(0, FLDTCNTR(flctl));     /* set 0 size */
908                 start_translation(flctl);
909                 wait_completion(flctl);
910                 break;
911
912         default:
913                 break;
914         }
915         goto runtime_exit;
916
917 read_normal_exit:
918         writel(flctl->read_bytes, FLDTCNTR(flctl));     /* set read size */
919         empty_fifo(flctl);
920         start_translation(flctl);
921         read_fiforeg(flctl, flctl->read_bytes, 0);
922         wait_completion(flctl);
923 runtime_exit:
924         pm_runtime_put_sync(&flctl->pdev->dev);
925         return;
926 }
927
/*
 * flctl_select_chip - select (chipnr == 0) or deselect (chipnr == -1) the
 * single NAND chip behind the controller.
 *
 * Chip-enable is tracked in the cached flcmncr_base value; on deselect the
 * new value is written to FLCMNCR immediately.  While the chip is selected
 * a PM QoS resume-latency constraint of 100 us is held on the device.
 * Any other chipnr is a driver bug.
 */
static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int ret;

	switch (chipnr) {
	case -1:
		/* Deselect: drop CE0 and push the new value to the HW. */
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		/* Release the latency constraint taken on select. */
		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		/* Select: set CE0 in the cached base value. */
		flctl->flcmncr_base |= CE0_ENABLE;

		/*
		 * Cap device resume latency at 100 us while selected.
		 * NOTE(review): qos_request is set to 1 even when
		 * dev_pm_qos_add_request() failed, so the deselect path may
		 * remove a request that was never added — confirm intended.
		 */
		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos,
							DEV_PM_QOS_RESUME_LATENCY,
							100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		/* Assert the HOLDEN signal on SoCs that require it. */
		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}
971
972 static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
973 {
974         struct sh_flctl *flctl = mtd_to_flctl(mtd);
975
976         memcpy(&flctl->done_buff[flctl->index], buf, len);
977         flctl->index += len;
978 }
979
980 static uint8_t flctl_read_byte(struct mtd_info *mtd)
981 {
982         struct sh_flctl *flctl = mtd_to_flctl(mtd);
983         uint8_t data;
984
985         data = flctl->done_buff[flctl->index];
986         flctl->index++;
987         return data;
988 }
989
990 static uint16_t flctl_read_word(struct mtd_info *mtd)
991 {
992         struct sh_flctl *flctl = mtd_to_flctl(mtd);
993         uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
994
995         flctl->index += 2;
996         return *buf;
997 }
998
999 static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1000 {
1001         struct sh_flctl *flctl = mtd_to_flctl(mtd);
1002
1003         memcpy(buf, &flctl->done_buff[flctl->index], len);
1004         flctl->index += len;
1005 }
1006
/*
 * flctl_chip_init_tail - finish controller setup once the chip has been
 * identified: derive the page-size flag and the address-cycle counts
 * (ADRCNT) for read/write and erase from the detected geometry, then wire
 * up hardware 4-symbol ECC or fall back to software Hamming ECC.
 *
 * Always returns 0.
 */
static int flctl_chip_init_tail(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	struct nand_chip *chip = &flctl->chip;

	if (mtd->writesize == 512) {
		/* 512-byte (small-page) device */
		flctl->page_size = 0;
		if (chip->chipsize > (32 << 20)) {
			/* bigger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (2 << 16)) {
			/* bigger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		/* large-page device */
		flctl->page_size = 1;
		if (chip->chipsize > (128 << 20)) {
			/* bigger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (8 << 16)) {
			/* bigger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		/* hardware ECC: 10 ECC bytes per 512-byte step, strength 4 */
		if (mtd->writesize == 512) {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.mode = NAND_ECC_HW;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		/* no hardware ECC: use software Hamming */
		chip->ecc.mode = NAND_ECC_SOFT;
		chip->ecc.algo = NAND_ECC_HAMMING;
	}

	return 0;
}
1067
/*
 * flctl_handle_flste - FLSTE (error) interrupt handler: log the interrupt
 * status and restore FLINTDMACR to its base value, which clears/re-arms
 * the error condition reported by the controller.
 */
static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}
1077
/* Per-SoC FLCTL configuration selected via the OF match table. */
struct flctl_soc_config {
	unsigned long flcmncr_val;	/* base FLCMNCR register value */
	unsigned has_hwecc:1;		/* controller provides 4-symbol HW ECC */
	unsigned use_holden:1;		/* SoC needs the HOLDEN signal asserted */
};
1083
1084 static struct flctl_soc_config flctl_sh7372_config = {
1085         .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
1086         .has_hwecc = 1,
1087         .use_holden = 1,
1088 };
1089
/* OF match table: ties the compatible string to its SoC configuration. */
static const struct of_device_id of_flctl_match[] = {
	{ .compatible = "renesas,shmobile-flctl-sh7372",
				.data = &flctl_sh7372_config },
	{},
};
MODULE_DEVICE_TABLE(of, of_flctl_match);
1096
1097 static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
1098 {
1099         const struct of_device_id *match;
1100         struct flctl_soc_config *config;
1101         struct sh_flctl_platform_data *pdata;
1102
1103         match = of_match_device(of_flctl_match, dev);
1104         if (match)
1105                 config = (struct flctl_soc_config *)match->data;
1106         else {
1107                 dev_err(dev, "%s: no OF configuration attached\n", __func__);
1108                 return NULL;
1109         }
1110
1111         pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
1112                                                                 GFP_KERNEL);
1113         if (!pdata)
1114                 return NULL;
1115
1116         /* set SoC specific options */
1117         pdata->flcmncr_val = config->flcmncr_val;
1118         pdata->has_hwecc = config->has_hwecc;
1119         pdata->use_holden = config->use_holden;
1120
1121         return pdata;
1122 }
1123
1124 static int flctl_probe(struct platform_device *pdev)
1125 {
1126         struct resource *res;
1127         struct sh_flctl *flctl;
1128         struct mtd_info *flctl_mtd;
1129         struct nand_chip *nand;
1130         struct sh_flctl_platform_data *pdata;
1131         int ret;
1132         int irq;
1133
1134         flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
1135         if (!flctl)
1136                 return -ENOMEM;
1137
1138         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1139         flctl->reg = devm_ioremap_resource(&pdev->dev, res);
1140         if (IS_ERR(flctl->reg))
1141                 return PTR_ERR(flctl->reg);
1142         flctl->fifo = res->start + 0x24; /* FLDTFIFO */
1143
1144         irq = platform_get_irq(pdev, 0);
1145         if (irq < 0) {
1146                 dev_err(&pdev->dev, "failed to get flste irq data: %d\n", irq);
1147                 return irq;
1148         }
1149
1150         ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
1151                                "flste", flctl);
1152         if (ret) {
1153                 dev_err(&pdev->dev, "request interrupt failed.\n");
1154                 return ret;
1155         }
1156
1157         if (pdev->dev.of_node)
1158                 pdata = flctl_parse_dt(&pdev->dev);
1159         else
1160                 pdata = dev_get_platdata(&pdev->dev);
1161
1162         if (!pdata) {
1163                 dev_err(&pdev->dev, "no setup data defined\n");
1164                 return -EINVAL;
1165         }
1166
1167         platform_set_drvdata(pdev, flctl);
1168         nand = &flctl->chip;
1169         flctl_mtd = nand_to_mtd(nand);
1170         nand_set_flash_node(nand, pdev->dev.of_node);
1171         flctl_mtd->dev.parent = &pdev->dev;
1172         flctl->pdev = pdev;
1173         flctl->hwecc = pdata->has_hwecc;
1174         flctl->holden = pdata->use_holden;
1175         flctl->flcmncr_base = pdata->flcmncr_val;
1176         flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
1177
1178         /* Set address of hardware control function */
1179         /* 20 us command delay time */
1180         nand->chip_delay = 20;
1181
1182         nand->read_byte = flctl_read_byte;
1183         nand->read_word = flctl_read_word;
1184         nand->write_buf = flctl_write_buf;
1185         nand->read_buf = flctl_read_buf;
1186         nand->select_chip = flctl_select_chip;
1187         nand->cmdfunc = flctl_cmdfunc;
1188         nand->onfi_set_features = nand_onfi_get_set_features_notsupp;
1189         nand->onfi_get_features = nand_onfi_get_set_features_notsupp;
1190
1191         if (pdata->flcmncr_val & SEL_16BIT)
1192                 nand->options |= NAND_BUSWIDTH_16;
1193
1194         pm_runtime_enable(&pdev->dev);
1195         pm_runtime_resume(&pdev->dev);
1196
1197         flctl_setup_dma(flctl);
1198
1199         ret = nand_scan_ident(flctl_mtd, 1, NULL);
1200         if (ret)
1201                 goto err_chip;
1202
1203         if (nand->options & NAND_BUSWIDTH_16) {
1204                 /*
1205                  * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
1206                  * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign
1207                  * flctl->flcmncr_base to pdata->flcmncr_val.
1208                  */
1209                 pdata->flcmncr_val |= SEL_16BIT;
1210                 flctl->flcmncr_base = pdata->flcmncr_val;
1211         }
1212
1213         ret = flctl_chip_init_tail(flctl_mtd);
1214         if (ret)
1215                 goto err_chip;
1216
1217         ret = nand_scan_tail(flctl_mtd);
1218         if (ret)
1219                 goto err_chip;
1220
1221         ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
1222
1223         return 0;
1224
1225 err_chip:
1226         flctl_release_dma(flctl);
1227         pm_runtime_disable(&pdev->dev);
1228         return ret;
1229 }
1230
/*
 * flctl_remove - tear down in reverse probe order: release DMA channels,
 * unregister/clean up the NAND device, then disable runtime PM.
 */
static int flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	flctl_release_dma(flctl);
	nand_release(&flctl->chip);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
1241
/*
 * No .probe here: the driver is registered through
 * module_platform_driver_probe(), which binds flctl_probe once at init
 * (the device cannot hotplug).
 */
static struct platform_driver flctl_driver = {
	.remove		= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.of_match_table = of_match_ptr(of_flctl_match),
	},
};

module_platform_driver_probe(flctl_driver, flctl_probe);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");