GNU Linux-libre 4.4.288-gnu1
drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define M28F00AP30      0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
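/* Firmware Hub locking helpers; among other things this provides
   fixup_use_fwh_lock() for the JEDEC fixup table below. */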
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

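        /* Advertise "instant individual block locking" (FeatureSupport bit 5)
           and flag the device as powering up with its blocks locked so they
           get unlocked automatically. */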
        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set a single partition, thus
                 * Simultaneous Operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

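/*
 * cfi_fixup() walks these tables and calls the handler of every entry whose
 * manufacturer/device IDs match the probed chip; CFI_MFR_ANY and CFI_ID_ANY
 * act as wildcards, and entries are applied in table order.
 */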
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common.  It looks as if the device IDs are as well.  This
         * table picks out the cases where we know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
        /*
         * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy
         * w.r.t. Erase Suspend for their small (0x8000-byte) erase blocks.
         */
        if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
                return 1;
        return 0;
}

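/*
 * Read and validate the Intel/Sharp extended query table.  Its total size
 * depends on several version-dependent, variable-length fields, so we read
 * a minimal structure first, work out how much extra data follows, and then
 * re-read the whole thing (the `need_more' label grows extp_size and jumps
 * back to `again').
 */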
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
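        /* CFI reports the per-chip write buffer size as a power of two
           (MaxBufWriteSize is log2 of the byte count); scale it by the
           interleave to get the map-wide buffer size. */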
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

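        /*
         * CFI encodes typical operation times as powers of two:
         * WordWriteTimeoutTyp and BufWriteTimeoutTyp are log2 microseconds,
         * BlockEraseTimeoutTyp is log2 milliseconds (hence the 1000<<
         * conversions below), and the *TimeoutMax fields are log2 of the
         * multiplier turning a typical time into the worst-case timeout.
         */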
        for (i=0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
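/* Command set IDs 0x0003 and 0x0200 are close enough to the Intel Extended
   set to be handled identically, so just alias them to cfi_cmdset_0001(). */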
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

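        /* Each EraseRegionInfo word packs (number of blocks - 1) in its low
           16 bits and (block size / 256 bytes) in its high 16 bits, hence
           the shift/mask arithmetic below. */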
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
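                /* Note: this assumes numparts is a power of two, in which
                   case __ffs(numparts) == log2(numparts). */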
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
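/*
 * Wait for (or arrange) a chip state in which an operation of type 'mode'
 * can proceed.  Returns 0 when the caller may go ahead, -EAGAIN when the
 * situation changed underneath us and the caller must re-evaluate, or a
 * negative error code.  Called with chip->mutex held; it may be dropped
 * and re-acquired while sleeping.
 */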
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
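        /* SR.7 (0x80) is the Write State Machine ready bit; SR.0 (0x01) is
           used on multi-partition chips to signal that the busy state
           belongs to another partition (see the FL_STATUS loop below). */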
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Do not allow suspend if the read/write falls within the
                   block being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* do not suspend small EBs, buggy Micron chips */
                if (cfi_is_micron_28F00AP30(cfi, chip) &&
                    (chip->in_progress_block_mask == ~(0x8000-1)))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have a possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own the chip if it is already in
                         * FL_SYNCING state.  Put the contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have a suspended erase
                 * on this chip.  If so, sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to whoever loaned it to us */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
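                /* 0xD0 resumes the suspended erase; 0x70 puts the chip
                   back into read-status mode. */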
                map_write(map, CMD(0xd0), chip->in_progress_block_addr);
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
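                        /* Suspend the operation (0xB0) and switch to
                           read-status mode (0x70) so SR.7 can be polled. */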
1160                         map_write(map, CMD(0xb0), adr);
1161                         map_write(map, CMD(0x70), adr);
1162                         suspended = xip_currtime();
1163                         do {
1164                                 if (xip_elapsed_since(suspended) > 100000) {
1165                                         /*
1166                                          * The chip doesn't want to suspend
1167                                          * after waiting for 100 msecs.
1168                                          * This is a critical error but there
1169                                          * is not much we can do here.
1170                                          */
1171                                         return -EIO;
1172                                 }
1173                                 status = map_read(map, adr);
1174                         } while (!map_word_andequal(map, status, OK, OK));
1175
1176                         /* Suspend succeeded */
1177                         oldstate = chip->state;
1178                         if (oldstate == FL_ERASING) {
1179                                 if (!map_word_bitsset(map, status, CMD(0x40)))
1180                                         break;
1181                                 newstate = FL_XIP_WHILE_ERASING;
1182                                 chip->erase_suspended = 1;
1183                         } else {
1184                                 if (!map_word_bitsset(map, status, CMD(0x04)))
1185                                         break;
1186                                 newstate = FL_XIP_WHILE_WRITING;
1187                                 chip->write_suspended = 1;
1188                         }
1189                         chip->state = newstate;
1190                         map_write(map, CMD(0xff), adr);
1191                         (void) map_read(map, adr);
1192                         xip_iprefetch();
1193                         local_irq_enable();
1194                         mutex_unlock(&chip->mutex);
1195                         xip_iprefetch();
1196                         cond_resched();
1197
1198                         /*
1199                          * We're back.  However someone else might have
1200                          * decided to go write to the chip if we are in
1201                          * a suspended erase state.  If so let's wait
1202                          * until it's done.
1203                          */
1204                         mutex_lock(&chip->mutex);
1205                         while (chip->state != newstate) {
1206                                 DECLARE_WAITQUEUE(wait, current);
1207                                 set_current_state(TASK_UNINTERRUPTIBLE);
1208                                 add_wait_queue(&chip->wq, &wait);
1209                                 mutex_unlock(&chip->mutex);
1210                                 schedule();
1211                                 remove_wait_queue(&chip->wq, &wait);
1212                                 mutex_lock(&chip->mutex);
1213                         }
1214                         /* Disallow XIP again */
1215                         local_irq_disable();
1216
1217                         /* Resume the write or erase operation */
1218                         map_write(map, CMD(0xd0), adr);
1219                         map_write(map, CMD(0x70), adr);
1220                         chip->state = oldstate;
1221                         start = xip_currtime();
1222                 } else if (usec >= 1000000/HZ) {
1223                         /*
1224                          * Try to save CPU power when the waiting delay
1225                          * is at least one system timer tick period.
1226                          * No need to be extremely accurate here.
1227                          */
1228                         xip_cpu_idle();
1229                 }
1230                 status = map_read(map, adr);
1231                 done = xip_elapsed_since(start);
1232         } while (!map_word_andequal(map, status, OK, OK)
1233                  && done < usec);
1234
1235         return (done >= usec) ? -ETIME : 0;
1236 }
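/*
 * For reference, the suspend handshake above uses the standard Intel
 * command set as issued by this code: 0xb0 suspends the in-progress
 * program/erase, 0x70 selects status-register reads so we can poll
 * for the ready bit (SR.7, 0x80), 0xff drops back to read-array mode
 * for XIP execution, and 0xd0 later resumes the suspended operation.
 * SR.6 (0x40) reports "erase suspended" and SR.2 (0x04) "program
 * suspended", which is why the code checks CMD(0x40) or CMD(0x04)
 * before trusting that the suspend actually took effect.
 */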
1237
1238 /*
1239  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1240  * the flash is actively programming or erasing since we have to poll for
1241  * the operation to complete anyway.  We can't do that in a generic way with
1242  * an XIP setup, so do it before the actual flash operation in this case
1243  * and stub it out from INVAL_CACHE_AND_WAIT.
1244  */
1245 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1246         INVALIDATE_CACHED_RANGE(map, from, size)
1247
1248 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1249         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1250
1251 #else
1252
1253 #define xip_disable(map, chip, adr)
1254 #define xip_enable(map, chip, adr)
1255 #define XIP_INVAL_CACHED_RANGE(x...)
1256 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1257
1258 static int inval_cache_and_wait_for_operation(
1259                 struct map_info *map, struct flchip *chip,
1260                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1261                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1262 {
1263         struct cfi_private *cfi = map->fldrv_priv;
1264         map_word status, status_OK = CMD(0x80);
1265         int chip_state = chip->state;
1266         unsigned int timeo, sleep_time, reset_timeo;
1267
1268         mutex_unlock(&chip->mutex);
1269         if (inval_len)
1270                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1271         mutex_lock(&chip->mutex);
1272
1273         timeo = chip_op_time_max;
1274         if (!timeo)
1275                 timeo = 500000;
1276         reset_timeo = timeo;
1277         sleep_time = chip_op_time / 2;
1278
1279         for (;;) {
1280                 if (chip->state != chip_state) {
1281                         /* Someone's suspended the operation: sleep */
1282                         DECLARE_WAITQUEUE(wait, current);
1283                         set_current_state(TASK_UNINTERRUPTIBLE);
1284                         add_wait_queue(&chip->wq, &wait);
1285                         mutex_unlock(&chip->mutex);
1286                         schedule();
1287                         remove_wait_queue(&chip->wq, &wait);
1288                         mutex_lock(&chip->mutex);
1289                         continue;
1290                 }
1291
1292                 status = map_read(map, cmd_adr);
1293                 if (map_word_andequal(map, status, status_OK, status_OK))
1294                         break;
1295
1296                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1297                         /* Erase suspend occurred while sleeping: reset timeout */
1298                         timeo = reset_timeo;
1299                         chip->erase_suspended = 0;
1300                 }
1301                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1302                         /* Write suspend occurred while sleeping: reset timeout */
1303                         timeo = reset_timeo;
1304                         chip->write_suspended = 0;
1305                 }
1306                 if (!timeo) {
1307                         map_write(map, CMD(0x70), cmd_adr);
1308                         chip->state = FL_STATUS;
1309                         return -ETIME;
1310                 }
1311
1312                 /* OK, still waiting. Drop the lock, wait a while and retry. */
1313                 mutex_unlock(&chip->mutex);
1314                 if (sleep_time >= 1000000/HZ) {
1315                         /*
1316                          * At least half of the normal delay is still
1317                          * remaining: it can be spent sleeping instead
1318                          * of busy-waiting.
1319                          */
1320                         msleep(sleep_time/1000);
1321                         timeo -= sleep_time;
1322                         sleep_time = 1000000/HZ;
1323                 } else {
1324                         udelay(1);
1325                         cond_resched();
1326                         timeo--;
1327                 }
1328                 mutex_lock(&chip->mutex);
1329         }
1330
1331         /* Done and happy. */
1332         chip->state = FL_STATUS;
1333         return 0;
1334 }
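/*
 * A worked example of the back-off above, assuming a typical buffer
 * write time of 2048us and HZ=100 (10ms ticks): sleep_time starts at
 * 1024us, which is below one tick, so the wait is spent in the
 * udelay(1)/cond_resched() branch.  Only operations whose typical
 * time is at least two ticks (e.g. a block erase measured in
 * seconds) take the msleep() path, where sleep_time is then clamped
 * to one tick so the status register is still polled at every tick
 * boundary until SR.7 reports ready or timeo expires.
 */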
1335
1336 #endif
1337
1338 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1339         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1340
1341
1342 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1343 {
1344         unsigned long cmd_addr;
1345         struct cfi_private *cfi = map->fldrv_priv;
1346         int ret = 0;
1347
1348         adr += chip->start;
1349
1350         /* Ensure cmd read/writes are aligned. */
1351         cmd_addr = adr & ~(map_bankwidth(map)-1);
1352
1353         mutex_lock(&chip->mutex);
1354
1355         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1356
1357         if (!ret) {
1358                 if (chip->state != FL_POINT && chip->state != FL_READY)
1359                         map_write(map, CMD(0xff), cmd_addr);
1360
1361                 chip->state = FL_POINT;
1362                 chip->ref_point_counter++;
1363         }
1364         mutex_unlock(&chip->mutex);
1365
1366         return ret;
1367 }
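/*
 * FL_POINT is reference counted: every successful point increments
 * ref_point_counter here, and cfi_intelext_unpoint() below only puts
 * the chip back to FL_READY once that counter drops to zero, so
 * overlapping point ranges on the same chip stay valid until the
 * last user unpoints.
 */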
1368
1369 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1370                 size_t *retlen, void **virt, resource_size_t *phys)
1371 {
1372         struct map_info *map = mtd->priv;
1373         struct cfi_private *cfi = map->fldrv_priv;
1374         unsigned long ofs, last_end = 0;
1375         int chipnum;
1376         int ret = 0;
1377
1378         if (!map->virt)
1379                 return -EINVAL;
1380
1381         /* Now lock the chip(s) to POINT state */
1382
1383         /* ofs: offset within the first chip that the first read should start */
1384         chipnum = (from >> cfi->chipshift);
1385         ofs = from - (chipnum << cfi->chipshift);
1386
1387         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1388         if (phys)
1389                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1390
1391         while (len) {
1392                 unsigned long thislen;
1393
1394                 if (chipnum >= cfi->numchips)
1395                         break;
1396
1397                 /* We cannot point across chips that are virtually disjoint */
1398                 if (!last_end)
1399                         last_end = cfi->chips[chipnum].start;
1400                 else if (cfi->chips[chipnum].start != last_end)
1401                         break;
1402
1403                 if ((len + ofs - 1) >> cfi->chipshift)
1404                         thislen = (1<<cfi->chipshift) - ofs;
1405                 else
1406                         thislen = len;
1407
1408                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1409                 if (ret)
1410                         break;
1411
1412                 *retlen += thislen;
1413                 len -= thislen;
1414
1415                 ofs = 0;
1416                 last_end += 1 << cfi->chipshift;
1417                 chipnum++;
1418         }
1419         return 0;
1420 }
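/*
 * The chip-splitting arithmetic above is shared with the read and
 * write paths below.  As an illustration (with a hypothetical
 * geometry): for cfi->chipshift = 23 (8MiB chips), a request at
 * from = 0xc00000 yields chipnum = 0xc00000 >> 23 = 1 and
 * ofs = 0xc00000 - 0x800000 = 0x400000; a length crossing the chip
 * end is clipped to thislen = (1 << 23) - ofs, after which the loop
 * continues on the next chip with ofs = 0.
 */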
1421
1422 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1423 {
1424         struct map_info *map = mtd->priv;
1425         struct cfi_private *cfi = map->fldrv_priv;
1426         unsigned long ofs;
1427         int chipnum, err = 0;
1428
1429         /* Now unlock the chip(s) POINT state */
1430
1431         /* ofs: offset within the first chip that the first read should start */
1432         chipnum = (from >> cfi->chipshift);
1433         ofs = from - (chipnum <<  cfi->chipshift);
1434
1435         while (len && !err) {
1436                 unsigned long thislen;
1437                 struct flchip *chip;
1438
1439                 if (chipnum >= cfi->numchips)
1440                         break;
1441                 chip = &cfi->chips[chipnum];
1442
1443                 if ((len + ofs - 1) >> cfi->chipshift)
1444                         thislen = (1<<cfi->chipshift) - ofs;
1445                 else
1446                         thislen = len;
1447
1448                 mutex_lock(&chip->mutex);
1449                 if (chip->state == FL_POINT) {
1450                         chip->ref_point_counter--;
1451                         if (chip->ref_point_counter == 0)
1452                                 chip->state = FL_READY;
1453                 } else {
1454                         printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1455                         err = -EINVAL;
1456                 }
1457
1458                 put_chip(map, chip, chip->start);
1459                 mutex_unlock(&chip->mutex);
1460
1461                 len -= thislen;
1462                 ofs = 0;
1463                 chipnum++;
1464         }
1465
1466         return err;
1467 }
1468
1469 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1470 {
1471         unsigned long cmd_addr;
1472         struct cfi_private *cfi = map->fldrv_priv;
1473         int ret;
1474
1475         adr += chip->start;
1476
1477         /* Ensure cmd read/writes are aligned. */
1478         cmd_addr = adr & ~(map_bankwidth(map)-1);
1479
1480         mutex_lock(&chip->mutex);
1481         ret = get_chip(map, chip, cmd_addr, FL_READY);
1482         if (ret) {
1483                 mutex_unlock(&chip->mutex);
1484                 return ret;
1485         }
1486
1487         if (chip->state != FL_POINT && chip->state != FL_READY) {
1488                 map_write(map, CMD(0xff), cmd_addr);
1489
1490                 chip->state = FL_READY;
1491         }
1492
1493         map_copy_from(map, buf, adr, len);
1494
1495         put_chip(map, chip, cmd_addr);
1496
1497         mutex_unlock(&chip->mutex);
1498         return 0;
1499 }
1500
1501 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1502 {
1503         struct map_info *map = mtd->priv;
1504         struct cfi_private *cfi = map->fldrv_priv;
1505         unsigned long ofs;
1506         int chipnum;
1507         int ret = 0;
1508
1509         /* ofs: offset within the first chip that the first read should start */
1510         chipnum = (from >> cfi->chipshift);
1511         ofs = from - (chipnum <<  cfi->chipshift);
1512
1513         while (len) {
1514                 unsigned long thislen;
1515
1516                 if (chipnum >= cfi->numchips)
1517                         break;
1518
1519                 if ((len + ofs - 1) >> cfi->chipshift)
1520                         thislen = (1<<cfi->chipshift) - ofs;
1521                 else
1522                         thislen = len;
1523
1524                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1525                 if (ret)
1526                         break;
1527
1528                 *retlen += thislen;
1529                 len -= thislen;
1530                 buf += thislen;
1531
1532                 ofs = 0;
1533                 chipnum++;
1534         }
1535         return ret;
1536 }
1537
1538 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1539                                      unsigned long adr, map_word datum, int mode)
1540 {
1541         struct cfi_private *cfi = map->fldrv_priv;
1542         map_word status, write_cmd;
1543         int ret = 0;
1544
1545         adr += chip->start;
1546
1547         switch (mode) {
1548         case FL_WRITING:
1549                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1550                 break;
1551         case FL_OTP_WRITE:
1552                 write_cmd = CMD(0xc0);
1553                 break;
1554         default:
1555                 return -EINVAL;
1556         }
1557
1558         mutex_lock(&chip->mutex);
1559         ret = get_chip(map, chip, adr, mode);
1560         if (ret) {
1561                 mutex_unlock(&chip->mutex);
1562                 return ret;
1563         }
1564
1565         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1566         ENABLE_VPP(map);
1567         xip_disable(map, chip, adr);
1568         map_write(map, write_cmd, adr);
1569         map_write(map, datum, adr);
1570         chip->state = mode;
1571
1572         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1573                                    adr, map_bankwidth(map),
1574                                    chip->word_write_time,
1575                                    chip->word_write_time_max);
1576         if (ret) {
1577                 xip_enable(map, chip, adr);
1578                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1579                 goto out;
1580         }
1581
1582         /* check for errors */
1583         status = map_read(map, adr);
1584         if (map_word_bitsset(map, status, CMD(0x1a))) {
1585                 unsigned long chipstatus = MERGESTATUS(status);
1586
1587                 /* reset status */
1588                 map_write(map, CMD(0x50), adr);
1589                 map_write(map, CMD(0x70), adr);
1590                 xip_enable(map, chip, adr);
1591
1592                 if (chipstatus & 0x02) {
1593                         ret = -EROFS;
1594                 } else if (chipstatus & 0x08) {
1595                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1596                         ret = -EIO;
1597                 } else {
1598                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1599                         ret = -EINVAL;
1600                 }
1601
1602                 goto out;
1603         }
1604
1605         xip_enable(map, chip, adr);
1606  out:   DISABLE_VPP(map);
1607         put_chip(map, chip, adr);
1608         mutex_unlock(&chip->mutex);
1609         return ret;
1610 }
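/*
 * The error decode above relies on the Intel status register layout:
 * CMD(0x1a) tests SR.4 (program error), SR.3 (VPP low) and SR.1
 * (block locked) in one go.  A set SR.1 maps to -EROFS and a set
 * SR.3 to -EIO; anything else becomes -EINVAL.  The 0x50/0x70 pair
 * clears the sticky error bits and returns the chip to status mode
 * before we report the failure.
 */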
1611
1612
1613 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1614 {
1615         struct map_info *map = mtd->priv;
1616         struct cfi_private *cfi = map->fldrv_priv;
1617         int ret = 0;
1618         int chipnum;
1619         unsigned long ofs;
1620
1621         chipnum = to >> cfi->chipshift;
1622         ofs = to  - (chipnum << cfi->chipshift);
1623
1624         /* If the start is not bus-aligned, do a partial write of the first word */
1625         if (ofs & (map_bankwidth(map)-1)) {
1626                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1627                 int gap = ofs - bus_ofs;
1628                 int n;
1629                 map_word datum;
1630
1631                 n = min_t(int, len, map_bankwidth(map)-gap);
1632                 datum = map_word_ff(map);
1633                 datum = map_word_load_partial(map, datum, buf, gap, n);
1634
1635                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1636                                                bus_ofs, datum, FL_WRITING);
1637                 if (ret)
1638                         return ret;
1639
1640                 len -= n;
1641                 ofs += n;
1642                 buf += n;
1643                 (*retlen) += n;
1644
1645                 if (ofs >> cfi->chipshift) {
1646                         chipnum++;
1647                         ofs = 0;
1648                         if (chipnum == cfi->numchips)
1649                                 return 0;
1650                 }
1651         }
1652
1653         while (len >= map_bankwidth(map)) {
1654                 map_word datum = map_word_load(map, buf);
1655
1656                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1657                                        ofs, datum, FL_WRITING);
1658                 if (ret)
1659                         return ret;
1660
1661                 ofs += map_bankwidth(map);
1662                 buf += map_bankwidth(map);
1663                 (*retlen) += map_bankwidth(map);
1664                 len -= map_bankwidth(map);
1665
1666                 if (ofs >> cfi->chipshift) {
1667                         chipnum++;
1668                         ofs = 0;
1669                         if (chipnum == cfi->numchips)
1670                                 return 0;
1671                 }
1672         }
1673
1674         if (len & (map_bankwidth(map)-1)) {
1675                 map_word datum;
1676
1677                 datum = map_word_ff(map);
1678                 datum = map_word_load_partial(map, datum, buf, 0, len);
1679
1680                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1681                                        ofs, datum, FL_WRITING);
1682                 if (ret)
1683                         return ret;
1684
1685                 (*retlen) += len;
1686         }
1687
1688         return 0;
1689 }
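/*
 * A worked example of the alignment handling above, assuming a
 * 2-byte bus (map_bankwidth(map) == 2) and a 3-byte write to offset
 * 0x2001: the head fixup computes bus_ofs = 0x2000, gap = 1, n = 1,
 * merges that single byte into an all-0xff word via
 * map_word_load_partial() and programs it; the main loop then writes
 * one full word at 0x2002 and no tail remains.  Padding with 0xff is
 * safe because programming can only clear bits, so 0xff leaves the
 * neighbouring bytes untouched.
 */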
1690
1691
1692 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1693                                     unsigned long adr, const struct kvec **pvec,
1694                                     unsigned long *pvec_seek, int len)
1695 {
1696         struct cfi_private *cfi = map->fldrv_priv;
1697         map_word status, write_cmd, datum;
1698         unsigned long cmd_adr;
1699         int ret, wbufsize, word_gap, words;
1700         const struct kvec *vec;
1701         unsigned long vec_seek;
1702         unsigned long initial_adr;
1703         int initial_len = len;
1704
1705         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1706         adr += chip->start;
1707         initial_adr = adr;
1708         cmd_adr = adr & ~(wbufsize-1);
1709
1710         /* Sharp LH28F640BF chips need the first address for the
1711          * Page Buffer Program command. See Table 5 of
1712          * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1713         if (is_LH28F640BF(cfi))
1714                 cmd_adr = adr;
1715
1716         /* Let's determine this according to the interleave only once */
1717         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1718
1719         mutex_lock(&chip->mutex);
1720         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1721         if (ret) {
1722                 mutex_unlock(&chip->mutex);
1723                 return ret;
1724         }
1725
1726         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1727         ENABLE_VPP(map);
1728         xip_disable(map, chip, cmd_adr);
1729
1730         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1731            [...], the device will not accept any more Write to Buffer commands".
1732            So we must check here and reset those bits if they're set. Otherwise
1733            we're just pissing in the wind */
1734         if (chip->state != FL_STATUS) {
1735                 map_write(map, CMD(0x70), cmd_adr);
1736                 chip->state = FL_STATUS;
1737         }
1738         status = map_read(map, cmd_adr);
1739         if (map_word_bitsset(map, status, CMD(0x30))) {
1740                 xip_enable(map, chip, cmd_adr);
1741                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1742                 xip_disable(map, chip, cmd_adr);
1743                 map_write(map, CMD(0x50), cmd_adr);
1744                 map_write(map, CMD(0x70), cmd_adr);
1745         }
1746
1747         chip->state = FL_WRITING_TO_BUFFER;
1748         map_write(map, write_cmd, cmd_adr);
1749         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1750         if (ret) {
1751                 /* Argh. Not ready for write to buffer */
1752                 map_word Xstatus = map_read(map, cmd_adr);
1753                 map_write(map, CMD(0x70), cmd_adr);
1754                 chip->state = FL_STATUS;
1755                 status = map_read(map, cmd_adr);
1756                 map_write(map, CMD(0x50), cmd_adr);
1757                 map_write(map, CMD(0x70), cmd_adr);
1758                 xip_enable(map, chip, cmd_adr);
1759                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1760                                 map->name, Xstatus.x[0], status.x[0]);
1761                 goto out;
1762         }
1763
1764         /* Figure out the number of words to write */
1765         word_gap = (-adr & (map_bankwidth(map)-1));
1766         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1767         if (!word_gap) {
1768                 words--;
1769         } else {
1770                 word_gap = map_bankwidth(map) - word_gap;
1771                 adr -= word_gap;
1772                 datum = map_word_ff(map);
1773         }
1774
1775         /* Write length of data to come */
1776         map_write(map, CMD(words), cmd_adr);
1777
1778         /* Write data */
1779         vec = *pvec;
1780         vec_seek = *pvec_seek;
1781         do {
1782                 int n = map_bankwidth(map) - word_gap;
1783                 if (n > vec->iov_len - vec_seek)
1784                         n = vec->iov_len - vec_seek;
1785                 if (n > len)
1786                         n = len;
1787
1788                 if (!word_gap && len < map_bankwidth(map))
1789                         datum = map_word_ff(map);
1790
1791                 datum = map_word_load_partial(map, datum,
1792                                               vec->iov_base + vec_seek,
1793                                               word_gap, n);
1794
1795                 len -= n;
1796                 word_gap += n;
1797                 if (!len || word_gap == map_bankwidth(map)) {
1798                         map_write(map, datum, adr);
1799                         adr += map_bankwidth(map);
1800                         word_gap = 0;
1801                 }
1802
1803                 vec_seek += n;
1804                 if (vec_seek == vec->iov_len) {
1805                         vec++;
1806                         vec_seek = 0;
1807                 }
1808         } while (len);
1809         *pvec = vec;
1810         *pvec_seek = vec_seek;
1811
1812         /* GO GO GO */
1813         map_write(map, CMD(0xd0), cmd_adr);
1814         chip->state = FL_WRITING;
1815
1816         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1817                                    initial_adr, initial_len,
1818                                    chip->buffer_write_time,
1819                                    chip->buffer_write_time_max);
1820         if (ret) {
1821                 map_write(map, CMD(0x70), cmd_adr);
1822                 chip->state = FL_STATUS;
1823                 xip_enable(map, chip, cmd_adr);
1824                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1825                 goto out;
1826         }
1827
1828         /* check for errors */
1829         status = map_read(map, cmd_adr);
1830         if (map_word_bitsset(map, status, CMD(0x1a))) {
1831                 unsigned long chipstatus = MERGESTATUS(status);
1832
1833                 /* reset status */
1834                 map_write(map, CMD(0x50), cmd_adr);
1835                 map_write(map, CMD(0x70), cmd_adr);
1836                 xip_enable(map, chip, cmd_adr);
1837
1838                 if (chipstatus & 0x02) {
1839                         ret = -EROFS;
1840                 } else if (chipstatus & 0x08) {
1841                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1842                         ret = -EIO;
1843                 } else {
1844                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1845                         ret = -EINVAL;
1846                 }
1847
1848                 goto out;
1849         }
1850
1851         xip_enable(map, chip, cmd_adr);
1852  out:   DISABLE_VPP(map);
1853         put_chip(map, chip, cmd_adr);
1854         mutex_unlock(&chip->mutex);
1855         return ret;
1856 }
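/*
 * Summary of the buffered-program protocol driven above: 0xe8 (0xe9
 * on Performance parts) requests the write buffer, WAIT_TIMEOUT
 * polls the extended status until the buffer is granted, the word
 * count is written as N-1 (hence the words-- in the aligned case,
 * and the uncounted extra head word in the unaligned one), then the
 * data words follow and 0xd0 confirms and starts the actual program
 * operation.
 */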
1857
1858 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1859                                 unsigned long count, loff_t to, size_t *retlen)
1860 {
1861         struct map_info *map = mtd->priv;
1862         struct cfi_private *cfi = map->fldrv_priv;
1863         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1864         int ret = 0;
1865         int chipnum;
1866         unsigned long ofs, vec_seek, i;
1867         size_t len = 0;
1868
1869         for (i = 0; i < count; i++)
1870                 len += vecs[i].iov_len;
1871
1872         if (!len)
1873                 return 0;
1874
1875         chipnum = to >> cfi->chipshift;
1876         ofs = to - (chipnum << cfi->chipshift);
1877         vec_seek = 0;
1878
1879         do {
1880                 /* We must not cross write block boundaries */
1881                 int size = wbufsize - (ofs & (wbufsize-1));
1882
1883                 if (size > len)
1884                         size = len;
1885                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1886                                       ofs, &vecs, &vec_seek, size);
1887                 if (ret)
1888                         return ret;
1889
1890                 ofs += size;
1891                 (*retlen) += size;
1892                 len -= size;
1893
1894                 if (ofs >> cfi->chipshift) {
1895                         chipnum++;
1896                         ofs = 0;
1897                         if (chipnum == cfi->numchips)
1898                                 return 0;
1899                 }
1900
1901                 /* Be nice and reschedule with the chip in a usable state for other
1902                    processes. */
1903                 cond_resched();
1904
1905         } while (len);
1906
1907         return 0;
1908 }
1909
1910 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1911                                        size_t len, size_t *retlen, const u_char *buf)
1912 {
1913         struct kvec vec;
1914
1915         vec.iov_base = (void *) buf;
1916         vec.iov_len = len;
1917
1918         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1919 }
1920
1921 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1922                                       unsigned long adr, int len, void *thunk)
1923 {
1924         struct cfi_private *cfi = map->fldrv_priv;
1925         map_word status;
1926         int retries = 3;
1927         int ret;
1928
1929         adr += chip->start;
1930
1931  retry:
1932         mutex_lock(&chip->mutex);
1933         ret = get_chip(map, chip, adr, FL_ERASING);
1934         if (ret) {
1935                 mutex_unlock(&chip->mutex);
1936                 return ret;
1937         }
1938
1939         XIP_INVAL_CACHED_RANGE(map, adr, len);
1940         ENABLE_VPP(map);
1941         xip_disable(map, chip, adr);
1942
1943         /* Clear the status register first */
1944         map_write(map, CMD(0x50), adr);
1945
1946         /* Now erase */
1947         map_write(map, CMD(0x20), adr);
1948         map_write(map, CMD(0xD0), adr);
1949         chip->state = FL_ERASING;
1950         chip->erase_suspended = 0;
1951         chip->in_progress_block_addr = adr;
1952         chip->in_progress_block_mask = ~(len - 1);
1953
1954         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1955                                    adr, len,
1956                                    chip->erase_time,
1957                                    chip->erase_time_max);
1958         if (ret) {
1959                 map_write(map, CMD(0x70), adr);
1960                 chip->state = FL_STATUS;
1961                 xip_enable(map, chip, adr);
1962                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1963                 goto out;
1964         }
1965
1966         /* We've broken this before. It doesn't hurt to be safe */
1967         map_write(map, CMD(0x70), adr);
1968         chip->state = FL_STATUS;
1969         status = map_read(map, adr);
1970
1971         /* check for errors */
1972         if (map_word_bitsset(map, status, CMD(0x3a))) {
1973                 unsigned long chipstatus = MERGESTATUS(status);
1974
1975                 /* Reset the error bits */
1976                 map_write(map, CMD(0x50), adr);
1977                 map_write(map, CMD(0x70), adr);
1978                 xip_enable(map, chip, adr);
1979
1980                 if ((chipstatus & 0x30) == 0x30) {
1981                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1982                         ret = -EINVAL;
1983                 } else if (chipstatus & 0x02) {
1984                         /* Protection bit set */
1985                         ret = -EROFS;
1986                 } else if (chipstatus & 0x8) {
1987                         /* Voltage */
1988                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1989                         ret = -EIO;
1990                 } else if (chipstatus & 0x20 && retries--) {
1991                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1992                         DISABLE_VPP(map);
1993                         put_chip(map, chip, adr);
1994                         mutex_unlock(&chip->mutex);
1995                         goto retry;
1996                 } else {
1997                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1998                         ret = -EIO;
1999                 }
2000
2001                 goto out;
2002         }
2003
2004         xip_enable(map, chip, adr);
2005  out:   DISABLE_VPP(map);
2006         put_chip(map, chip, adr);
2007         mutex_unlock(&chip->mutex);
2008         return ret;
2009 }
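/*
 * Block erase above is the usual two-cycle sequence: 0x50 clears any
 * stale status bits, then 0x20 + 0xd0 arm and confirm the erase.
 * CMD(0x3a) tests SR.5/SR.4/SR.3/SR.1 together; SR.4 and SR.5 both
 * set (the 0x30 == 0x30 pattern) is the chip's "command sequence
 * error" indication, while SR.5 alone is a plain erase failure that
 * is given up to three retries.
 */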
2010
2011 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2012 {
2013         unsigned long ofs, len;
2014         int ret;
2015
2016         ofs = instr->addr;
2017         len = instr->len;
2018
2019         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2020         if (ret)
2021                 return ret;
2022
2023         instr->state = MTD_ERASE_DONE;
2024         mtd_erase_callback(instr);
2025
2026         return 0;
2027 }
2028
2029 static void cfi_intelext_sync (struct mtd_info *mtd)
2030 {
2031         struct map_info *map = mtd->priv;
2032         struct cfi_private *cfi = map->fldrv_priv;
2033         int i;
2034         struct flchip *chip;
2035         int ret = 0;
2036
2037         for (i=0; !ret && i<cfi->numchips; i++) {
2038                 chip = &cfi->chips[i];
2039
2040                 mutex_lock(&chip->mutex);
2041                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2042
2043                 if (!ret) {
2044                         chip->oldstate = chip->state;
2045                         chip->state = FL_SYNCING;
2046                         /* No need to wake_up() on this state change -
2047                          * as the whole point is that nobody can do anything
2048                          * with the chip now anyway.
2049                          */
2050                 }
2051                 mutex_unlock(&chip->mutex);
2052         }
2053
2054         /* Unlock the chips again */
2055
2056         for (i--; i >= 0; i--) {
2057                 chip = &cfi->chips[i];
2058
2059                 mutex_lock(&chip->mutex);
2060
2061                 if (chip->state == FL_SYNCING) {
2062                         chip->state = chip->oldstate;
2063                         chip->oldstate = FL_READY;
2064                         wake_up(&chip->wq);
2065                 }
2066                 mutex_unlock(&chip->mutex);
2067         }
2068 }
2069
2070 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2071                                                 struct flchip *chip,
2072                                                 unsigned long adr,
2073                                                 int len, void *thunk)
2074 {
2075         struct cfi_private *cfi = map->fldrv_priv;
2076         int status, ofs_factor = cfi->interleave * cfi->device_type;
2077
2078         adr += chip->start;
2079         xip_disable(map, chip, adr+(2*ofs_factor));
2080         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2081         chip->state = FL_JEDEC_QUERY;
2082         status = cfi_read_query(map, adr+(2*ofs_factor));
2083         xip_enable(map, chip, 0);
2084         return status;
2085 }
2086
2087 #ifdef DEBUG_LOCK_BITS
2088 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2089                                                 struct flchip *chip,
2090                                                 unsigned long adr,
2091                                                 int len, void *thunk)
2092 {
2093         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2094                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2095         return 0;
2096 }
2097 #endif
2098
2099 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2100 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2101
2102 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2103                                        unsigned long adr, int len, void *thunk)
2104 {
2105         struct cfi_private *cfi = map->fldrv_priv;
2106         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2107         int mdelay;
2108         int ret;
2109
2110         adr += chip->start;
2111
2112         mutex_lock(&chip->mutex);
2113         ret = get_chip(map, chip, adr, FL_LOCKING);
2114         if (ret) {
2115                 mutex_unlock(&chip->mutex);
2116                 return ret;
2117         }
2118
2119         ENABLE_VPP(map);
2120         xip_disable(map, chip, adr);
2121
2122         map_write(map, CMD(0x60), adr);
2123         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2124                 map_write(map, CMD(0x01), adr);
2125                 chip->state = FL_LOCKING;
2126         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2127                 map_write(map, CMD(0xD0), adr);
2128                 chip->state = FL_UNLOCKING;
2129         } else
2130                 BUG();
2131
2132         /*
2133          * If Instant Individual Block Locking is supported then there is
2134          * no need to delay.
2135          */
2136         /*
2137          * Unlocking may take up to 1.4 seconds on some Intel flashes, so
2138          * let's use a max of 1.5 seconds (1500 ms) as the timeout.
2139          *
2140          * See "Clear Block Lock-Bits Time" on page 40 in
2141          * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2142          * from February 2003
2143          */
2144         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2145
2146         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2147         if (ret) {
2148                 map_write(map, CMD(0x70), adr);
2149                 chip->state = FL_STATUS;
2150                 xip_enable(map, chip, adr);
2151                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2152                 goto out;
2153         }
2154
2155         xip_enable(map, chip, adr);
2156  out:   DISABLE_VPP(map);
2157         put_chip(map, chip, adr);
2158         mutex_unlock(&chip->mutex);
2159         return ret;
2160 }
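/*
 * Locking uses the 0x60 "lock setup" prefix: 0x60 + 0x01 sets the
 * block's lock bit and 0x60 + 0xd0 clears it.  Note the units of
 * mdelay above: WAIT_TIMEOUT takes (typical, max) figures in
 * microseconds, so mdelay = 1500 with mdelay * 1000 as the max
 * really means "poll for up to 1.5s" on parts without instant
 * individual block locking.
 */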
2161
2162 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2163 {
2164         int ret;
2165
2166 #ifdef DEBUG_LOCK_BITS
2167         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2168                __func__, ofs, len);
2169         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2170                 ofs, len, NULL);
2171 #endif
2172
2173         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2174                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2175
2176 #ifdef DEBUG_LOCK_BITS
2177         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2178                __func__, ret);
2179         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2180                 ofs, len, NULL);
2181 #endif
2182
2183         return ret;
2184 }
2185
2186 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2187 {
2188         int ret;
2189
2190 #ifdef DEBUG_LOCK_BITS
2191         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2192                __func__, ofs, len);
2193         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2194                 ofs, len, NULL);
2195 #endif
2196
2197         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2198                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2199
2200 #ifdef DEBUG_LOCK_BITS
2201         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2202                __func__, ret);
2203         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2204                 ofs, len, NULL);
2205 #endif
2206
2207         return ret;
2208 }
2209
2210 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2211                                   uint64_t len)
2212 {
2213         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2214                                 ofs, len, NULL) ? 1 : 0;
2215 }
2216
2217 #ifdef CONFIG_MTD_OTP
2218
2219 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2220                         u_long data_offset, u_char *buf, u_int size,
2221                         u_long prot_offset, u_int groupno, u_int groupsize);
2222
2223 static int __xipram
2224 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2225             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2226 {
2227         struct cfi_private *cfi = map->fldrv_priv;
2228         int ret;
2229
2230         mutex_lock(&chip->mutex);
2231         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2232         if (ret) {
2233                 mutex_unlock(&chip->mutex);
2234                 return ret;
2235         }
2236
2237         /* let's ensure we're not reading back cached data from array mode */
2238         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2239
2240         xip_disable(map, chip, chip->start);
2241         if (chip->state != FL_JEDEC_QUERY) {
2242                 map_write(map, CMD(0x90), chip->start);
2243                 chip->state = FL_JEDEC_QUERY;
2244         }
2245         map_copy_from(map, buf, chip->start + offset, size);
2246         xip_enable(map, chip, chip->start);
2247
2248         /* then ensure we don't keep OTP data in the cache */
2249         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2250
2251         put_chip(map, chip, chip->start);
2252         mutex_unlock(&chip->mutex);
2253         return 0;
2254 }
2255
2256 static int
2257 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2258              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2259 {
2260         int ret;
2261
2262         while (size) {
2263                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2264                 int gap = offset - bus_ofs;
2265                 int n = min_t(int, size, map_bankwidth(map)-gap);
2266                 map_word datum = map_word_ff(map);
2267
2268                 datum = map_word_load_partial(map, datum, buf, gap, n);
2269                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2270                 if (ret)
2271                         return ret;
2272
2273                 offset += n;
2274                 buf += n;
2275                 size -= n;
2276         }
2277
2278         return 0;
2279 }
2280
2281 static int
2282 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2283             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2284 {
2285         struct cfi_private *cfi = map->fldrv_priv;
2286         map_word datum;
2287
2288         /* make sure area matches group boundaries */
2289         if (size != grpsz)
2290                 return -EXDEV;
2291
2292         datum = map_word_ff(map);
2293         datum = map_word_clr(map, datum, CMD(1 << grpno));
2294         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2295 }
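/*
 * A protection-register group is locked by clearing its bit in the
 * lock word: map_word_clr() above builds an all-ones word with bit
 * grpno zeroed and programs it at the prot offset.  This is also why
 * the walk below reads that word back and reports a group as locked
 * when !map_word_bitsset() on CMD(1 << groupno).
 */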
2296
2297 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2298                                  size_t *retlen, u_char *buf,
2299                                  otp_op_t action, int user_regs)
2300 {
2301         struct map_info *map = mtd->priv;
2302         struct cfi_private *cfi = map->fldrv_priv;
2303         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2304         struct flchip *chip;
2305         struct cfi_intelext_otpinfo *otp;
2306         u_long devsize, reg_prot_offset, data_offset;
2307         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2308         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2309         int ret;
2310
2311         *retlen = 0;
2312
2313         /* Check that we actually have some OTP registers */
2314         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2315                 return -ENODATA;
2316
2317         /* we need real chips here not virtual ones */
2318         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2319         chip_step = devsize >> cfi->chipshift;
2320         chip_num = 0;
2321
2322         /* Some chips have OTP located in the _top_ partition only.
2323            For example: Intel 28F256L18T (T means top-parameter device) */
2324         if (cfi->mfr == CFI_MFR_INTEL) {
2325                 switch (cfi->id) {
2326                 case 0x880b:
2327                 case 0x880c:
2328                 case 0x880d:
2329                         chip_num = chip_step - 1;
2330                 }
2331         }
2332
2333         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2334                 chip = &cfi->chips[chip_num];
2335                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2336
2337                 /* first OTP region */
2338                 field = 0;
2339                 reg_prot_offset = extp->ProtRegAddr;
2340                 reg_fact_groups = 1;
2341                 reg_fact_size = 1 << extp->FactProtRegSize;
2342                 reg_user_groups = 1;
2343                 reg_user_size = 1 << extp->UserProtRegSize;
2344
2345                 while (len > 0) {
2346                         /* flash geometry fixup */
2347                         data_offset = reg_prot_offset + 1;
2348                         data_offset *= cfi->interleave * cfi->device_type;
2349                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2350                         reg_fact_size *= cfi->interleave;
2351                         reg_user_size *= cfi->interleave;
2352
2353                         if (user_regs) {
2354                                 groups = reg_user_groups;
2355                                 groupsize = reg_user_size;
2356                                 /* skip over factory reg area */
2357                                 groupno = reg_fact_groups;
2358                                 data_offset += reg_fact_groups * reg_fact_size;
2359                         } else {
2360                                 groups = reg_fact_groups;
2361                                 groupsize = reg_fact_size;
2362                                 groupno = 0;
2363                         }
2364
2365                         while (len > 0 && groups > 0) {
2366                                 if (!action) {
2367                                         /*
2368                                          * Special case: if action is NULL
2369                                          * we fill buf with otp_info records.
2370                                          */
2371                                         struct otp_info *otpinfo;
2372                                         map_word lockword;
2373                                         if (len <= sizeof(struct otp_info))
2374                                                 return -ENOSPC;
2375                                         len -= sizeof(struct otp_info);
2376                                         ret = do_otp_read(map, chip,
2377                                                           reg_prot_offset,
2378                                                           (u_char *)&lockword,
2379                                                           map_bankwidth(map),
2380                                                           0, 0,  0);
2381                                         if (ret)
2382                                                 return ret;
2383                                         otpinfo = (struct otp_info *)buf;
2384                                         otpinfo->start = from;
2385                                         otpinfo->length = groupsize;
2386                                         otpinfo->locked =
2387                                            !map_word_bitsset(map, lockword,
2388                                                              CMD(1 << groupno));
2389                                         from += groupsize;
2390                                         buf += sizeof(*otpinfo);
2391                                         *retlen += sizeof(*otpinfo);
2392                                 } else if (from >= groupsize) {
2393                                         from -= groupsize;
2394                                         data_offset += groupsize;
2395                                 } else {
2396                                         int size = groupsize;
2397                                         data_offset += from;
2398                                         size -= from;
2399                                         from = 0;
2400                                         if (size > len)
2401                                                 size = len;
2402                                         ret = action(map, chip, data_offset,
2403                                                      buf, size, reg_prot_offset,
2404                                                      groupno, groupsize);
2405                                         if (ret < 0)
2406                                                 return ret;
2407                                         buf += size;
2408                                         len -= size;
2409                                         *retlen += size;
2410                                         data_offset += size;
2411                                 }
2412                                 groupno++;
2413                                 groups--;
2414                         }
2415
2416                         /* next OTP region */
2417                         if (++field == extp->NumProtectionFields)
2418                                 break;
2419                         reg_prot_offset = otp->ProtRegAddr;
2420                         reg_fact_groups = otp->FactGroups;
2421                         reg_fact_size = 1 << otp->FactProtRegSize;
2422                         reg_user_groups = otp->UserGroups;
2423                         reg_user_size = 1 << otp->UserProtRegSize;
2424                         otp++;
2425                 }
2426         }
2427
2428         return 0;
2429 }
2430
2431 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2432                                            size_t len, size_t *retlen,
2433                                             u_char *buf)
2434 {
2435         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2436                                      buf, do_otp_read, 0);
2437 }
2438
2439 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2440                                            size_t len, size_t *retlen,
2441                                             u_char *buf)
2442 {
2443         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2444                                      buf, do_otp_read, 1);
2445 }
2446
2447 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2448                                             size_t len, size_t *retlen,
2449                                              u_char *buf)
2450 {
2451         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2452                                      buf, do_otp_write, 1);
2453 }
2454
2455 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2456                                            loff_t from, size_t len)
2457 {
2458         size_t retlen;
2459         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2460                                      NULL, do_otp_lock, 1);
2461 }
2462
2463 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2464                                            size_t *retlen, struct otp_info *buf)
2466 {
2467         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2468                                      NULL, 0);
2469 }
2470
2471 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2472                                            size_t *retlen, struct otp_info *buf)
2473 {
2474         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2475                                      NULL, 1);
2476 }
2477
2478 #endif
2479
2480 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2481 {
2482         struct mtd_erase_region_info *region;
2483         int block, status, i;
2484         unsigned long adr;
2485         size_t len;
2486
2487         for (i = 0; i < mtd->numeraseregions; i++) {
2488                 region = &mtd->eraseregions[i];
2489                 if (!region->lockmap)
2490                         continue;
2491
2492                 for (block = 0; block < region->numblocks; block++) {
2493                         len = region->erasesize;
2494                         adr = region->offset + block * len;
2495
2496                         status = cfi_varsize_frob(mtd,
2497                                         do_getlockstatus_oneblock, adr, len, NULL);
2498                         if (status)
2499                                 set_bit(block, region->lockmap);
2500                         else
2501                                 clear_bit(block, region->lockmap);
2502                 }
2503         }
2504 }
2505
2506 static int cfi_intelext_suspend(struct mtd_info *mtd)
2507 {
2508         struct map_info *map = mtd->priv;
2509         struct cfi_private *cfi = map->fldrv_priv;
2510         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2511         int i;
2512         struct flchip *chip;
2513         int ret = 0;
2514
2515         if ((mtd->flags & MTD_POWERUP_LOCK)
2516             && extp && (extp->FeatureSupport & (1 << 5)))
2517                 cfi_intelext_save_locks(mtd);
2518
2519         for (i=0; !ret && i<cfi->numchips; i++) {
2520                 chip = &cfi->chips[i];
2521
2522                 mutex_lock(&chip->mutex);
2523
2524                 switch (chip->state) {
2525                 case FL_READY:
2526                 case FL_STATUS:
2527                 case FL_CFI_QUERY:
2528                 case FL_JEDEC_QUERY:
2529                         if (chip->oldstate == FL_READY) {
2530                                 /* place the chip in a known state before suspend */
2531                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2532                                 chip->oldstate = chip->state;
2533                                 chip->state = FL_PM_SUSPENDED;
2534                                 /* No need to wake_up() on this state change -
2535                                  * as the whole point is that nobody can do anything
2536                                  * with the chip now anyway.
2537                                  */
2538                         } else {
2539                                 /* There seems to be an operation pending. We must wait for it. */
2540                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2541                                 ret = -EAGAIN;
2542                         }
2543                         break;
2544                 default:
2545                         /* Should we actually wait? Once upon a time these routines weren't
2546                            allowed to. Or should we return -EAGAIN, because the upper layers
2547                            ought to have already shut down anything which was using the device
2548                            anyway? The latter for now. */
2549                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2550                         ret = -EAGAIN;
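                        /* fall through */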
2551                 case FL_PM_SUSPENDED:
2552                         break;
2553                 }
2554                 mutex_unlock(&chip->mutex);
2555         }
2556
2557         /* Unlock the chips again */
2558
2559         if (ret) {
2560                 for (i--; i >= 0; i--) {
2561                         chip = &cfi->chips[i];
2562
2563                         mutex_lock(&chip->mutex);
2564
2565                         if (chip->state == FL_PM_SUSPENDED) {
2566                                 /* No need to force it into a known state here,
2567                                    because we're returning failure, and it didn't
2568                                    get power cycled */
2569                                 chip->state = chip->oldstate;
2570                                 chip->oldstate = FL_READY;
2571                                 wake_up(&chip->wq);
2572                         }
2573                         mutex_unlock(&chip->mutex);
2574                 }
2575         }
2576
2577         return ret;
2578 }
2579
2580 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2581 {
2582         struct mtd_erase_region_info *region;
2583         int block, i;
2584         unsigned long adr;
2585         size_t len;
2586
2587         for (i = 0; i < mtd->numeraseregions; i++) {
2588                 region = &mtd->eraseregions[i];
2589                 if (!region->lockmap)
2590                         continue;
2591
2592                 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2593                         len = region->erasesize;
2594                         adr = region->offset + block * len;
2595                         cfi_intelext_unlock(mtd, adr, len);
2596                 }
2597         }
2598 }
2599
2600 static void cfi_intelext_resume(struct mtd_info *mtd)
2601 {
2602         struct map_info *map = mtd->priv;
2603         struct cfi_private *cfi = map->fldrv_priv;
2604         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2605         int i;
2606         struct flchip *chip;
2607
2608         for (i=0; i<cfi->numchips; i++) {
2609
2610                 chip = &cfi->chips[i];
2611
2612                 mutex_lock(&chip->mutex);
2613
2614                 /* Go to known state. Chip may have been power cycled */
2615                 if (chip->state == FL_PM_SUSPENDED) {
2616                         /* Refresh LH28F640BF Partition Config. Register */
2617                         /* Refresh the LH28F640BF Partition Configuration Register */
2618                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2619                         chip->oldstate = chip->state = FL_READY;
2620                         wake_up(&chip->wq);
2621                 }
2622
2623                 mutex_unlock(&chip->mutex);
2624         }
2625
2626         if ((mtd->flags & MTD_POWERUP_LOCK)
2627             && extp && (extp->FeatureSupport & (1 << 5)))
2628                 cfi_intelext_restore_locks(mtd);
2629 }
2630
2631 static int cfi_intelext_reset(struct mtd_info *mtd)
2632 {
2633         struct map_info *map = mtd->priv;
2634         struct cfi_private *cfi = map->fldrv_priv;
2635         int i, ret;
2636
2637         for (i=0; i < cfi->numchips; i++) {
2638                 struct flchip *chip = &cfi->chips[i];
2639
2640                 /* force the completion of any ongoing operation
2641                    and switch to array mode so any bootloader in
2642                    flash is accessible for soft reboot. */
2643                 mutex_lock(&chip->mutex);
2644                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2645                 if (!ret) {
2646                         map_write(map, CMD(0xff), chip->start);
2647                         chip->state = FL_SHUTDOWN;
2648                         put_chip(map, chip, chip->start);
2649                 }
2650                 mutex_unlock(&chip->mutex);
2651         }
2652
2653         return 0;
2654 }
2655
2656 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2657                                void *v)
2658 {
2659         struct mtd_info *mtd;
2660
2661         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2662         cfi_intelext_reset(mtd);
2663         return NOTIFY_DONE;
2664 }
2665
2666 static void cfi_intelext_destroy(struct mtd_info *mtd)
2667 {
2668         struct map_info *map = mtd->priv;
2669         struct cfi_private *cfi = map->fldrv_priv;
2670         struct mtd_erase_region_info *region;
2671         int i;
2672         cfi_intelext_reset(mtd);
2673         unregister_reboot_notifier(&mtd->reboot_notifier);
2674         kfree(cfi->cmdset_priv);
2675         kfree(cfi->cfiq);
2676         kfree(cfi->chips[0].priv);
2677         kfree(cfi);
2678         for (i = 0; i < mtd->numeraseregions; i++) {
2679                 region = &mtd->eraseregions[i];
2680                 kfree(region->lockmap);
2681         }
2682         kfree(mtd->eraseregions);
2683 }
2684
2685 MODULE_LICENSE("GPL");
2686 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2687 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2688 MODULE_ALIAS("cfi_cmdset_0003");
2689 MODULE_ALIAS("cfi_cmdset_0200");