/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
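/* Note: setting FORCE_WORD_WRITE to 1 also compiles out the
 * fixup_use_write_buffers entry in cfi_fixup_table below (guarded by
 * #if !FORCE_WORD_WRITE), so the slower word-at-a-time programming
 * path is then used unconditionally. */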

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define M28F00AP30      0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
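/*
 * chip_ready(), get_chip() and put_chip() implement the shared chip-state
 * machine defined below: get_chip() acquires the (possibly partitioned)
 * chip for a given operation mode, suspending a pending erase when the
 * command set allows it, and put_chip() resumes whatever was suspended
 * and wakes up any waiters.
 */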
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);
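        /* The memset above deliberately skips the first 5 bytes of the
         * extended query structure (the "PRI" signature plus the major
         * and minor version bytes) and clears everything else before
         * the fields are rebuilt from atmel_pri below. */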

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
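/* Per the CFI spec, each EraseRegionInfo word encodes the region layout:
 * bits 0-15 hold (number of erase blocks - 1) and bits 16-31 hold the
 * block size in units of 256 bytes.  The fixup above therefore forces
 * erase region 1 to report 0x3e + 1 = 63 blocks. */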

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set a single partition, thus
                 * simultaneous operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

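/*
 * cfi_fixup() walks a table like the ones below and invokes every entry
 * whose manufacturer and device IDs match the probed chip, with
 * CFI_MFR_ANY/CFI_ID_ANY acting as wildcards; the { 0, 0, NULL } entry
 * terminates the table.
 */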
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common, and it seems the device IDs are as
         * well.  This table picks up all cases where we know
         * that to be the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
        /*
         * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy
         * w.r.t. Erase Suspend for their small Erase Blocks (0x8000)
         */
        if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
                return 1;
        return 0;
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

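        /* Two-pass read: fetch the fixed-size header first, compute how
         * much variable-length "extra" data this version of the extended
         * query carries, and if the initial read turns out to be too
         * short, jump back here and re-read the whole thing (capped at
         * 4096 bytes). */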
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
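        /* MaxBufWriteSize above is the log2 of the per-chip write buffer
         * in bytes, so the mtd write buffer covers one buffer on each
         * interleaved chip. */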

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
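/* Command sets 0x0003 and 0x0200 are programmed identically to 0x0001,
 * so their entry points are simply aliased to cfi_cmdset_0001(). */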
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kcalloc(mtd->numeraseregions,
                                    sizeof(struct mtd_erase_region_info),
                                    GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd->eraseregions)
                for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
                        for (j=0; j<cfi->numchips; j++)
                                kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means creating a new
         * cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
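                /* Each virtual chip then spans 2^partshift bytes.
                 * __ffs() returns the index of the lowest set bit, which
                 * equals log2(numparts) for the power-of-two partition
                 * counts this assumes; the erasesize check below catches
                 * unreasonable results. */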

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc_array(cfi->numchips,
                                       sizeof(struct flchip_shared),
                                       GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Do not allow suspend if we are reading/writing within
                 * the block being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* do not suspend small EBs; buggy Micron chips */
                if (cfi_is_micron_28F00AP30(cfi, chip) &&
                    (chip->in_progress_block_mask == ~(0x8000-1)))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

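/*
 * chip_ready() returns -EAGAIN whenever it had to drop and retake
 * chip->mutex, since the chip state may have changed in the meantime;
 * get_chip() below simply retries until it gets a definitive answer.
 */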
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. There is a possibility of contention on the write/erase
                 * operations, which are global to the real chip and not per
                 * partition.  So let's fight it out in the context of the
                 * partition which currently has authority over the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own the chip if it is already
                         * in FL_SYNCING state. Put the contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have a suspended erase
                 * on this chip. If so, sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), chip->in_progress_block_addr);
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
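                        /* Per the Intel status register layout, SR.6
                         * (0x40) is the erase-suspend flag and SR.2
                         * (0x04) the program-suspend flag: if the bit is
                         * clear the operation actually completed on its
                         * own, so there is nothing to resume and we can
                         * leave the polling loop. */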
1186                         oldstate = chip->state;
1187                         if (oldstate == FL_ERASING) {
1188                                 if (!map_word_bitsset(map, status, CMD(0x40)))
1189                                         break;
1190                                 newstate = FL_XIP_WHILE_ERASING;
1191                                 chip->erase_suspended = 1;
1192                         } else {
1193                                 if (!map_word_bitsset(map, status, CMD(0x04)))
1194                                         break;
1195                                 newstate = FL_XIP_WHILE_WRITING;
1196                                 chip->write_suspended = 1;
1197                         }
1198                         chip->state = newstate;
1199                         map_write(map, CMD(0xff), adr);
1200                         (void) map_read(map, adr);
1201                         xip_iprefetch();
1202                         local_irq_enable();
1203                         mutex_unlock(&chip->mutex);
1204                         xip_iprefetch();
1205                         cond_resched();
1206
1207                         /*
1208                          * We're back.  However someone else might have
1209                          * decided to go write to the chip if we are in
1210                          * a suspended erase state.  If so let's wait
1211                          * until it's done.
1212                          */
1213                         mutex_lock(&chip->mutex);
1214                         while (chip->state != newstate) {
1215                                 DECLARE_WAITQUEUE(wait, current);
1216                                 set_current_state(TASK_UNINTERRUPTIBLE);
1217                                 add_wait_queue(&chip->wq, &wait);
1218                                 mutex_unlock(&chip->mutex);
1219                                 schedule();
1220                                 remove_wait_queue(&chip->wq, &wait);
1221                                 mutex_lock(&chip->mutex);
1222                         }
1223                         /* Disallow XIP again */
1224                         local_irq_disable();
1225
1226                         /* Resume the write or erase operation */
1227                         map_write(map, CMD(0xd0), adr);
1228                         map_write(map, CMD(0x70), adr);
1229                         chip->state = oldstate;
1230                         start = xip_currtime();
1231                 } else if (usec >= 1000000/HZ) {
1232                         /*
1233                          * Try to save CPU power when the remaining delay
1234                          * is at least one system timer tick period.
1235                          * No need to be extremely accurate here.
1236                          */
1237                         xip_cpu_idle();
1238                 }
1239                 status = map_read(map, adr);
1240                 done = xip_elapsed_since(start);
1241         } while (!map_word_andequal(map, status, OK, OK)
1242                  && done < usec);
1243
1244         return (done >= usec) ? -ETIME : 0;
1245 }
1246
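/*
 * For reference, the Intel status register (SR) bits polled above and
 * throughout this file (an illustrative summary; the code itself uses
 * raw CMD() masks):
 *
 *   0x80  SR.7  write state machine ready
 *   0x40  SR.6  erase suspended
 *   0x20  SR.5  erase/unlock error
 *   0x10  SR.4  program error
 *   0x08  SR.3  VPP low
 *   0x04  SR.2  program suspended
 *   0x02  SR.1  block locked / protection error
 *
 * Hence the composite masks used below: 0x1a (SR.4|SR.3|SR.1) after a
 * program, 0x3a (SR.5|SR.4|SR.3|SR.1) after an erase, and SR.4 plus
 * SR.5 together (0x30) for a bad command sequence.
 */
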
1247 /*
1248  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1249  * the flash is actively programming or erasing since we have to poll for
1250  * the operation to complete anyway.  We can't do that in a generic way with
1251  * a XIP setup so do it before the actual flash operation in this case
1252  * and stub it out from INVAL_CACHE_AND_WAIT.
1253  */
1254 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1255         INVALIDATE_CACHED_RANGE(map, from, size)
1256
1257 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1258         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1259
1260 #else
1261
1262 #define xip_disable(map, chip, adr)
1263 #define xip_enable(map, chip, adr)
1264 #define XIP_INVAL_CACHED_RANGE(x...)
1265 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1266
1267 static int inval_cache_and_wait_for_operation(
1268                 struct map_info *map, struct flchip *chip,
1269                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1270                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1271 {
1272         struct cfi_private *cfi = map->fldrv_priv;
1273         map_word status, status_OK = CMD(0x80);
1274         int chip_state = chip->state;
1275         unsigned int timeo, sleep_time, reset_timeo;
1276
1277         mutex_unlock(&chip->mutex);
1278         if (inval_len)
1279                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1280         mutex_lock(&chip->mutex);
1281
1282         timeo = chip_op_time_max;
1283         if (!timeo)
1284                 timeo = 500000;
1285         reset_timeo = timeo;
1286         sleep_time = chip_op_time / 2;
1287
1288         for (;;) {
1289                 if (chip->state != chip_state) {
1290                         /* Someone's suspended the operation: sleep */
1291                         DECLARE_WAITQUEUE(wait, current);
1292                         set_current_state(TASK_UNINTERRUPTIBLE);
1293                         add_wait_queue(&chip->wq, &wait);
1294                         mutex_unlock(&chip->mutex);
1295                         schedule();
1296                         remove_wait_queue(&chip->wq, &wait);
1297                         mutex_lock(&chip->mutex);
1298                         continue;
1299                 }
1300
1301                 status = map_read(map, cmd_adr);
1302                 if (map_word_andequal(map, status, status_OK, status_OK))
1303                         break;
1304
1305                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1306                         /* Erase suspend occurred while sleeping: reset timeout */
1307                         timeo = reset_timeo;
1308                         chip->erase_suspended = 0;
1309                 }
1310                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1311                         /* Write suspend occurred while sleeping: reset timeout */
1312                         timeo = reset_timeo;
1313                         chip->write_suspended = 0;
1314                 }
1315                 if (!timeo) {
1316                         map_write(map, CMD(0x70), cmd_adr);
1317                         chip->state = FL_STATUS;
1318                         return -ETIME;
1319                 }
1320
1321                 /* OK Still waiting. Drop the lock, wait a while and retry. */
1322                 mutex_unlock(&chip->mutex);
1323                 if (sleep_time >= 1000000/HZ) {
1324                         /*
1325                          * At least half of the expected operation time
1326                          * still remains, so it can be spent in a sleeping
1327                          * delay instead of busy waiting.
1328                          */
1329                         msleep(sleep_time/1000);
1330                         timeo -= sleep_time;
1331                         sleep_time = 1000000/HZ;
1332                 } else {
1333                         udelay(1);
1334                         cond_resched();
1335                         timeo--;
1336                 }
1337                 mutex_lock(&chip->mutex);
1338         }
1339
1340         /* Done and happy. */
1341         chip->state = FL_STATUS;
1342         return 0;
1343 }
1344
1345 #endif
1346
1347 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1348         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1349
1350
1351 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1352 {
1353         unsigned long cmd_addr;
1354         struct cfi_private *cfi = map->fldrv_priv;
1355         int ret = 0;
1356
1357         adr += chip->start;
1358
1359         /* Ensure cmd read/writes are aligned. */
1360         cmd_addr = adr & ~(map_bankwidth(map)-1);
1361
1362         mutex_lock(&chip->mutex);
1363
1364         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1365
1366         if (!ret) {
1367                 if (chip->state != FL_POINT && chip->state != FL_READY)
1368                         map_write(map, CMD(0xff), cmd_addr);
1369
1370                 chip->state = FL_POINT;
1371                 chip->ref_point_counter++;
1372         }
1373         mutex_unlock(&chip->mutex);
1374
1375         return ret;
1376 }
1377
1378 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1379                 size_t *retlen, void **virt, resource_size_t *phys)
1380 {
1381         struct map_info *map = mtd->priv;
1382         struct cfi_private *cfi = map->fldrv_priv;
1383         unsigned long ofs, last_end = 0;
1384         int chipnum;
1385         int ret = 0;
1386
1387         if (!map->virt)
1388                 return -EINVAL;
1389
1390         /* Now lock the chip(s) to POINT state */
1391
1392         /* ofs: offset within the first chip at which the first read should start */
1393         chipnum = (from >> cfi->chipshift);
1394         ofs = from - (chipnum << cfi->chipshift);
1395
1396         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1397         if (phys)
1398                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1399
1400         while (len) {
1401                 unsigned long thislen;
1402
1403                 if (chipnum >= cfi->numchips)
1404                         break;
1405
1406                 /* We cannot point across chips that are virtually disjoint */
1407                 if (!last_end)
1408                         last_end = cfi->chips[chipnum].start;
1409                 else if (cfi->chips[chipnum].start != last_end)
1410                         break;
1411
1412                 if ((len + ofs -1) >> cfi->chipshift)
1413                         thislen = (1<<cfi->chipshift) - ofs;
1414                 else
1415                         thislen = len;
1416
1417                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1418                 if (ret)
1419                         break;
1420
1421                 *retlen += thislen;
1422                 len -= thislen;
1423
1424                 ofs = 0;
1425                 last_end += 1 << cfi->chipshift;
1426                 chipnum++;
1427         }
1428         return 0;
1429 }
1430
1431 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1432 {
1433         struct map_info *map = mtd->priv;
1434         struct cfi_private *cfi = map->fldrv_priv;
1435         unsigned long ofs;
1436         int chipnum, err = 0;
1437
1438         /* Now release the chip(s) from POINT state */
1439
1440         /* ofs: offset within the first chip where the first unpoint should start */
1441         chipnum = (from >> cfi->chipshift);
1442         ofs = from - (chipnum <<  cfi->chipshift);
1443
1444         while (len && !err) {
1445                 unsigned long thislen;
1446                 struct flchip *chip;
1447
1448                 chip = &cfi->chips[chipnum];
1449                 if (chipnum >= cfi->numchips)
1450                         break;
1451
1452                 if ((len + ofs -1) >> cfi->chipshift)
1453                         thislen = (1<<cfi->chipshift) - ofs;
1454                 else
1455                         thislen = len;
1456
1457                 mutex_lock(&chip->mutex);
1458                 if (chip->state == FL_POINT) {
1459                         chip->ref_point_counter--;
1460                         if (chip->ref_point_counter == 0)
1461                                 chip->state = FL_READY;
1462                 } else {
1463                         printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1464                         err = -EINVAL;
1465                 }
1466
1467                 put_chip(map, chip, chip->start);
1468                 mutex_unlock(&chip->mutex);
1469
1470                 len -= thislen;
1471                 ofs = 0;
1472                 chipnum++;
1473         }
1474
1475         return err;
1476 }
1477
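/*
 * Illustrative only: how an MTD client exercises the point/unpoint pair
 * above through the generic MTD API (a minimal sketch; device number 0
 * and the length are assumptions, and put_mtd_device() is omitted):
 *
 *	size_t retlen;
 *	void *virt;
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *	if (!IS_ERR(mtd) &&
 *	    !mtd_point(mtd, 0, 4096, &retlen, &virt, NULL)) {
 *		... access up to retlen bytes of flash directly at virt ...
 *		mtd_unpoint(mtd, 0, retlen);
 *	}
 */
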
1478 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1479 {
1480         unsigned long cmd_addr;
1481         struct cfi_private *cfi = map->fldrv_priv;
1482         int ret;
1483
1484         adr += chip->start;
1485
1486         /* Ensure cmd read/writes are aligned. */
1487         cmd_addr = adr & ~(map_bankwidth(map)-1);
1488
1489         mutex_lock(&chip->mutex);
1490         ret = get_chip(map, chip, cmd_addr, FL_READY);
1491         if (ret) {
1492                 mutex_unlock(&chip->mutex);
1493                 return ret;
1494         }
1495
1496         if (chip->state != FL_POINT && chip->state != FL_READY) {
1497                 map_write(map, CMD(0xff), cmd_addr);
1498
1499                 chip->state = FL_READY;
1500         }
1501
1502         map_copy_from(map, buf, adr, len);
1503
1504         put_chip(map, chip, cmd_addr);
1505
1506         mutex_unlock(&chip->mutex);
1507         return 0;
1508 }
1509
1510 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1511 {
1512         struct map_info *map = mtd->priv;
1513         struct cfi_private *cfi = map->fldrv_priv;
1514         unsigned long ofs;
1515         int chipnum;
1516         int ret = 0;
1517
1518         /* ofs: offset within the first chip at which the first read should start */
1519         chipnum = (from >> cfi->chipshift);
1520         ofs = from - (chipnum <<  cfi->chipshift);
1521
1522         while (len) {
1523                 unsigned long thislen;
1524
1525                 if (chipnum >= cfi->numchips)
1526                         break;
1527
1528                 if ((len + ofs -1) >> cfi->chipshift)
1529                         thislen = (1<<cfi->chipshift) - ofs;
1530                 else
1531                         thislen = len;
1532
1533                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1534                 if (ret)
1535                         break;
1536
1537                 *retlen += thislen;
1538                 len -= thislen;
1539                 buf += thislen;
1540
1541                 ofs = 0;
1542                 chipnum++;
1543         }
1544         return ret;
1545 }
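
/*
 * Illustrative only: the read path above is normally reached through
 * the generic MTD API rather than called directly.  A minimal sketch
 * (the offset and length are assumptions):
 *
 *	u_char buf[64];
 *	size_t retlen;
 *	int err = mtd_read(mtd, 0, sizeof(buf), &retlen, buf);
 *
 * On success err is 0 and retlen equals the requested length.
 */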
1546
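/*
 * Word programming below follows the standard Intel command sequence,
 * summarized here for reference: write the program setup command (0x40,
 * or 0x41 on Intel Performance command-set parts, or 0xc0 for OTP
 * registers) to the target address, write the data word itself, then
 * poll the status register until SR.7 reports completion and check the
 * error bits.
 */
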
1547 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1548                                      unsigned long adr, map_word datum, int mode)
1549 {
1550         struct cfi_private *cfi = map->fldrv_priv;
1551         map_word status, write_cmd;
1552         int ret = 0;
1553
1554         adr += chip->start;
1555
1556         switch (mode) {
1557         case FL_WRITING:
1558                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1559                 break;
1560         case FL_OTP_WRITE:
1561                 write_cmd = CMD(0xc0);
1562                 break;
1563         default:
1564                 return -EINVAL;
1565         }
1566
1567         mutex_lock(&chip->mutex);
1568         ret = get_chip(map, chip, adr, mode);
1569         if (ret) {
1570                 mutex_unlock(&chip->mutex);
1571                 return ret;
1572         }
1573
1574         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1575         ENABLE_VPP(map);
1576         xip_disable(map, chip, adr);
1577         map_write(map, write_cmd, adr);
1578         map_write(map, datum, adr);
1579         chip->state = mode;
1580
1581         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1582                                    adr, map_bankwidth(map),
1583                                    chip->word_write_time,
1584                                    chip->word_write_time_max);
1585         if (ret) {
1586                 xip_enable(map, chip, adr);
1587                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1588                 goto out;
1589         }
1590
1591         /* check for errors */
1592         status = map_read(map, adr);
1593         if (map_word_bitsset(map, status, CMD(0x1a))) {
1594                 unsigned long chipstatus = MERGESTATUS(status);
1595
1596                 /* reset status */
1597                 map_write(map, CMD(0x50), adr);
1598                 map_write(map, CMD(0x70), adr);
1599                 xip_enable(map, chip, adr);
1600
1601                 if (chipstatus & 0x02) {
1602                         ret = -EROFS;
1603                 } else if (chipstatus & 0x08) {
1604                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1605                         ret = -EIO;
1606                 } else {
1607                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1608                         ret = -EINVAL;
1609                 }
1610
1611                 goto out;
1612         }
1613
1614         xip_enable(map, chip, adr);
1615  out:   DISABLE_VPP(map);
1616         put_chip(map, chip, adr);
1617         mutex_unlock(&chip->mutex);
1618         return ret;
1619 }
1620
1621
1622 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1623 {
1624         struct map_info *map = mtd->priv;
1625         struct cfi_private *cfi = map->fldrv_priv;
1626         int ret = 0;
1627         int chipnum;
1628         unsigned long ofs;
1629
1630         chipnum = to >> cfi->chipshift;
1631         ofs = to  - (chipnum << cfi->chipshift);
1632
1633         /* If it's not bus-aligned, do a partial word write first */
1634         if (ofs & (map_bankwidth(map)-1)) {
1635                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1636                 int gap = ofs - bus_ofs;
1637                 int n;
1638                 map_word datum;
1639
1640                 n = min_t(int, len, map_bankwidth(map)-gap);
1641                 datum = map_word_ff(map);
1642                 datum = map_word_load_partial(map, datum, buf, gap, n);
1643
1644                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1645                                                bus_ofs, datum, FL_WRITING);
1646                 if (ret)
1647                         return ret;
1648
1649                 len -= n;
1650                 ofs += n;
1651                 buf += n;
1652                 (*retlen) += n;
1653
1654                 if (ofs >> cfi->chipshift) {
1655                         chipnum++;
1656                         ofs = 0;
1657                         if (chipnum == cfi->numchips)
1658                                 return 0;
1659                 }
1660         }
1661
1662         while(len >= map_bankwidth(map)) {
1663                 map_word datum = map_word_load(map, buf);
1664
1665                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1666                                        ofs, datum, FL_WRITING);
1667                 if (ret)
1668                         return ret;
1669
1670                 ofs += map_bankwidth(map);
1671                 buf += map_bankwidth(map);
1672                 (*retlen) += map_bankwidth(map);
1673                 len -= map_bankwidth(map);
1674
1675                 if (ofs >> cfi->chipshift) {
1676                         chipnum++;
1677                         ofs = 0;
1678                         if (chipnum == cfi->numchips)
1679                                 return 0;
1680                 }
1681         }
1682
1683         if (len & (map_bankwidth(map)-1)) {
1684                 map_word datum;
1685
1686                 datum = map_word_ff(map);
1687                 datum = map_word_load_partial(map, datum, buf, 0, len);
1688
1689                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1690                                        ofs, datum, FL_WRITING);
1691                 if (ret)
1692                         return ret;
1693
1694                 (*retlen) += len;
1695         }
1696
1697         return 0;
1698 }
1699
1700
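/*
 * The buffered write below follows the Intel Write-to-Buffer sequence:
 * issue the setup command (0xe8, or 0xe9 on Intel Performance
 * command-set parts) at the buffer-aligned address, wait for the chip
 * to report buffer availability, write the word count minus one, write
 * the data words, then confirm with 0xd0 and poll the status register
 * for completion.
 */
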
1701 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1702                                     unsigned long adr, const struct kvec **pvec,
1703                                     unsigned long *pvec_seek, int len)
1704 {
1705         struct cfi_private *cfi = map->fldrv_priv;
1706         map_word status, write_cmd, datum;
1707         unsigned long cmd_adr;
1708         int ret, wbufsize, word_gap, words;
1709         const struct kvec *vec;
1710         unsigned long vec_seek;
1711         unsigned long initial_adr;
1712         int initial_len = len;
1713
1714         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1715         adr += chip->start;
1716         initial_adr = adr;
1717         cmd_adr = adr & ~(wbufsize-1);
1718
1719         /* Sharp LH28F640BF chips need the first address for the
1720          * Page Buffer Program command. See Table 5 of
1721          * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1722         if (is_LH28F640BF(cfi))
1723                 cmd_adr = adr;
1724
1725         /* Determine the write command once, according to the interleave */
1726         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1727
1728         mutex_lock(&chip->mutex);
1729         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1730         if (ret) {
1731                 mutex_unlock(&chip->mutex);
1732                 return ret;
1733         }
1734
1735         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1736         ENABLE_VPP(map);
1737         xip_disable(map, chip, cmd_adr);
1738
1739         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1740            [...], the device will not accept any more Write to Buffer commands".
1741            So we must check here and reset those bits if they're set. Otherwise
1742            we're just pissing in the wind */
1743         if (chip->state != FL_STATUS) {
1744                 map_write(map, CMD(0x70), cmd_adr);
1745                 chip->state = FL_STATUS;
1746         }
1747         status = map_read(map, cmd_adr);
1748         if (map_word_bitsset(map, status, CMD(0x30))) {
1749                 xip_enable(map, chip, cmd_adr);
1750                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1751                 xip_disable(map, chip, cmd_adr);
1752                 map_write(map, CMD(0x50), cmd_adr);
1753                 map_write(map, CMD(0x70), cmd_adr);
1754         }
1755
1756         chip->state = FL_WRITING_TO_BUFFER;
1757         map_write(map, write_cmd, cmd_adr);
1758         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1759         if (ret) {
1760                 /* Argh. Not ready for write to buffer */
1761                 map_word Xstatus = map_read(map, cmd_adr);
1762                 map_write(map, CMD(0x70), cmd_adr);
1763                 chip->state = FL_STATUS;
1764                 status = map_read(map, cmd_adr);
1765                 map_write(map, CMD(0x50), cmd_adr);
1766                 map_write(map, CMD(0x70), cmd_adr);
1767                 xip_enable(map, chip, cmd_adr);
1768                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1769                                 map->name, Xstatus.x[0], status.x[0]);
1770                 goto out;
1771         }
1772
1773         /* Figure out the number of words to write */
1774         word_gap = (-adr & (map_bankwidth(map)-1));
1775         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1776         if (!word_gap) {
1777                 words--;
1778         } else {
1779                 word_gap = map_bankwidth(map) - word_gap;
1780                 adr -= word_gap;
1781                 datum = map_word_ff(map);
1782         }
1783
1784         /* Write length of data to come */
1785         map_write(map, CMD(words), cmd_adr);
1786
1787         /* Write data */
1788         vec = *pvec;
1789         vec_seek = *pvec_seek;
1790         do {
1791                 int n = map_bankwidth(map) - word_gap;
1792                 if (n > vec->iov_len - vec_seek)
1793                         n = vec->iov_len - vec_seek;
1794                 if (n > len)
1795                         n = len;
1796
1797                 if (!word_gap && len < map_bankwidth(map))
1798                         datum = map_word_ff(map);
1799
1800                 datum = map_word_load_partial(map, datum,
1801                                               vec->iov_base + vec_seek,
1802                                               word_gap, n);
1803
1804                 len -= n;
1805                 word_gap += n;
1806                 if (!len || word_gap == map_bankwidth(map)) {
1807                         map_write(map, datum, adr);
1808                         adr += map_bankwidth(map);
1809                         word_gap = 0;
1810                 }
1811
1812                 vec_seek += n;
1813                 if (vec_seek == vec->iov_len) {
1814                         vec++;
1815                         vec_seek = 0;
1816                 }
1817         } while (len);
1818         *pvec = vec;
1819         *pvec_seek = vec_seek;
1820
1821         /* GO GO GO */
1822         map_write(map, CMD(0xd0), cmd_adr);
1823         chip->state = FL_WRITING;
1824
1825         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1826                                    initial_adr, initial_len,
1827                                    chip->buffer_write_time,
1828                                    chip->buffer_write_time_max);
1829         if (ret) {
1830                 map_write(map, CMD(0x70), cmd_adr);
1831                 chip->state = FL_STATUS;
1832                 xip_enable(map, chip, cmd_adr);
1833                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1834                 goto out;
1835         }
1836
1837         /* check for errors */
1838         status = map_read(map, cmd_adr);
1839         if (map_word_bitsset(map, status, CMD(0x1a))) {
1840                 unsigned long chipstatus = MERGESTATUS(status);
1841
1842                 /* reset status */
1843                 map_write(map, CMD(0x50), cmd_adr);
1844                 map_write(map, CMD(0x70), cmd_adr);
1845                 xip_enable(map, chip, cmd_adr);
1846
1847                 if (chipstatus & 0x02) {
1848                         ret = -EROFS;
1849                 } else if (chipstatus & 0x08) {
1850                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1851                         ret = -EIO;
1852                 } else {
1853                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1854                         ret = -EINVAL;
1855                 }
1856
1857                 goto out;
1858         }
1859
1860         xip_enable(map, chip, cmd_adr);
1861  out:   DISABLE_VPP(map);
1862         put_chip(map, chip, cmd_adr);
1863         mutex_unlock(&chip->mutex);
1864         return ret;
1865 }
1866
1867 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1868                                 unsigned long count, loff_t to, size_t *retlen)
1869 {
1870         struct map_info *map = mtd->priv;
1871         struct cfi_private *cfi = map->fldrv_priv;
1872         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1873         int ret = 0;
1874         int chipnum;
1875         unsigned long ofs, vec_seek, i;
1876         size_t len = 0;
1877
1878         for (i = 0; i < count; i++)
1879                 len += vecs[i].iov_len;
1880
1881         if (!len)
1882                 return 0;
1883
1884         chipnum = to >> cfi->chipshift;
1885         ofs = to - (chipnum << cfi->chipshift);
1886         vec_seek = 0;
1887
1888         do {
1889                 /* We must not cross write block boundaries */
1890                 int size = wbufsize - (ofs & (wbufsize-1));
1891
1892                 if (size > len)
1893                         size = len;
1894                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1895                                       ofs, &vecs, &vec_seek, size);
1896                 if (ret)
1897                         return ret;
1898
1899                 ofs += size;
1900                 (*retlen) += size;
1901                 len -= size;
1902
1903                 if (ofs >> cfi->chipshift) {
1904                         chipnum++;
1905                         ofs = 0;
1906                         if (chipnum == cfi->numchips)
1907                                 return 0;
1908                 }
1909
1910                 /* Be nice and reschedule with the chip in a usable state for other
1911                    processes. */
1912                 cond_resched();
1913
1914         } while (len);
1915
1916         return 0;
1917 }
1918
1919 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1920                                        size_t len, size_t *retlen, const u_char *buf)
1921 {
1922         struct kvec vec;
1923
1924         vec.iov_base = (void *) buf;
1925         vec.iov_len = len;
1926
1927         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1928 }
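
/*
 * Illustrative only: a client-side sketch of the buffered write path
 * above via the generic MTD API (offset and payload are assumptions):
 *
 *	static const u_char pattern[] = { 0xde, 0xad, 0xbe, 0xef };
 *	size_t retlen;
 *	int err = mtd_write(mtd, 0x1000, sizeof(pattern), &retlen, pattern);
 *
 * The target range must have been erased beforehand, since programming
 * can only clear bits from 1 to 0.
 */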
1929
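/*
 * Block erase below uses the Intel sequence: clear the status register
 * (0x50), issue erase setup (0x20) followed by erase confirm (0xd0) at
 * any address within the block, then poll the status register.  Erase
 * errors are retried a few times before giving up.
 */
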
1930 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1931                                       unsigned long adr, int len, void *thunk)
1932 {
1933         struct cfi_private *cfi = map->fldrv_priv;
1934         map_word status;
1935         int retries = 3;
1936         int ret;
1937
1938         adr += chip->start;
1939
1940  retry:
1941         mutex_lock(&chip->mutex);
1942         ret = get_chip(map, chip, adr, FL_ERASING);
1943         if (ret) {
1944                 mutex_unlock(&chip->mutex);
1945                 return ret;
1946         }
1947
1948         XIP_INVAL_CACHED_RANGE(map, adr, len);
1949         ENABLE_VPP(map);
1950         xip_disable(map, chip, adr);
1951
1952         /* Clear the status register first */
1953         map_write(map, CMD(0x50), adr);
1954
1955         /* Now erase */
1956         map_write(map, CMD(0x20), adr);
1957         map_write(map, CMD(0xD0), adr);
1958         chip->state = FL_ERASING;
1959         chip->erase_suspended = 0;
1960         chip->in_progress_block_addr = adr;
1961         chip->in_progress_block_mask = ~(len - 1);
1962
1963         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1964                                    adr, len,
1965                                    chip->erase_time,
1966                                    chip->erase_time_max);
1967         if (ret) {
1968                 map_write(map, CMD(0x70), adr);
1969                 chip->state = FL_STATUS;
1970                 xip_enable(map, chip, adr);
1971                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1972                 goto out;
1973         }
1974
1975         /* We've broken this before. It doesn't hurt to be safe */
1976         map_write(map, CMD(0x70), adr);
1977         chip->state = FL_STATUS;
1978         status = map_read(map, adr);
1979
1980         /* check for errors */
1981         if (map_word_bitsset(map, status, CMD(0x3a))) {
1982                 unsigned long chipstatus = MERGESTATUS(status);
1983
1984                 /* Reset the error bits */
1985                 map_write(map, CMD(0x50), adr);
1986                 map_write(map, CMD(0x70), adr);
1987                 xip_enable(map, chip, adr);
1988
1989                 if ((chipstatus & 0x30) == 0x30) {
1990                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1991                         ret = -EINVAL;
1992                 } else if (chipstatus & 0x02) {
1993                         /* Protection bit set */
1994                         ret = -EROFS;
1995                 } else if (chipstatus & 0x8) {
1996                         /* Voltage */
1997                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1998                         ret = -EIO;
1999                 } else if (chipstatus & 0x20 && retries--) {
2000                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2001                         DISABLE_VPP(map);
2002                         put_chip(map, chip, adr);
2003                         mutex_unlock(&chip->mutex);
2004                         goto retry;
2005                 } else {
2006                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2007                         ret = -EIO;
2008                 }
2009
2010                 goto out;
2011         }
2012
2013         xip_enable(map, chip, adr);
2014  out:   DISABLE_VPP(map);
2015         put_chip(map, chip, adr);
2016         mutex_unlock(&chip->mutex);
2017         return ret;
2018 }
2019
2020 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2021 {
2022         return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2023                                 instr->len, NULL);
2024 }
2025
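/*
 * Illustrative only: a minimal erase request as an MTD client would
 * issue it, assuming this kernel's struct erase_info needs only addr
 * and len filled in:
 *
 *	struct erase_info ei = {
 *		.addr = 0,
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &ei);
 */
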
2026 static void cfi_intelext_sync (struct mtd_info *mtd)
2027 {
2028         struct map_info *map = mtd->priv;
2029         struct cfi_private *cfi = map->fldrv_priv;
2030         int i;
2031         struct flchip *chip;
2032         int ret = 0;
2033
2034         for (i = 0; !ret && i < cfi->numchips; i++) {
2035                 chip = &cfi->chips[i];
2036
2037                 mutex_lock(&chip->mutex);
2038                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2039
2040                 if (!ret) {
2041                         chip->oldstate = chip->state;
2042                         chip->state = FL_SYNCING;
2043                         /* No need to wake_up() on this state change -
2044                          * as the whole point is that nobody can do anything
2045                          * with the chip now anyway.
2046                          */
2047                 }
2048                 mutex_unlock(&chip->mutex);
2049         }
2050
2051         /* Unlock the chips again */
2052
2053         for (i--; i >= 0; i--) {
2054                 chip = &cfi->chips[i];
2055
2056                 mutex_lock(&chip->mutex);
2057
2058                 if (chip->state == FL_SYNCING) {
2059                         chip->state = chip->oldstate;
2060                         chip->oldstate = FL_READY;
2061                         wake_up(&chip->wq);
2062                 }
2063                 mutex_unlock(&chip->mutex);
2064         }
2065 }
2066
2067 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2068                                                 struct flchip *chip,
2069                                                 unsigned long adr,
2070                                                 int len, void *thunk)
2071 {
2072         struct cfi_private *cfi = map->fldrv_priv;
2073         int status, ofs_factor = cfi->interleave * cfi->device_type;
2074
2075         adr += chip->start;
2076         xip_disable(map, chip, adr+(2*ofs_factor));
2077         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2078         chip->state = FL_JEDEC_QUERY;
2079         status = cfi_read_query(map, adr+(2*ofs_factor));
2080         xip_enable(map, chip, 0);
2081         return status;
2082 }
2083
2084 #ifdef DEBUG_LOCK_BITS
2085 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2086                                                 struct flchip *chip,
2087                                                 unsigned long adr,
2088                                                 int len, void *thunk)
2089 {
2090         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2091                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2092         return 0;
2093 }
2094 #endif
2095
2096 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2097 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2098
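/*
 * Block lock-bit manipulation below uses the common Intel sequence:
 * write the setup command (0x60) to the block address, then either 0x01
 * to set the lock bit or 0xd0 to clear it, and poll the status register
 * unless the chip supports instant individual block locking.
 */
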
2099 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2100                                        unsigned long adr, int len, void *thunk)
2101 {
2102         struct cfi_private *cfi = map->fldrv_priv;
2103         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2104         int mdelay;
2105         int ret;
2106
2107         adr += chip->start;
2108
2109         mutex_lock(&chip->mutex);
2110         ret = get_chip(map, chip, adr, FL_LOCKING);
2111         if (ret) {
2112                 mutex_unlock(&chip->mutex);
2113                 return ret;
2114         }
2115
2116         ENABLE_VPP(map);
2117         xip_disable(map, chip, adr);
2118
2119         map_write(map, CMD(0x60), adr);
2120         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2121                 map_write(map, CMD(0x01), adr);
2122                 chip->state = FL_LOCKING;
2123         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2124                 map_write(map, CMD(0xD0), adr);
2125                 chip->state = FL_UNLOCKING;
2126         } else
2127                 BUG();
2128
2129         /*
2130          * If Instant Individual Block Locking is supported, there is
2131          * no need to delay.  Otherwise, unlocking may take up to 1.4
2132          * seconds on some Intel flashes, so let's use a maximum of
2133          * 1.5 seconds (1500 ms) as the timeout.
2134          *
2135          * See "Clear Block Lock-Bits Time" on page 40 in
2136          * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3
2137          * manual from February 2003.
2138          */
2141         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2142
2143         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2144         if (ret) {
2145                 map_write(map, CMD(0x70), adr);
2146                 chip->state = FL_STATUS;
2147                 xip_enable(map, chip, adr);
2148                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2149                 goto out;
2150         }
2151
2152         xip_enable(map, chip, adr);
2153  out:   DISABLE_VPP(map);
2154         put_chip(map, chip, adr);
2155         mutex_unlock(&chip->mutex);
2156         return ret;
2157 }
2158
2159 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2160 {
2161         int ret;
2162
2163 #ifdef DEBUG_LOCK_BITS
2164         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2165                __func__, ofs, len);
2166         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2167                 ofs, len, NULL);
2168 #endif
2169
2170         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2171                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2172
2173 #ifdef DEBUG_LOCK_BITS
2174         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2175                __func__, ret);
2176         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2177                 ofs, len, NULL);
2178 #endif
2179
2180         return ret;
2181 }
2182
2183 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2184 {
2185         int ret;
2186
2187 #ifdef DEBUG_LOCK_BITS
2188         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2189                __func__, ofs, len);
2190         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2191                 ofs, len, NULL);
2192 #endif
2193
2194         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2195                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2196
2197 #ifdef DEBUG_LOCK_BITS
2198         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2199                __func__, ret);
2200         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2201                 ofs, len, NULL);
2202 #endif
2203
2204         return ret;
2205 }
2206
2207 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2208                                   uint64_t len)
2209 {
2210         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2211                                 ofs, len, NULL) ? 1 : 0;
2212 }
2213
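/*
 * Illustrative only: the lock operations above as seen through the
 * generic MTD API (offsets and lengths are assumptions):
 *
 *	if (mtd_is_locked(mtd, 0, mtd->erasesize) > 0)
 *		mtd_unlock(mtd, 0, mtd->erasesize);
 *	... modify the block ...
 *	mtd_lock(mtd, 0, mtd->erasesize);
 */
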
2214 #ifdef CONFIG_MTD_OTP
2215
2216 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2217                         u_long data_offset, u_char *buf, u_int size,
2218                         u_long prot_offset, u_int groupno, u_int groupsize);
2219
2220 static int __xipram
2221 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2222             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2223 {
2224         struct cfi_private *cfi = map->fldrv_priv;
2225         int ret;
2226
2227         mutex_lock(&chip->mutex);
2228         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2229         if (ret) {
2230                 mutex_unlock(&chip->mutex);
2231                 return ret;
2232         }
2233
2234         /* let's ensure we're not reading back cached data from array mode */
2235         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2236
2237         xip_disable(map, chip, chip->start);
2238         if (chip->state != FL_JEDEC_QUERY) {
2239                 map_write(map, CMD(0x90), chip->start);
2240                 chip->state = FL_JEDEC_QUERY;
2241         }
2242         map_copy_from(map, buf, chip->start + offset, size);
2243         xip_enable(map, chip, chip->start);
2244
2245         /* then ensure we don't keep OTP data in the cache */
2246         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2247
2248         put_chip(map, chip, chip->start);
2249         mutex_unlock(&chip->mutex);
2250         return 0;
2251 }
2252
2253 static int
2254 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2255              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2256 {
2257         int ret;
2258
2259         while (size) {
2260                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2261                 int gap = offset - bus_ofs;
2262                 int n = min_t(int, size, map_bankwidth(map)-gap);
2263                 map_word datum = map_word_ff(map);
2264
2265                 datum = map_word_load_partial(map, datum, buf, gap, n);
2266                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2267                 if (ret)
2268                         return ret;
2269
2270                 offset += n;
2271                 buf += n;
2272                 size -= n;
2273         }
2274
2275         return 0;
2276 }
2277
2278 static int
2279 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2280             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2281 {
2282         struct cfi_private *cfi = map->fldrv_priv;
2283         map_word datum;
2284
2285         /* make sure area matches group boundaries */
2286         if (size != grpsz)
2287                 return -EXDEV;
2288
2289         datum = map_word_ff(map);
2290         datum = map_word_clr(map, datum, CMD(1 << grpno));
2291         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2292 }
2293
2294 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2295                                  size_t *retlen, u_char *buf,
2296                                  otp_op_t action, int user_regs)
2297 {
2298         struct map_info *map = mtd->priv;
2299         struct cfi_private *cfi = map->fldrv_priv;
2300         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2301         struct flchip *chip;
2302         struct cfi_intelext_otpinfo *otp;
2303         u_long devsize, reg_prot_offset, data_offset;
2304         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2305         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2306         int ret;
2307
2308         *retlen = 0;
2309
2310         /* Check that we actually have some OTP registers */
2311         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2312                 return -ENODATA;
2313
2314         /* we need real chips here, not virtual ones */
2315         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2316         chip_step = devsize >> cfi->chipshift;
2317         chip_num = 0;
2318
2319         /* Some chips have OTP located in the _top_ partition only.
2320            For example: Intel 28F256L18T (T means top-parameter device) */
2321         if (cfi->mfr == CFI_MFR_INTEL) {
2322                 switch (cfi->id) {
2323                 case 0x880b:
2324                 case 0x880c:
2325                 case 0x880d:
2326                         chip_num = chip_step - 1;
2327                 }
2328         }
2329
2330         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2331                 chip = &cfi->chips[chip_num];
2332                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2333
2334                 /* first OTP region */
2335                 field = 0;
2336                 reg_prot_offset = extp->ProtRegAddr;
2337                 reg_fact_groups = 1;
2338                 reg_fact_size = 1 << extp->FactProtRegSize;
2339                 reg_user_groups = 1;
2340                 reg_user_size = 1 << extp->UserProtRegSize;
2341
2342                 while (len > 0) {
2343                         /* flash geometry fixup */
2344                         data_offset = reg_prot_offset + 1;
2345                         data_offset *= cfi->interleave * cfi->device_type;
2346                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2347                         reg_fact_size *= cfi->interleave;
2348                         reg_user_size *= cfi->interleave;
2349
2350                         if (user_regs) {
2351                                 groups = reg_user_groups;
2352                                 groupsize = reg_user_size;
2353                                 /* skip over factory reg area */
2354                                 groupno = reg_fact_groups;
2355                                 data_offset += reg_fact_groups * reg_fact_size;
2356                         } else {
2357                                 groups = reg_fact_groups;
2358                                 groupsize = reg_fact_size;
2359                                 groupno = 0;
2360                         }
2361
2362                         while (len > 0 && groups > 0) {
2363                                 if (!action) {
2364                                         /*
2365                                          * Special case: if action is NULL
2366                                          * we fill buf with otp_info records.
2367                                          */
2368                                         struct otp_info *otpinfo;
2369                                         map_word lockword;
2370                                         len -= sizeof(struct otp_info);
2371                                         if (len <= 0)
2372                                                 return -ENOSPC;
2373                                         ret = do_otp_read(map, chip,
2374                                                           reg_prot_offset,
2375                                                           (u_char *)&lockword,
2376                                                           map_bankwidth(map),
2377                                                           0, 0,  0);
2378                                         if (ret)
2379                                                 return ret;
2380                                         otpinfo = (struct otp_info *)buf;
2381                                         otpinfo->start = from;
2382                                         otpinfo->length = groupsize;
2383                                         otpinfo->locked =
2384                                            !map_word_bitsset(map, lockword,
2385                                                              CMD(1 << groupno));
2386                                         from += groupsize;
2387                                         buf += sizeof(*otpinfo);
2388                                         *retlen += sizeof(*otpinfo);
2389                                 } else if (from >= groupsize) {
2390                                         from -= groupsize;
2391                                         data_offset += groupsize;
2392                                 } else {
2393                                         int size = groupsize;
2394                                         data_offset += from;
2395                                         size -= from;
2396                                         from = 0;
2397                                         if (size > len)
2398                                                 size = len;
2399                                         ret = action(map, chip, data_offset,
2400                                                      buf, size, reg_prot_offset,
2401                                                      groupno, groupsize);
2402                                         if (ret < 0)
2403                                                 return ret;
2404                                         buf += size;
2405                                         len -= size;
2406                                         *retlen += size;
2407                                         data_offset += size;
2408                                 }
2409                                 groupno++;
2410                                 groups--;
2411                         }
2412
2413                         /* next OTP region */
2414                         if (++field == extp->NumProtectionFields)
2415                                 break;
2416                         reg_prot_offset = otp->ProtRegAddr;
2417                         reg_fact_groups = otp->FactGroups;
2418                         reg_fact_size = 1 << otp->FactProtRegSize;
2419                         reg_user_groups = otp->UserGroups;
2420                         reg_user_size = 1 << otp->UserProtRegSize;
2421                         otp++;
2422                 }
2423         }
2424
2425         return 0;
2426 }
2427
2428 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2429                                            size_t len, size_t *retlen,
2430                                             u_char *buf)
2431 {
2432         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2433                                      buf, do_otp_read, 0);
2434 }
2435
2436 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2437                                            size_t len, size_t *retlen,
2438                                             u_char *buf)
2439 {
2440         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2441                                      buf, do_otp_read, 1);
2442 }
2443
2444 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2445                                             size_t len, size_t *retlen,
2446                                              u_char *buf)
2447 {
2448         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2449                                      buf, do_otp_write, 1);
2450 }
2451
2452 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2453                                            loff_t from, size_t len)
2454 {
2455         size_t retlen;
2456         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2457                                      NULL, do_otp_lock, 1);
2458 }
2459
2460 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2461                                            size_t *retlen, struct otp_info *buf)
2463 {
2464         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2465                                      NULL, 0);
2466 }
2467
2468 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2469                                            size_t *retlen, struct otp_info *buf)
2470 {
2471         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2472                                      NULL, 1);
2473 }
2474
2475 #endif
2476
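/*
 * Illustrative only: reading the user OTP registers above through the
 * generic MTD API (the buffer size is an assumption):
 *
 *	u_char otp[64];
 *	size_t retlen;
 *	int err = mtd_read_user_prot_reg(mtd, 0, sizeof(otp), &retlen, otp);
 *
 * mtd_get_user_prot_info() can be used first to discover the available
 * OTP regions and their lock state.
 */
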
2477 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2478 {
2479         struct mtd_erase_region_info *region;
2480         int block, status, i;
2481         unsigned long adr;
2482         size_t len;
2483
2484         for (i = 0; i < mtd->numeraseregions; i++) {
2485                 region = &mtd->eraseregions[i];
2486                 if (!region->lockmap)
2487                         continue;
2488
2489                 for (block = 0; block < region->numblocks; block++){
2490                         len = region->erasesize;
2491                         adr = region->offset + block * len;
2492
2493                         status = cfi_varsize_frob(mtd,
2494                                         do_getlockstatus_oneblock, adr, len, NULL);
2495                         if (status)
2496                                 set_bit(block, region->lockmap);
2497                         else
2498                                 clear_bit(block, region->lockmap);
2499                 }
2500         }
2501 }
2502
2503 static int cfi_intelext_suspend(struct mtd_info *mtd)
2504 {
2505         struct map_info *map = mtd->priv;
2506         struct cfi_private *cfi = map->fldrv_priv;
2507         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2508         int i;
2509         struct flchip *chip;
2510         int ret = 0;
2511
2512         if ((mtd->flags & MTD_POWERUP_LOCK)
2513             && extp && (extp->FeatureSupport & (1 << 5)))
2514                 cfi_intelext_save_locks(mtd);
2515
2516         for (i = 0; !ret && i < cfi->numchips; i++) {
2517                 chip = &cfi->chips[i];
2518
2519                 mutex_lock(&chip->mutex);
2520
2521                 switch (chip->state) {
2522                 case FL_READY:
2523                 case FL_STATUS:
2524                 case FL_CFI_QUERY:
2525                 case FL_JEDEC_QUERY:
2526                         if (chip->oldstate == FL_READY) {
2527                                 /* place the chip in a known state before suspend */
2528                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2529                                 chip->oldstate = chip->state;
2530                                 chip->state = FL_PM_SUSPENDED;
2531                                 /* No need to wake_up() on this state change -
2532                                  * as the whole point is that nobody can do anything
2533                                  * with the chip now anyway.
2534                                  */
2535                         } else {
2536                                 /* There seems to be an operation pending. We must wait for it. */
2537                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2538                                 ret = -EAGAIN;
2539                         }
2540                         break;
2541                 default:
2542                         /* Should we actually wait? Once upon a time these routines weren't
2543                            allowed to. Or should we return -EAGAIN, because the upper layers
2544                            ought to have already shut down anything which was using the device
2545                            anyway? The latter for now. */
2546                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2547                         ret = -EAGAIN;
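                        /* fall through */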
2548                 case FL_PM_SUSPENDED:
2549                         break;
2550                 }
2551                 mutex_unlock(&chip->mutex);
2552         }
2553
2554         /* Unlock the chips again */
2555
2556         if (ret) {
2557                 for (i--; i >= 0; i--) {
2558                         chip = &cfi->chips[i];
2559
2560                         mutex_lock(&chip->mutex);
2561
2562                         if (chip->state == FL_PM_SUSPENDED) {
2563                                 /* No need to force it into a known state here,
2564                                    because we're returning failure, and it didn't
2565                                    get power cycled */
2566                                 chip->state = chip->oldstate;
2567                                 chip->oldstate = FL_READY;
2568                                 wake_up(&chip->wq);
2569                         }
2570                         mutex_unlock(&chip->mutex);
2571                 }
2572         }
2573
2574         return ret;
2575 }
2576
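/*
 * Note: the suspend/resume handlers in this file are installed as
 * mtd->_suspend and mtd->_resume during setup and are reached through
 * mtd_suspend()/mtd_resume(), typically from the map driver's power
 * management hooks.
 */
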
2577 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2578 {
2579         struct mtd_erase_region_info *region;
2580         int block, i;
2581         unsigned long adr;
2582         size_t len;
2583
2584         for (i = 0; i < mtd->numeraseregions; i++) {
2585                 region = &mtd->eraseregions[i];
2586                 if (!region->lockmap)
2587                         continue;
2588
2589                 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2590                         len = region->erasesize;
2591                         adr = region->offset + block * len;
2592                         cfi_intelext_unlock(mtd, adr, len);
2593                 }
2594         }
2595 }
2596
2597 static void cfi_intelext_resume(struct mtd_info *mtd)
2598 {
2599         struct map_info *map = mtd->priv;
2600         struct cfi_private *cfi = map->fldrv_priv;
2601         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2602         int i;
2603         struct flchip *chip;
2604
2605         for (i = 0; i < cfi->numchips; i++) {
2606
2607                 chip = &cfi->chips[i];
2608
2609                 mutex_lock(&chip->mutex);
2610
2611                 /* Go to a known state; the chip may have been power cycled */
2612                 if (chip->state == FL_PM_SUSPENDED) {
2613                         /* Refresh LH28F640BF Partition Config. Register */
2614                         fixup_LH28F640BF(mtd);
2615                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2616                         chip->oldstate = chip->state = FL_READY;
2617                         wake_up(&chip->wq);
2618                 }
2619
2620                 mutex_unlock(&chip->mutex);
2621         }
2622
2623         if ((mtd->flags & MTD_POWERUP_LOCK)
2624             && extp && (extp->FeatureSupport & (1 << 5)))
2625                 cfi_intelext_restore_locks(mtd);
2626 }
2627
2628 static int cfi_intelext_reset(struct mtd_info *mtd)
2629 {
2630         struct map_info *map = mtd->priv;
2631         struct cfi_private *cfi = map->fldrv_priv;
2632         int i, ret;
2633
2634         for (i = 0; i < cfi->numchips; i++) {
2635                 struct flchip *chip = &cfi->chips[i];
2636
2637                 /* force the completion of any ongoing operation
2638                    and switch to array mode so any bootloader in
2639                    flash is accessible for soft reboot. */
2640                 mutex_lock(&chip->mutex);
2641                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2642                 if (!ret) {
2643                         map_write(map, CMD(0xff), chip->start);
2644                         chip->state = FL_SHUTDOWN;
2645                         put_chip(map, chip, chip->start);
2646                 }
2647                 mutex_unlock(&chip->mutex);
2648         }
2649
2650         return 0;
2651 }
2652
2653 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2654                                void *v)
2655 {
2656         struct mtd_info *mtd;
2657
2658         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2659         cfi_intelext_reset(mtd);
2660         return NOTIFY_DONE;
2661 }
2662
2663 static void cfi_intelext_destroy(struct mtd_info *mtd)
2664 {
2665         struct map_info *map = mtd->priv;
2666         struct cfi_private *cfi = map->fldrv_priv;
2667         struct mtd_erase_region_info *region;
2668         int i;
2669         cfi_intelext_reset(mtd);
2670         unregister_reboot_notifier(&mtd->reboot_notifier);
2671         kfree(cfi->cmdset_priv);
2672         kfree(cfi->cfiq);
2673         kfree(cfi->chips[0].priv);
2674         kfree(cfi);
2675         for (i = 0; i < mtd->numeraseregions; i++) {
2676                 region = &mtd->eraseregions[i];
2677                 kfree(region->lockmap);
2678         }
2679         kfree(mtd->eraseregions);
2680 }
2681
2682 MODULE_LICENSE("GPL");
2683 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2684 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2685 MODULE_ALIAS("cfi_cmdset_0003");
2686 MODULE_ALIAS("cfi_cmdset_0200");