/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

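/*
 * DBG() traces at pr_debug level; SDHCI_DUMP() prints at pr_err level so that
 * register dumps remain visible on error paths. Both expect a 'host' variable
 * to be in scope.
 */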
#define DBG(f, x...) \
        pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
        pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

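/* Overridable via module parameters of the same name, defined later in the file */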
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
        SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

        SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
                   sdhci_readl(host, SDHCI_DMA_ADDRESS),
                   sdhci_readw(host, SDHCI_HOST_VERSION));
        SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
                   sdhci_readw(host, SDHCI_BLOCK_SIZE),
                   sdhci_readw(host, SDHCI_BLOCK_COUNT));
        SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
                   sdhci_readl(host, SDHCI_ARGUMENT),
                   sdhci_readw(host, SDHCI_TRANSFER_MODE));
        SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
                   sdhci_readl(host, SDHCI_PRESENT_STATE),
                   sdhci_readb(host, SDHCI_HOST_CONTROL));
        SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
                   sdhci_readb(host, SDHCI_POWER_CONTROL),
                   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
        SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
                   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
                   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
        SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
                   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
                   sdhci_readl(host, SDHCI_INT_STATUS));
        SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
                   sdhci_readl(host, SDHCI_INT_ENABLE),
                   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
        SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
                   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
                   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
        SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
                   sdhci_readl(host, SDHCI_CAPABILITIES),
                   sdhci_readl(host, SDHCI_CAPABILITIES_1));
        SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
                   sdhci_readw(host, SDHCI_COMMAND),
                   sdhci_readl(host, SDHCI_MAX_CURRENT));
        SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
                   sdhci_readl(host, SDHCI_RESPONSE),
                   sdhci_readl(host, SDHCI_RESPONSE + 4));
        SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
                   sdhci_readl(host, SDHCI_RESPONSE + 8),
                   sdhci_readl(host, SDHCI_RESPONSE + 12));
        SDHCI_DUMP("Host ctl2: 0x%08x\n",
                   sdhci_readw(host, SDHCI_HOST_CONTROL2));

        if (host->flags & SDHCI_USE_ADMA) {
                if (host->flags & SDHCI_USE_64_BIT_DMA) {
                        SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
                                   sdhci_readl(host, SDHCI_ADMA_ERROR),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
                } else {
                        SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
                                   sdhci_readl(host, SDHCI_ADMA_ERROR),
                                   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
                }
        }

        SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

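/* A command occupies the data lines if it transfers data or signals busy on DAT0 */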
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
        return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

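/*
 * Enable or disable the card insert/remove interrupts. Skipped when card
 * detection is broken, the card is non-removable, or a GPIO provides
 * card-detect.
 */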
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
        u32 present;

        if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
            !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
                return;

        if (enable) {
                present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                                      SDHCI_CARD_PRESENT;

                host->ier |= present ? SDHCI_INT_CARD_REMOVE :
                                       SDHCI_INT_CARD_INSERT;
        } else {
                host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
        }

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
        sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
        sdhci_set_card_detection(host, false);
}

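/*
 * Hold a runtime PM reference while SD bus power is on, for hosts where
 * powering the card requires the bus to stay powered
 * (SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON).
 */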
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
        if (host->bus_on)
                return;
        host->bus_on = true;
        pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
        if (!host->bus_on)
                return;
        host->bus_on = false;
        pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
        ktime_t timeout;

        sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

        if (mask & SDHCI_RESET_ALL) {
                host->clock = 0;
                /* Reset-all turns off SD Bus Power */
                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_off(host);
        }

        /* Wait max 100 ms */
        timeout = ktime_add_ms(ktime_get(), 100);

        /* hw clears the bit when it's done */
        while (1) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
                        break;
                if (timedout) {
                        pr_err("%s: Reset 0x%x never completed.\n",
                                mmc_hostname(host->mmc), (int)mask);
                        sdhci_dumpregs(host);
                        return;
                }
                udelay(10);
        }
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
        if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
                struct mmc_host *mmc = host->mmc;

                if (!mmc->ops->get_cd(mmc))
                        return;
        }

        host->ops->reset(host, mask);

        if (mask & SDHCI_RESET_ALL) {
                if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                        if (host->ops->enable_dma)
                                host->ops->enable_dma(host);
                }

                /* Resetting the controller clears many settings, including preset enable */
                host->preset_enabled = false;
        }
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
        host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
                    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
                    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
                    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
                    SDHCI_INT_RESPONSE;

        if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
            host->tuning_mode == SDHCI_TUNING_MODE_3)
                host->ier |= SDHCI_INT_RETUNE;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
        struct mmc_host *mmc = host->mmc;

        if (soft)
                sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
        else
                sdhci_do_reset(host, SDHCI_RESET_ALL);

        sdhci_set_default_irqs(host);

        host->cqe_on = false;

        if (soft) {
                /* force clock reconfiguration */
                host->clock = 0;
                host->reinit_uhs = true;
                mmc->ops->set_ios(mmc, &mmc->ios);
        }
}

static void sdhci_reinit(struct sdhci_host *host)
{
        sdhci_init(host, 0);
        sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl |= SDHCI_CTRL_LED;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
        u8 ctrl;

        ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
        ctrl &= ~SDHCI_CTRL_LED;
        sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
                              enum led_brightness brightness)
{
        struct sdhci_host *host = container_of(led, struct sdhci_host, led);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        if (host->runtime_suspended)
                goto out;

        if (brightness == LED_OFF)
                __sdhci_led_deactivate(host);
        else
                __sdhci_led_activate(host);
out:
        spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;

        snprintf(host->led_name, sizeof(host->led_name),
                 "%s::", mmc_hostname(mmc));

        host->led.name = host->led_name;
        host->led.brightness = LED_OFF;
        host->led.default_trigger = mmc_hostname(mmc);
        host->led.brightness_set = sdhci_led_control;

        return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
        led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
        return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
        __sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
        __sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
        unsigned long flags;
        size_t blksize, len, chunk;
        u32 uninitialized_var(scratch);
        u8 *buf;

        DBG("PIO reading\n");

        blksize = host->data->blksz;
        chunk = 0;

        local_irq_save(flags);

        while (blksize) {
                BUG_ON(!sg_miter_next(&host->sg_miter));

                len = min(host->sg_miter.length, blksize);

                blksize -= len;
                host->sg_miter.consumed = len;

                buf = host->sg_miter.addr;

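                /*
                 * Unpack each 32-bit word read from the data port one byte
                 * at a time, least significant byte first.
                 */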
                while (len) {
                        if (chunk == 0) {
                                scratch = sdhci_readl(host, SDHCI_BUFFER);
                                chunk = 4;
                        }

                        *buf = scratch & 0xFF;

                        buf++;
                        scratch >>= 8;
                        chunk--;
                        len--;
                }
        }

        sg_miter_stop(&host->sg_miter);

        local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
        unsigned long flags;
        size_t blksize, len, chunk;
        u32 scratch;
        u8 *buf;

        DBG("PIO writing\n");

        blksize = host->data->blksz;
        chunk = 0;
        scratch = 0;

        local_irq_save(flags);

        while (blksize) {
                BUG_ON(!sg_miter_next(&host->sg_miter));

                len = min(host->sg_miter.length, blksize);

                blksize -= len;
                host->sg_miter.consumed = len;

                buf = host->sg_miter.addr;

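                /*
                 * Pack bytes into a 32-bit word, least significant byte
                 * first, and flush it to the data port every four bytes or
                 * at the end of the transfer.
                 */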
                while (len) {
                        scratch |= (u32)*buf << (chunk * 8);

                        buf++;
                        chunk++;
                        len--;

                        if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
                                sdhci_writel(host, scratch, SDHCI_BUFFER);
                                chunk = 0;
                                scratch = 0;
                        }
                }
        }

        sg_miter_stop(&host->sg_miter);

        local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
        u32 mask;

        if (host->blocks == 0)
                return;

        if (host->data->flags & MMC_DATA_READ)
                mask = SDHCI_DATA_AVAILABLE;
        else
                mask = SDHCI_SPACE_AVAILABLE;

        /*
         * Some controllers (JMicron JMB38x) mess up the buffer bits
         * for transfers < 4 bytes. As long as it is just one block,
         * we can ignore the bits.
         */
        if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
                (host->data->blocks == 1))
                mask = ~0;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
                        udelay(100);

                if (host->data->flags & MMC_DATA_READ)
                        sdhci_read_block_pio(host);
                else
                        sdhci_write_block_pio(host);

                host->blocks--;
                if (host->blocks == 0)
                        break;
        }

        DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
                                  struct mmc_data *data, int cookie)
{
        int sg_count;

        /*
         * If the data buffers are already mapped, return the previous
         * dma_map_sg() result.
         */
        if (data->host_cookie == COOKIE_PRE_MAPPED)
                return data->sg_count;

        /* Bounce write requests to the bounce buffer */
        if (host->bounce_buffer) {
                unsigned int length = data->blksz * data->blocks;

                if (length > host->bounce_buffer_size) {
                        pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
                               mmc_hostname(host->mmc), length,
                               host->bounce_buffer_size);
                        return -EIO;
                }
                if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
                        /* Copy the data to the bounce buffer */
                        sg_copy_to_buffer(data->sg, data->sg_len,
                                          host->bounce_buffer,
                                          length);
                }
                /* Switch ownership to the DMA */
                dma_sync_single_for_device(host->mmc->parent,
                                           host->bounce_addr,
                                           host->bounce_buffer_size,
                                           mmc_get_dma_dir(data));
                /* Just a dummy value */
                sg_count = 1;
        } else {
                /* Just access the data directly from memory */
                sg_count = dma_map_sg(mmc_dev(host->mmc),
                                      data->sg, data->sg_len,
                                      mmc_get_dma_dir(data));
        }

        if (sg_count == 0)
                return -ENOSPC;

        data->sg_count = sg_count;
        data->host_cookie = cookie;

        return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
        kunmap_atomic(buffer);
        local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
                                  dma_addr_t addr, int len, unsigned cmd)
{
        struct sdhci_adma2_64_desc *dma_desc = desc;

        /* 32-bit and 64-bit descriptors have these members in same position */
        dma_desc->cmd = cpu_to_le16(cmd);
        dma_desc->len = cpu_to_le16(len);
        dma_desc->addr_lo = cpu_to_le32((u32)addr);

        if (host->flags & SDHCI_USE_64_BIT_DMA)
                dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
        struct sdhci_adma2_64_desc *dma_desc = desc;

        /* 32-bit and 64-bit descriptors have 'cmd' in same position */
        dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
        struct mmc_data *data, int sg_count)
{
        struct scatterlist *sg;
        unsigned long flags;
        dma_addr_t addr, align_addr;
        void *desc, *align;
        char *buffer;
        int len, offset, i;

        /*
         * The spec does not specify endianness of descriptor table.
         * We currently guess that it is LE.
         */

        host->sg_count = sg_count;

        desc = host->adma_table;
        align = host->align_buffer;

        align_addr = host->align_addr;

        for_each_sg(data->sg, sg, host->sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                /*
                 * The SDHCI specification states that ADMA addresses must
                 * be 32-bit aligned. If they aren't, then we use a bounce
                 * buffer for the (up to three) bytes that screw up the
                 * alignment.
                 */
                offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
                         SDHCI_ADMA2_MASK;
                if (offset) {
                        if (data->flags & MMC_DATA_WRITE) {
                                buffer = sdhci_kmap_atomic(sg, &flags);
                                memcpy(align, buffer, offset);
                                sdhci_kunmap_atomic(buffer, &flags);
                        }

                        /* tran, valid */
                        sdhci_adma_write_desc(host, desc, align_addr, offset,
                                              ADMA2_TRAN_VALID);

                        BUG_ON(offset > 65536);

                        align += SDHCI_ADMA2_ALIGN;
                        align_addr += SDHCI_ADMA2_ALIGN;

                        desc += host->desc_sz;

                        addr += offset;
                        len -= offset;
                }

                BUG_ON(len > 65536);

                if (len) {
                        /* tran, valid */
                        sdhci_adma_write_desc(host, desc, addr, len,
                                              ADMA2_TRAN_VALID);
                        desc += host->desc_sz;
                }

                /*
                 * If this triggers then we have a calculation bug
                 * somewhere. :/
                 */
                WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
        }

        if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
                /* Mark the last descriptor as the terminating descriptor */
                if (desc != host->adma_table) {
                        desc -= host->desc_sz;
                        sdhci_adma_mark_end(desc);
                }
        } else {
                /* Add a terminating entry - nop, end, valid */
                sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
        }
}

static void sdhci_adma_table_post(struct sdhci_host *host,
        struct mmc_data *data)
{
        struct scatterlist *sg;
        int i, size;
        void *align;
        char *buffer;
        unsigned long flags;

        if (data->flags & MMC_DATA_READ) {
                bool has_unaligned = false;

                /* Do a quick scan of the SG list for any unaligned mappings */
                for_each_sg(data->sg, sg, host->sg_count, i)
                        if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
                                has_unaligned = true;
                                break;
                        }

                if (has_unaligned) {
                        dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
                                            data->sg_len, DMA_FROM_DEVICE);

                        align = host->align_buffer;

                        for_each_sg(data->sg, sg, host->sg_count, i) {
                                if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
                                        size = SDHCI_ADMA2_ALIGN -
                                               (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

                                        buffer = sdhci_kmap_atomic(sg, &flags);
                                        memcpy(buffer, align, size);
                                        sdhci_kunmap_atomic(buffer, &flags);

                                        align += SDHCI_ADMA2_ALIGN;
                                }
                        }
                }
        }
}

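/*
 * SDMA transfers use a single contiguous address: the bounce buffer when one
 * is in use, otherwise the sole mapped scatterlist entry.
 */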
static u32 sdhci_sdma_address(struct sdhci_host *host)
{
        if (host->bounce_buffer)
                return host->bounce_addr;
        else
                return sg_dma_address(host->data->sg);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
                                         struct mmc_command *cmd,
                                         struct mmc_data *data)
{
        unsigned int target_timeout;

        /* timeout in us */
        if (!data) {
                target_timeout = cmd->busy_timeout * 1000;
        } else {
                target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
                if (host->clock && data->timeout_clks) {
                        unsigned long long val;

                        /*
                         * data->timeout_clks is in units of clock cycles.
                         * host->clock is in Hz.  target_timeout is in us.
                         * Hence, us = 1000000 * cycles / Hz.  Round up.
                         */
                        val = 1000000ULL * data->timeout_clks;
                        if (do_div(val, host->clock))
                                target_timeout++;
                        target_timeout += val;
                }
        }

        return target_timeout;
}

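/*
 * Estimate a software data timeout (in ns) for use when the hardware data
 * timeout is disabled (SDHCI_QUIRK2_DISABLE_HW_TIMEOUT).
 */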
static void sdhci_calc_sw_timeout(struct sdhci_host *host,
                                  struct mmc_command *cmd)
{
        struct mmc_data *data = cmd->data;
        struct mmc_host *mmc = host->mmc;
        struct mmc_ios *ios = &mmc->ios;
        unsigned char bus_width = 1 << ios->bus_width;
        unsigned int blksz;
        unsigned int freq;
        u64 target_timeout;
        u64 transfer_time;

        target_timeout = sdhci_target_timeout(host, cmd, data);
        target_timeout *= NSEC_PER_USEC;

        if (data) {
                blksz = data->blksz;
                freq = host->mmc->actual_clock ? : host->clock;
                transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
                do_div(transfer_time, freq);
                /* multiply by '2' to account for any unknowns */
                transfer_time = transfer_time * 2;
                /* calculate timeout for the entire data */
                host->data_timeout = data->blocks * target_timeout +
                                     transfer_time;
        } else {
                host->data_timeout = target_timeout;
        }

        if (host->data_timeout)
                host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
                             bool *too_big)
{
        u8 count;
        struct mmc_data *data = cmd->data;
        unsigned target_timeout, current_timeout;

        *too_big = true;

        /*
         * If the host controller provides us with an incorrect timeout
         * value, just skip the check and use 0xE.  The hardware may take
         * longer to time out, but that's much better than having a too-short
         * timeout value.
         */
        if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
                return 0xE;

        /* Unspecified timeout, assume max */
        if (!data && !cmd->busy_timeout)
                return 0xE;

        /* timeout in us */
        target_timeout = sdhci_target_timeout(host, cmd, data);

        /*
         * Figure out needed cycles.
         * We do this in steps in order to fit inside a 32 bit int.
         * The first step is the minimum timeout, which will have a
         * minimum resolution of 6 bits:
         * (1) 2^13*1000 > 2^22,
         * (2) host->timeout_clk < 2^16
         *     =>
         *     (1) / (2) > 2^6
         */
        count = 0;
        current_timeout = (1 << 13) * 1000 / host->timeout_clk;
        while (current_timeout < target_timeout) {
                count++;
                current_timeout <<= 1;
                if (count >= 0xF)
                        break;
        }

        if (count >= 0xF) {
                if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
                        DBG("Too large timeout 0x%x requested for CMD%d!\n",
                            count, cmd->opcode);
                count = 0xE;
        } else {
                *too_big = false;
        }

        return count;
}

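/* Enable the PIO or DMA completion interrupts that match the transfer method */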
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
        u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
        u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

        if (host->flags & SDHCI_REQ_USE_DMA)
                host->ier = (host->ier & ~pio_irqs) | dma_irqs;
        else
                host->ier = (host->ier & ~dma_irqs) | pio_irqs;

        if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
                host->ier |= SDHCI_INT_AUTO_CMD_ERR;
        else
                host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
        if (enable)
                host->ier |= SDHCI_INT_DATA_TIMEOUT;
        else
                host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 count;

        if (host->ops->set_timeout) {
                host->ops->set_timeout(host, cmd);
        } else {
                bool too_big = false;

                count = sdhci_calc_timeout(host, cmd, &too_big);

                if (too_big &&
                    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
                        sdhci_calc_sw_timeout(host, cmd);
                        sdhci_set_data_timeout_irq(host, false);
                } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
                        sdhci_set_data_timeout_irq(host, true);
                }

                sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
        }
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 ctrl;
        struct mmc_data *data = cmd->data;

        host->data_timeout = 0;

        if (sdhci_data_line_cmd(cmd))
                sdhci_set_timeout(host, cmd);

        if (!data)
                return;

        WARN_ON(host->data);

        /* Sanity checks */
        BUG_ON(data->blksz * data->blocks > 524288);
        BUG_ON(data->blksz > host->mmc->max_blk_size);
        BUG_ON(data->blocks > 65535);

        host->data = data;
        host->data_early = 0;
        host->data->bytes_xfered = 0;

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                struct scatterlist *sg;
                unsigned int length_mask, offset_mask;
                int i;

                host->flags |= SDHCI_REQ_USE_DMA;

                /*
                 * FIXME: This doesn't account for merging when mapping the
                 * scatterlist.
                 *
                 * The assumption here being that alignment and lengths are
                 * the same after DMA mapping to device address space.
                 */
                length_mask = 0;
                offset_mask = 0;
                if (host->flags & SDHCI_USE_ADMA) {
                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
                                length_mask = 3;
                                /*
                                 * As we use up to 3 byte chunks to work
                                 * around alignment problems, we need to
                                 * check the offset as well.
                                 */
                                offset_mask = 3;
                        }
                } else {
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
                                length_mask = 3;
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
                                offset_mask = 3;
                }

                if (unlikely(length_mask | offset_mask)) {
                        for_each_sg(data->sg, sg, data->sg_len, i) {
                                if (sg->length & length_mask) {
                                        DBG("Reverting to PIO because of transfer size (%d)\n",
                                            sg->length);
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                                if (sg->offset & offset_mask) {
                                        DBG("Reverting to PIO because of bad alignment\n");
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                        }
                }
        }

        if (host->flags & SDHCI_REQ_USE_DMA) {
                int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

                if (sg_cnt <= 0) {
                        /*
                         * This only happens when someone fed
                         * us an invalid request.
                         */
                        WARN_ON(1);
                        host->flags &= ~SDHCI_REQ_USE_DMA;
                } else if (host->flags & SDHCI_USE_ADMA) {
                        sdhci_adma_table_pre(host, data, sg_cnt);

                        sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                sdhci_writel(host,
                                             (u64)host->adma_addr >> 32,
                                             SDHCI_ADMA_ADDRESS_HI);
                } else {
                        WARN_ON(sg_cnt != 1);
                        sdhci_writel(host, sdhci_sdma_address(host),
                                     SDHCI_DMA_ADDRESS);
                }
        }

        /*
         * Always adjust the DMA selection as some controllers
         * (e.g. JMicron) can't do PIO properly when the selection
         * is ADMA.
         */
        if (host->version >= SDHCI_SPEC_200) {
                ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
                ctrl &= ~SDHCI_CTRL_DMA_MASK;
                if ((host->flags & SDHCI_REQ_USE_DMA) &&
                        (host->flags & SDHCI_USE_ADMA)) {
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                ctrl |= SDHCI_CTRL_ADMA64;
                        else
                                ctrl |= SDHCI_CTRL_ADMA32;
                } else {
                        ctrl |= SDHCI_CTRL_SDMA;
                }
                sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
        }

        if (!(host->flags & SDHCI_REQ_USE_DMA)) {
                int flags;

                flags = SG_MITER_ATOMIC;
                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;
                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->blocks = data->blocks;
        }

        sdhci_set_transfer_irqs(host);

        /* Set the DMA boundary value and block size */
        sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
                     SDHCI_BLOCK_SIZE);
        sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

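/*
 * Auto-CMD12 is used to terminate open-ended multi-block transfers: only
 * when there is no CMD23 (sbc) and the request does not need the command
 * line during the transfer.
 */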
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
                                    struct mmc_request *mrq)
{
        return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
               !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
        struct mmc_command *cmd)
{
        u16 mode = 0;
        struct mmc_data *data = cmd->data;

        if (data == NULL) {
                if (host->quirks2 &
                        SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
                        /* must not clear SDHCI_TRANSFER_MODE when tuning */
                        if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
                                sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
                } else {
                        /* clear Auto CMD settings for no data CMDs */
                        mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
                        sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
                                SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
                }
                return;
        }

        WARN_ON(!host->data);

        if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
                mode = SDHCI_TRNS_BLK_CNT_EN;

        if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
                mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
                /*
                 * If we are sending CMD23, CMD12 never gets sent
                 * on successful completion (so no Auto-CMD12).
                 */
                if (sdhci_auto_cmd12(host, cmd->mrq) &&
                    (cmd->opcode != SD_IO_RW_EXTENDED))
                        mode |= SDHCI_TRNS_AUTO_CMD12;
                else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
                        mode |= SDHCI_TRNS_AUTO_CMD23;
                        sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
                }
        }

        if (data->flags & MMC_DATA_READ)
                mode |= SDHCI_TRNS_READ;
        if (host->flags & SDHCI_REQ_USE_DMA)
                mode |= SDHCI_TRNS_DMA;

        sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

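/*
 * A reset is needed after a request if any of its commands failed, or if the
 * controller requires a reset after every request.
 */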
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
        return (!(host->flags & SDHCI_DEVICE_DEAD) &&
                ((mrq->cmd && mrq->cmd->error) ||
                 (mrq->sbc && mrq->sbc->error) ||
                 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
                 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
        int i;

        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (host->mrqs_done[i] == mrq) {
                        WARN_ON(1);
                        return;
                }
        }

        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                if (!host->mrqs_done[i]) {
                        host->mrqs_done[i] = mrq;
                        break;
                }
        }

        WARN_ON(i >= SDHCI_MAX_MRQS);

        tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
        if (host->cmd && host->cmd->mrq == mrq)
                host->cmd = NULL;

        if (host->data_cmd && host->data_cmd->mrq == mrq)
                host->data_cmd = NULL;

        if (host->data && host->data->mrq == mrq)
                host->data = NULL;

        if (sdhci_needs_reset(host, mrq))
                host->pending_reset = true;

        __sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
        struct mmc_command *data_cmd = host->data_cmd;
        struct mmc_data *data = host->data;

        host->data = NULL;
        host->data_cmd = NULL;

        /*
         * The controller needs a reset of internal state machines upon error
         * conditions.
         */
        if (data->error) {
                if (!host->cmd || host->cmd == data_cmd)
                        sdhci_do_reset(host, SDHCI_RESET_CMD);
                sdhci_do_reset(host, SDHCI_RESET_DATA);
        }

        if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
            (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
                sdhci_adma_table_post(host, data);

        /*
         * The specification states that the block count register must
         * be updated, but it does not specify at what point in the
         * data flow. That makes the register entirely useless to read
         * back so we have to assume that nothing made it to the card
         * in the event of an error.
         */
        if (data->error)
                data->bytes_xfered = 0;
        else
                data->bytes_xfered = data->blksz * data->blocks;

        /*
         * Need to send CMD12 if -
         * a) open-ended multiblock transfer (no CMD23)
         * b) error in multiblock transfer
         */
        if (data->stop &&
            (data->error ||
             !data->mrq->sbc)) {
                /*
                 * 'cap_cmd_during_tfr' request must not use the command line
                 * after mmc_command_done() has been called. It is upper layer's
                 * responsibility to send the stop command if required.
                 */
                if (data->mrq->cap_cmd_during_tfr) {
                        sdhci_finish_mrq(host, data->mrq);
                } else {
                        /* Avoid triggering warning in sdhci_send_command() */
                        host->cmd = NULL;
                        sdhci_send_command(host, data->stop);
                }
        } else {
                sdhci_finish_mrq(host, data->mrq);
        }
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
                            unsigned long timeout)
{
        if (sdhci_data_line_cmd(mrq->cmd))
                mod_timer(&host->data_timer, timeout);
        else
                mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
        if (sdhci_data_line_cmd(mrq->cmd))
                del_timer(&host->data_timer);
        else
                del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
        int flags;
        u32 mask;
        unsigned long timeout;

        WARN_ON(host->cmd);

        /* Initially, a command has no error */
        cmd->error = 0;

        if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
            cmd->opcode == MMC_STOP_TRANSMISSION)
                cmd->flags |= MMC_RSP_BUSY;

        /* Wait max 10 ms */
        timeout = 10;

        mask = SDHCI_CMD_INHIBIT;
        if (sdhci_data_line_cmd(cmd))
                mask |= SDHCI_DATA_INHIBIT;

        /*
         * We shouldn't wait for data inhibit for stop commands, even
         * though they might use busy signaling.
         */
        if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
                mask &= ~SDHCI_DATA_INHIBIT;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (timeout == 0) {
                        pr_err("%s: Controller never released inhibit bit(s).\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
                        cmd->error = -EIO;
                        sdhci_finish_mrq(host, cmd->mrq);
                        return;
                }
                timeout--;
                mdelay(1);
        }

        host->cmd = cmd;
        if (sdhci_data_line_cmd(cmd)) {
                WARN_ON(host->data_cmd);
                host->data_cmd = cmd;
        }

        sdhci_prepare_data(host, cmd);

        sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

        sdhci_set_transfer_mode(host, cmd);

        if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
                pr_err("%s: Unsupported response type!\n",
                        mmc_hostname(host->mmc));
                cmd->error = -EINVAL;
                sdhci_finish_mrq(host, cmd->mrq);
                return;
        }

        if (!(cmd->flags & MMC_RSP_PRESENT))
                flags = SDHCI_CMD_RESP_NONE;
        else if (cmd->flags & MMC_RSP_136)
                flags = SDHCI_CMD_RESP_LONG;
        else if (cmd->flags & MMC_RSP_BUSY)
                flags = SDHCI_CMD_RESP_SHORT_BUSY;
        else
                flags = SDHCI_CMD_RESP_SHORT;

        if (cmd->flags & MMC_RSP_CRC)
                flags |= SDHCI_CMD_CRC;
        if (cmd->flags & MMC_RSP_OPCODE)
                flags |= SDHCI_CMD_INDEX;

        /* CMD19 is special in that the Data Present Select should be set */
        if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
            cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
                flags |= SDHCI_CMD_DATA;

        timeout = jiffies;
        if (host->data_timeout)
                timeout += nsecs_to_jiffies(host->data_timeout);
        else if (!cmd->data && cmd->busy_timeout > 9000)
                timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
        else
                timeout += 10 * HZ;
        sdhci_mod_timer(host, cmd->mrq, timeout);

        sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

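/*
 * Read a 136-bit response from the four response registers, most significant
 * word first. The controller strips the CRC byte, so unless
 * SDHCI_QUIRK2_RSP_136_HAS_CRC is set, each word is shifted left by one byte
 * to realign the response.
 */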
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
        int i, reg;

        for (i = 0; i < 4; i++) {
                reg = SDHCI_RESPONSE + (3 - i) * 4;
                cmd->resp[i] = sdhci_readl(host, reg);
        }

        if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
                return;

        /* CRC is stripped so we need to do some shifting */
        for (i = 0; i < 4; i++) {
                cmd->resp[i] <<= 8;
                if (i != 3)
                        cmd->resp[i] |= cmd->resp[i + 1] >> 24;
        }
}

static void sdhci_finish_command(struct sdhci_host *host)
{
        struct mmc_command *cmd = host->cmd;

        host->cmd = NULL;

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        sdhci_read_rsp_136(host, cmd);
                } else {
                        cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
                }
        }

        if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
                mmc_command_done(host->mmc, cmd->mrq);

        /*
         * The host can send an interrupt when the busy state has
         * ended, allowing us to wait without wasting CPU cycles.
         * The busy signal uses DAT0 so this is similar to waiting
         * for data to complete.
         *
         * Note: The 1.0 specification is a bit ambiguous about this
         *       feature so there might be some problems with older
         *       controllers.
         */
        if (cmd->flags & MMC_RSP_BUSY) {
                if (cmd->data) {
                        DBG("Cannot wait for busy signal when also doing a data transfer");
                } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
                           cmd == host->data_cmd) {
                        /* Command complete before busy is ended */
                        return;
                }
        }

        /* Finished CMD23, now send actual command. */
        if (cmd == cmd->mrq->sbc) {
                sdhci_send_command(host, cmd->mrq->cmd);
        } else {

                /* Processed actual command. */
                if (host->data && host->data_early)
                        sdhci_finish_data(host);

                if (!cmd->data)
                        sdhci_finish_mrq(host, cmd->mrq);
        }
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
        u16 preset = 0;

        switch (host->timing) {
        case MMC_TIMING_MMC_HS:
        case MMC_TIMING_SD_HS:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
                break;
        case MMC_TIMING_UHS_SDR12:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
                break;
        case MMC_TIMING_UHS_SDR25:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
                break;
        case MMC_TIMING_UHS_SDR50:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
                break;
        case MMC_TIMING_UHS_SDR104:
        case MMC_TIMING_MMC_HS200:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
                break;
        case MMC_TIMING_UHS_DDR50:
        case MMC_TIMING_MMC_DDR52:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
                break;
        case MMC_TIMING_MMC_HS400:
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
                break;
        default:
                pr_warn("%s: Invalid UHS-I mode selected\n",
                        mmc_hostname(host->mmc));
                preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
                break;
        }
        return preset;
}

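/*
 * Work out the divider setting for the Clock Control register. SDHCI 3.0
 * supports any even divisor of the base clock (the field encodes divisor/2)
 * and, optionally, a programmable clock generator (the field encodes
 * divisor-1); SDHCI 2.0 only supports power-of-2 divisors.
 */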
1409 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1410                    unsigned int *actual_clock)
1411 {
1412         int div = 0; /* Initialized for compiler warning */
1413         int real_div = div, clk_mul = 1;
1414         u16 clk = 0;
1415         bool switch_base_clk = false;
1416
1417         if (host->version >= SDHCI_SPEC_300) {
1418                 if (host->preset_enabled) {
1419                         u16 pre_val;
1420
1421                         clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1422                         pre_val = sdhci_get_preset_value(host);
1423                         div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1424                         if (host->clk_mul &&
1425                                 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1426                                 clk = SDHCI_PROG_CLOCK_MODE;
1427                                 real_div = div + 1;
1428                                 clk_mul = host->clk_mul;
1429                         } else {
1430                                 real_div = max_t(int, 1, div << 1);
1431                         }
1432                         goto clock_set;
1433                 }
1434
1435                 /*
1436                  * Check if the Host Controller supports Programmable Clock
1437                  * Mode.
1438                  */
1439                 if (host->clk_mul) {
1440                         for (div = 1; div <= 1024; div++) {
1441                                 if ((host->max_clk * host->clk_mul / div)
1442                                         <= clock)
1443                                         break;
1444                         }
1445                         if ((host->max_clk * host->clk_mul / div) <= clock) {
1446                                 /*
1447                                  * Set Programmable Clock Mode in the Clock
1448                                  * Control register.
1449                                  */
1450                                 clk = SDHCI_PROG_CLOCK_MODE;
1451                                 real_div = div;
1452                                 clk_mul = host->clk_mul;
1453                                 div--;
1454                         } else {
1455                                 /*
1456                                  * Divisor can be too small to reach clock
1457                                  * speed requirement. Then use the base clock.
1458                                  */
1459                                 switch_base_clk = true;
1460                         }
1461                 }
1462
1463                 if (!host->clk_mul || switch_base_clk) {
1464                         /* Version 3.00 divisors must be a multiple of 2. */
1465                         if (host->max_clk <= clock)
1466                                 div = 1;
1467                         else {
1468                                 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1469                                      div += 2) {
1470                                         if ((host->max_clk / div) <= clock)
1471                                                 break;
1472                                 }
1473                         }
1474                         real_div = div;
1475                         div >>= 1;
1476                         if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1477                                 && !div && host->max_clk <= 25000000)
1478                                 div = 1;
1479                 }
1480         } else {
1481                 /* Version 2.00 divisors must be a power of 2. */
1482                 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1483                         if ((host->max_clk / div) <= clock)
1484                                 break;
1485                 }
1486                 real_div = div;
1487                 div >>= 1;
1488         }
1489
1490 clock_set:
1491         if (real_div)
1492                 *actual_clock = (host->max_clk * clk_mul) / real_div;
1493         clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1494         clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1495                 << SDHCI_DIVIDER_HI_SHIFT;
1496
1497         return clk;
1498 }
1499 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1500
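/*
 * Latch the divider computed by sdhci_calc_clk() with the internal clock
 * enabled, poll up to 20 ms for the internal clock to report stable, and
 * only then gate the clock out to the card via SDHCI_CLOCK_CARD_EN.
 */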
1501 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1502 {
1503         ktime_t timeout;
1504
1505         clk |= SDHCI_CLOCK_INT_EN;
1506         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1507
1508         /* Wait max 20 ms */
1509         timeout = ktime_add_ms(ktime_get(), 20);
1510         while (1) {
1511                 bool timedout = ktime_after(ktime_get(), timeout);
1512
1513                 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1514                 if (clk & SDHCI_CLOCK_INT_STABLE)
1515                         break;
1516                 if (timedout) {
1517                         pr_err("%s: Internal clock never stabilised.\n",
1518                                mmc_hostname(host->mmc));
1519                         sdhci_dumpregs(host);
1520                         return;
1521                 }
1522                 udelay(10);
1523         }
1524
1525         clk |= SDHCI_CLOCK_CARD_EN;
1526         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1527 }
1528 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1529
1530 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1531 {
1532         u16 clk;
1533
1534         host->mmc->actual_clock = 0;
1535
1536         sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1537
1538         if (clock == 0)
1539                 return;
1540
1541         clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1542         sdhci_enable_clk(host, clk);
1543 }
1544 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1545
1546 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1547                                 unsigned short vdd)
1548 {
1549         struct mmc_host *mmc = host->mmc;
1550
1551         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1552
1553         if (mode != MMC_POWER_OFF)
1554                 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1555         else
1556                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1557 }
1558
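/*
 * Program the power register directly when no vmmc regulator is used.
 * Note @vdd is an ocr_avail bit number, so "1 << vdd" recovers the
 * MMC_VDD_* range bit; e.g. a vdd selecting MMC_VDD_32_33 maps to
 * SDHCI_POWER_330 below.
 */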
1559 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1560                            unsigned short vdd)
1561 {
1562         u8 pwr = 0;
1563
1564         if (mode != MMC_POWER_OFF) {
1565                 switch (1 << vdd) {
1566                 case MMC_VDD_165_195:
1567                 /*
1568                  * Without a regulator, SDHCI does not support 2.0v
1569                  * so we only get here if the driver deliberately
1570                  * added the 2.0v range to ocr_avail. Map it to 1.8v
1571                  * for the purpose of turning on the power.
1572                  */
1573                 case MMC_VDD_20_21:
1574                         pwr = SDHCI_POWER_180;
1575                         break;
1576                 case MMC_VDD_29_30:
1577                 case MMC_VDD_30_31:
1578                         pwr = SDHCI_POWER_300;
1579                         break;
1580                 case MMC_VDD_32_33:
1581                 case MMC_VDD_33_34:
1582                 /*
1583                  * 3.4 ~ 3.6V are valid only for those platforms where it's
1584                  * known that the voltage range is supported by hardware.
1585                  */
1586                 case MMC_VDD_34_35:
1587                 case MMC_VDD_35_36:
1588                         pwr = SDHCI_POWER_330;
1589                         break;
1590                 default:
1591                         WARN(1, "%s: Invalid vdd %#x\n",
1592                              mmc_hostname(host->mmc), vdd);
1593                         break;
1594                 }
1595         }
1596
1597         if (host->pwr == pwr)
1598                 return;
1599
1600         host->pwr = pwr;
1601
1602         if (pwr == 0) {
1603                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1604                 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1605                         sdhci_runtime_pm_bus_off(host);
1606         } else {
1607                 /*
1608                  * Spec says that we should clear the power reg before setting
1609                  * a new value. Some controllers don't seem to like this though.
1610                  */
1611                 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1612                         sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1613
1614                 /*
1615                  * At least the Marvell CaFe chip gets confused if we set the
1616                  * voltage and turn on the power at the same time, so set the
1617                  * voltage first.
1618                  */
1619                 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1620                         sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1621
1622                 pwr |= SDHCI_POWER_ON;
1623
1624                 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1625
1626                 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1627                         sdhci_runtime_pm_bus_on(host);
1628
1629                 /*
1630                  * Some controllers need an extra 10ms delay before they
1631                  * can apply the clock after applying power.
1632                  */
1633                 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1634                         mdelay(10);
1635         }
1636 }
1637 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1638
1639 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1640                      unsigned short vdd)
1641 {
1642         if (IS_ERR(host->mmc->supply.vmmc))
1643                 sdhci_set_power_noreg(host, mode, vdd);
1644         else
1645                 sdhci_set_power_reg(host, mode, vdd);
1646 }
1647 EXPORT_SYMBOL_GPL(sdhci_set_power);
1648
1649 /*****************************************************************************\
1650  *                                                                           *
1651  * MMC callbacks                                                             *
1652  *                                                                           *
1653 \*****************************************************************************/
1654
1655 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1656 {
1657         struct sdhci_host *host;
1658         int present;
1659         unsigned long flags;
1660
1661         host = mmc_priv(mmc);
1662
1663         /* Firstly check card presence */
1664         present = mmc->ops->get_cd(mmc);
1665
1666         spin_lock_irqsave(&host->lock, flags);
1667
1668         sdhci_led_activate(host);
1669
1670         /*
1671          * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1672          * requests if Auto-CMD12 is enabled.
1673          */
1674         if (sdhci_auto_cmd12(host, mrq)) {
1675                 if (mrq->stop) {
1676                         mrq->data->stop = NULL;
1677                         mrq->stop = NULL;
1678                 }
1679         }
1680
1681         if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1682                 mrq->cmd->error = -ENOMEDIUM;
1683                 sdhci_finish_mrq(host, mrq);
1684         } else {
1685                 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1686                         sdhci_send_command(host, mrq->sbc);
1687                 else
1688                         sdhci_send_command(host, mrq->cmd);
1689         }
1690
1691         mmiowb();
1692         spin_unlock_irqrestore(&host->lock, flags);
1693 }
1694
1695 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1696 {
1697         u8 ctrl;
1698
1699         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1700         if (width == MMC_BUS_WIDTH_8) {
1701                 ctrl &= ~SDHCI_CTRL_4BITBUS;
1702                 ctrl |= SDHCI_CTRL_8BITBUS;
1703         } else {
1704                 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1705                         ctrl &= ~SDHCI_CTRL_8BITBUS;
1706                 if (width == MMC_BUS_WIDTH_4)
1707                         ctrl |= SDHCI_CTRL_4BITBUS;
1708                 else
1709                         ctrl &= ~SDHCI_CTRL_4BITBUS;
1710         }
1711         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1712 }
1713 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1714
1715 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1716 {
1717         u16 ctrl_2;
1718
1719         ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1720         /* Select Bus Speed Mode for host */
1721         ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1722         if ((timing == MMC_TIMING_MMC_HS200) ||
1723             (timing == MMC_TIMING_UHS_SDR104))
1724                 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1725         else if (timing == MMC_TIMING_UHS_SDR12)
1726                 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1727         else if (timing == MMC_TIMING_UHS_SDR25)
1728                 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1729         else if (timing == MMC_TIMING_UHS_SDR50)
1730                 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1731         else if ((timing == MMC_TIMING_UHS_DDR50) ||
1732                  (timing == MMC_TIMING_MMC_DDR52))
1733                 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1734         else if (timing == MMC_TIMING_MMC_HS400)
1735                 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1736         sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1737 }
1738 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1739
1740 static bool sdhci_timing_has_preset(unsigned char timing)
1741 {
1742         switch (timing) {
1743         case MMC_TIMING_UHS_SDR12:
1744         case MMC_TIMING_UHS_SDR25:
1745         case MMC_TIMING_UHS_SDR50:
1746         case MMC_TIMING_UHS_SDR104:
1747         case MMC_TIMING_UHS_DDR50:
1748         case MMC_TIMING_MMC_DDR52:
1749                 return true;
1750         }
1751         return false;
1752 }
1753
1754 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
1755 {
1756         return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1757                sdhci_timing_has_preset(timing);
1758 }
1759
1760 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
1761 {
1762         /*
1763          * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
1764          * Frequency. Check if preset values need to be enabled, or the Driver
1765          * Strength needs updating. Note, clock changes are handled separately.
1766          */
1767         return !host->preset_enabled &&
1768                (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
1769 }
1770
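/*
 * Apply the core's ios settings: clock, power, bus width, the high-speed
 * bit, driver strength and UHS timing, in the order the controller
 * expects. When preset values are enabled, the SD clock is gated while
 * Host Control is rewritten to avoid clock glitches.
 */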
1771 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1772 {
1773         struct sdhci_host *host = mmc_priv(mmc);
1774         bool reinit_uhs = host->reinit_uhs;
1775         bool turning_on_clk = false;
1776         u8 ctrl;
1777
1778         host->reinit_uhs = false;
1779
1780         if (ios->power_mode == MMC_POWER_UNDEFINED)
1781                 return;
1782
1783         if (host->flags & SDHCI_DEVICE_DEAD) {
1784                 if (!IS_ERR(mmc->supply.vmmc) &&
1785                     ios->power_mode == MMC_POWER_OFF)
1786                         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1787                 return;
1788         }
1789
1790         /*
1791          * Reset the chip on each power off.
1792          * Should clear out any weird states.
1793          */
1794         if (ios->power_mode == MMC_POWER_OFF) {
1795                 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1796                 sdhci_reinit(host);
1797         }
1798
1799         if (host->version >= SDHCI_SPEC_300 &&
1800                 (ios->power_mode == MMC_POWER_UP) &&
1801                 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1802                 sdhci_enable_preset_value(host, false);
1803
1804         if (!ios->clock || ios->clock != host->clock) {
1805                 turning_on_clk = ios->clock && !host->clock;
1806
1807                 host->ops->set_clock(host, ios->clock);
1808                 host->clock = ios->clock;
1809
1810                 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1811                     host->clock) {
1812                         host->timeout_clk = host->mmc->actual_clock ?
1813                                                 host->mmc->actual_clock / 1000 :
1814                                                 host->clock / 1000;
1815                         host->mmc->max_busy_timeout =
1816                                 host->ops->get_max_timeout_count ?
1817                                 host->ops->get_max_timeout_count(host) :
1818                                 1 << 27;
1819                         host->mmc->max_busy_timeout /= host->timeout_clk;
1820                 }
1821         }
1822
1823         if (host->ops->set_power)
1824                 host->ops->set_power(host, ios->power_mode, ios->vdd);
1825         else
1826                 sdhci_set_power(host, ios->power_mode, ios->vdd);
1827
1828         if (host->ops->platform_send_init_74_clocks)
1829                 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1830
1831         host->ops->set_bus_width(host, ios->bus_width);
1832
1833         /*
1834          * Special case to avoid multiple clock changes during voltage
1835          * switching.
1836          */
1837         if (!reinit_uhs &&
1838             turning_on_clk &&
1839             host->timing == ios->timing &&
1840             host->version >= SDHCI_SPEC_300 &&
1841             !sdhci_presetable_values_change(host, ios))
1842                 return;
1843
1844         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1845
1846         if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
1847                 if (ios->timing == MMC_TIMING_SD_HS ||
1848                      ios->timing == MMC_TIMING_MMC_HS ||
1849                      ios->timing == MMC_TIMING_MMC_HS400 ||
1850                      ios->timing == MMC_TIMING_MMC_HS200 ||
1851                      ios->timing == MMC_TIMING_MMC_DDR52 ||
1852                      ios->timing == MMC_TIMING_UHS_SDR50 ||
1853                      ios->timing == MMC_TIMING_UHS_SDR104 ||
1854                      ios->timing == MMC_TIMING_UHS_DDR50 ||
1855                      ios->timing == MMC_TIMING_UHS_SDR25)
1856                         ctrl |= SDHCI_CTRL_HISPD;
1857                 else
1858                         ctrl &= ~SDHCI_CTRL_HISPD;
1859         }
1860
1861         if (host->version >= SDHCI_SPEC_300) {
1862                 u16 clk, ctrl_2;
1863
1864                 if (!host->preset_enabled) {
1865                         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1866                         /*
1867                          * We only need to set Driver Strength if the
1868                          * preset value enable is not set.
1869                          */
1870                         ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1871                         ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1872                         if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1873                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1874                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1875                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1876                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1877                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1878                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1879                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1880                         else {
1881                                 pr_warn("%s: invalid driver type, default to driver type B\n",
1882                                         mmc_hostname(mmc));
1883                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1884                         }
1885
1886                         sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1887                         host->drv_type = ios->drv_type;
1888                 } else {
1889                         /*
1890                          * According to SDHC Spec v3.00, if the Preset Value
1891                          * Enable in the Host Control 2 register is set, we
1892                          * need to reset SD Clock Enable before changing High
1893                          * Speed Enable to avoid generating clock gliches.
1894                          */
1895
1896                         /* Reset SD Clock Enable */
1897                         clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1898                         clk &= ~SDHCI_CLOCK_CARD_EN;
1899                         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1900
1901                         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1902
1903                         /* Re-enable SD Clock */
1904                         host->ops->set_clock(host, host->clock);
1905                 }
1906
1907                 /* Reset SD Clock Enable */
1908                 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1909                 clk &= ~SDHCI_CLOCK_CARD_EN;
1910                 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1911
1912                 host->ops->set_uhs_signaling(host, ios->timing);
1913                 host->timing = ios->timing;
1914
1915                 if (sdhci_preset_needed(host, ios->timing)) {
1916                         u16 preset;
1917
1918                         sdhci_enable_preset_value(host, true);
1919                         preset = sdhci_get_preset_value(host);
1920                         ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
1921                                                   preset);
1922                         host->drv_type = ios->drv_type;
1923                 }
1924
1925                 /* Re-enable SD Clock */
1926                 host->ops->set_clock(host, host->clock);
1927         } else
1928                 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1929
1930         /*
1931          * Some (ENE) controllers misbehave on some ios operations,
1932          * signalling timeout and CRC errors even on CMD0. Resetting
1933          * the controller on each ios call seems to solve the problem.
1934          */
1935         if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1936                 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1937
1938         mmiowb();
1939 }
1940 EXPORT_SYMBOL_GPL(sdhci_set_ios);
1941
1942 static int sdhci_get_cd(struct mmc_host *mmc)
1943 {
1944         struct sdhci_host *host = mmc_priv(mmc);
1945         int gpio_cd = mmc_gpio_get_cd(mmc);
1946
1947         if (host->flags & SDHCI_DEVICE_DEAD)
1948                 return 0;
1949
1950         /* If nonremovable, assume that the card is always present. */
1951         if (!mmc_card_is_removable(host->mmc))
1952                 return 1;
1953
1954         /*
1955          * Try slot gpio detect; if defined, it takes precedence
1956          * over the built-in controller functionality.
1957          */
1958         if (gpio_cd >= 0)
1959                 return !!gpio_cd;
1960
1961         /* If polling, assume that the card is always present. */
1962         if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1963                 return 1;
1964
1965         /* Host native card detect */
1966         return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1967 }
1968
1969 static int sdhci_check_ro(struct sdhci_host *host)
1970 {
1971         unsigned long flags;
1972         int is_readonly;
1973
1974         spin_lock_irqsave(&host->lock, flags);
1975
1976         if (host->flags & SDHCI_DEVICE_DEAD)
1977                 is_readonly = 0;
1978         else if (host->ops->get_ro)
1979                 is_readonly = host->ops->get_ro(host);
1980         else
1981                 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1982                                 & SDHCI_WRITE_PROTECT);
1983
1984         spin_unlock_irqrestore(&host->lock, flags);
1985
1986         /* This quirk needs to be replaced by a callback-function later */
1987         return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1988                 !is_readonly : is_readonly;
1989 }
1990
1991 #define SAMPLE_COUNT    5
1992
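/*
 * For hosts with unstable write-protect detection, sample the switch
 * SAMPLE_COUNT times, 30 ms apart, and report read-only only once a
 * majority (more than SAMPLE_COUNT / 2) of the samples agree.
 */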
1993 static int sdhci_get_ro(struct mmc_host *mmc)
1994 {
1995         struct sdhci_host *host = mmc_priv(mmc);
1996         int i, ro_count;
1997
1998         if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1999                 return sdhci_check_ro(host);
2000
2001         ro_count = 0;
2002         for (i = 0; i < SAMPLE_COUNT; i++) {
2003                 if (sdhci_check_ro(host)) {
2004                         if (++ro_count > SAMPLE_COUNT / 2)
2005                                 return 1;
2006                 }
2007                 msleep(30);
2008         }
2009         return 0;
2010 }
2011
2012 static void sdhci_hw_reset(struct mmc_host *mmc)
2013 {
2014         struct sdhci_host *host = mmc_priv(mmc);
2015
2016         if (host->ops && host->ops->hw_reset)
2017                 host->ops->hw_reset(host);
2018 }
2019
2020 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2021 {
2022         if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2023                 if (enable)
2024                         host->ier |= SDHCI_INT_CARD_INT;
2025                 else
2026                         host->ier &= ~SDHCI_INT_CARD_INT;
2027
2028                 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2029                 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2030                 mmiowb();
2031         }
2032 }
2033
2034 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2035 {
2036         struct sdhci_host *host = mmc_priv(mmc);
2037         unsigned long flags;
2038
2039         if (enable)
2040                 pm_runtime_get_noresume(host->mmc->parent);
2041
2042         spin_lock_irqsave(&host->lock, flags);
2043         if (enable)
2044                 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
2045         else
2046                 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
2047
2048         sdhci_enable_sdio_irq_nolock(host, enable);
2049         spin_unlock_irqrestore(&host->lock, flags);
2050
2051         if (!enable)
2052                 pm_runtime_put_noidle(host->mmc->parent);
2053 }
2054 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2055
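/*
 * Switch signalling voltage per SDHCI v3.00: toggle the 1.8V signal
 * enable bit in Host Control 2, let any vqmmc regulator follow, and then
 * verify the bit stuck; the regulator output is expected to be stable
 * within 5 ms.
 */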
2056 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2057                                       struct mmc_ios *ios)
2058 {
2059         struct sdhci_host *host = mmc_priv(mmc);
2060         u16 ctrl;
2061         int ret;
2062
2063         /*
2064          * Signal Voltage Switching is only applicable for Host Controllers
2065          * v3.00 and above.
2066          */
2067         if (host->version < SDHCI_SPEC_300)
2068                 return 0;
2069
2070         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2071
2072         switch (ios->signal_voltage) {
2073         case MMC_SIGNAL_VOLTAGE_330:
2074                 if (!(host->flags & SDHCI_SIGNALING_330))
2075                         return -EINVAL;
2076                 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2077                 ctrl &= ~SDHCI_CTRL_VDD_180;
2078                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2079
2080                 if (!IS_ERR(mmc->supply.vqmmc)) {
2081                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2082                         if (ret) {
2083                                 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2084                                         mmc_hostname(mmc));
2085                                 return -EIO;
2086                         }
2087                 }
2088                 /* Wait for 5ms */
2089                 usleep_range(5000, 5500);
2090
2091                 /* 3.3V regulator output should be stable within 5 ms */
2092                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2093                 if (!(ctrl & SDHCI_CTRL_VDD_180))
2094                         return 0;
2095
2096                 pr_warn("%s: 3.3V regulator output did not become stable\n",
2097                         mmc_hostname(mmc));
2098
2099                 return -EAGAIN;
2100         case MMC_SIGNAL_VOLTAGE_180:
2101                 if (!(host->flags & SDHCI_SIGNALING_180))
2102                         return -EINVAL;
2103                 if (!IS_ERR(mmc->supply.vqmmc)) {
2104                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2105                         if (ret) {
2106                                 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2107                                         mmc_hostname(mmc));
2108                                 return -EIO;
2109                         }
2110                 }
2111
2112                 /*
2113                  * Enable 1.8V Signal Enable in the Host Control2
2114                  * register
2115                  */
2116                 ctrl |= SDHCI_CTRL_VDD_180;
2117                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2118
2119                 /* Some controllers need to do more when switching */
2120                 if (host->ops->voltage_switch)
2121                         host->ops->voltage_switch(host);
2122
2123                 /* 1.8V regulator output should be stable within 5 ms */
2124                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2125                 if (ctrl & SDHCI_CTRL_VDD_180)
2126                         return 0;
2127
2128                 pr_warn("%s: 1.8V regulator output did not become stable\n",
2129                         mmc_hostname(mmc));
2130
2131                 return -EAGAIN;
2132         case MMC_SIGNAL_VOLTAGE_120:
2133                 if (!(host->flags & SDHCI_SIGNALING_120))
2134                         return -EINVAL;
2135                 if (!IS_ERR(mmc->supply.vqmmc)) {
2136                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2137                         if (ret) {
2138                                 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2139                                         mmc_hostname(mmc));
2140                                 return -EIO;
2141                         }
2142                 }
2143                 return 0;
2144         default:
2145                 /* No signal voltage switch required */
2146                 return 0;
2147         }
2148 }
2149 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2150
2151 static int sdhci_card_busy(struct mmc_host *mmc)
2152 {
2153         struct sdhci_host *host = mmc_priv(mmc);
2154         u32 present_state;
2155
2156         /* Check whether DAT[0] is 0 */
2157         present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2158
2159         return !(present_state & SDHCI_DATA_0_LVL_MASK);
2160 }
2161
2162 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2163 {
2164         struct sdhci_host *host = mmc_priv(mmc);
2165         unsigned long flags;
2166
2167         spin_lock_irqsave(&host->lock, flags);
2168         host->flags |= SDHCI_HS400_TUNING;
2169         spin_unlock_irqrestore(&host->lock, flags);
2170
2171         return 0;
2172 }
2173
2174 void sdhci_start_tuning(struct sdhci_host *host)
2175 {
2176         u16 ctrl;
2177
2178         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2179         ctrl |= SDHCI_CTRL_EXEC_TUNING;
2180         if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2181                 ctrl |= SDHCI_CTRL_TUNED_CLK;
2182         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2183
2184         /*
2185          * As per the Host Controller spec v3.00, tuning command
2186          * generates Buffer Read Ready interrupt, so enable that.
2187          *
2188          * Note: The spec clearly says that when tuning sequence
2189          * is being performed, the controller does not generate
2190          * interrupts other than Buffer Read Ready interrupt. But
2191          * to make sure we don't hit a controller bug, we _only_
2192          * enable Buffer Read Ready interrupt here.
2193          */
2194         sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2195         sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2196 }
2197 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2198
2199 void sdhci_end_tuning(struct sdhci_host *host)
2200 {
2201         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2202         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2203 }
2204 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2205
2206 void sdhci_reset_tuning(struct sdhci_host *host)
2207 {
2208         u16 ctrl;
2209
2210         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2211         ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2212         ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2213         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2214 }
2215 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2216
2217 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2218 {
2219         sdhci_reset_tuning(host);
2220
2221         sdhci_do_reset(host, SDHCI_RESET_CMD);
2222         sdhci_do_reset(host, SDHCI_RESET_DATA);
2223
2224         sdhci_end_tuning(host);
2225
2226         mmc_abort_tuning(host->mmc, opcode);
2227 }
2228
2229 /*
2230  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2231  * tuning command does not have a data payload (or rather the hardware does it
2232  * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2233  * interrupt setup is different to other commands and there is no timeout
2234  * interrupt so special handling is needed.
2235  */
2236 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2237 {
2238         struct mmc_host *mmc = host->mmc;
2239         struct mmc_command cmd = {};
2240         struct mmc_request mrq = {};
2241         unsigned long flags;
2242         u32 b = host->sdma_boundary;
2243
2244         spin_lock_irqsave(&host->lock, flags);
2245
2246         cmd.opcode = opcode;
2247         cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2248         cmd.mrq = &mrq;
2249
2250         mrq.cmd = &cmd;
2251         /*
2252          * In response to CMD19, the card sends 64 bytes of tuning
2253          * data to the Host Controller, so the block size is set to
2254          * 64 (or 128 for CMD21 on an 8-bit bus) below.
2255          */
2256         if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2257             mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2258                 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2259         else
2260                 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2261
2262         /*
2263          * The tuning block is sent by the card to the host controller.
2264          * So we set the TRNS_READ bit in the Transfer Mode register.
2265          * This also takes care of setting DMA Enable and Multi Block
2266          * Select in the same register to 0.
2267          */
2268         sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2269
2270         sdhci_send_command(host, &cmd);
2271
2272         host->cmd = NULL;
2273
2274         sdhci_del_timer(host, &mrq);
2275
2276         host->tuning_done = 0;
2277
2278         mmiowb();
2279         spin_unlock_irqrestore(&host->lock, flags);
2280
2281         /* Wait for Buffer Read Ready interrupt */
2282         wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2283                            msecs_to_jiffies(50));
2284
2285 }
2286 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2287
2288 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2289 {
2290         int i;
2291
2292         /*
2293          * Issue the tuning opcode repeatedly until Execute Tuning is
2294          * cleared or the loop count reaches MAX_TUNING_LOOP (40).
2295          */
2296         for (i = 0; i < MAX_TUNING_LOOP; i++) {
2297                 u16 ctrl;
2298
2299                 sdhci_send_tuning(host, opcode);
2300
2301                 if (!host->tuning_done) {
2302                         pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2303                                  mmc_hostname(host->mmc));
2304                         sdhci_abort_tuning(host, opcode);
2305                         return;
2306                 }
2307
2308                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2309                 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2310                         if (ctrl & SDHCI_CTRL_TUNED_CLK)
2311                                 return; /* Success! */
2312                         break;
2313                 }
2314
2315                 /* Spec does not require a delay between tuning cycles */
2316                 if (host->tuning_delay > 0)
2317                         mdelay(host->tuning_delay);
2318         }
2319
2320         pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2321                 mmc_hostname(host->mmc));
2322         sdhci_reset_tuning(host);
2323 }
2324
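/*
 * ->execute_tuning() entry point for the mmc core. For Tuning Mode 1
 * hosts the tuning count is propagated to mmc->retune_period so the core
 * schedules periodic re-tuning; HS400 tuning is carried out while still
 * in HS200 timing, as noted below.
 */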
2325 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2326 {
2327         struct sdhci_host *host = mmc_priv(mmc);
2328         int err = 0;
2329         unsigned int tuning_count = 0;
2330         bool hs400_tuning;
2331
2332         hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2333
2334         if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2335                 tuning_count = host->tuning_count;
2336
2337         /*
2338          * The Host Controller needs tuning for the SDR104 and DDR50
2339          * modes, and for SDR50 mode when Use Tuning for SDR50 is set in
2340          * the Capabilities register.
2341          * If the Host Controller supports the HS200 mode then the
2342          * tuning function has to be executed.
2343          */
2344         switch (host->timing) {
2345         /* HS400 tuning is done in HS200 mode */
2346         case MMC_TIMING_MMC_HS400:
2347                 err = -EINVAL;
2348                 goto out;
2349
2350         case MMC_TIMING_MMC_HS200:
2351                 /*
2352                  * Periodic re-tuning for HS400 is not expected to be needed, so
2353                  * disable it here.
2354                  */
2355                 if (hs400_tuning)
2356                         tuning_count = 0;
2357                 break;
2358
2359         case MMC_TIMING_UHS_SDR104:
2360         case MMC_TIMING_UHS_DDR50:
2361                 break;
2362
2363         case MMC_TIMING_UHS_SDR50:
2364                 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2365                         break;
2366                 /* FALLTHROUGH */
2367
2368         default:
2369                 goto out;
2370         }
2371
2372         if (host->ops->platform_execute_tuning) {
2373                 err = host->ops->platform_execute_tuning(host, opcode);
2374                 goto out;
2375         }
2376
2377         host->mmc->retune_period = tuning_count;
2378
2379         if (host->tuning_delay < 0)
2380                 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2381
2382         sdhci_start_tuning(host);
2383
2384         __sdhci_execute_tuning(host, opcode);
2385
2386         sdhci_end_tuning(host);
2387 out:
2388         host->flags &= ~SDHCI_HS400_TUNING;
2389
2390         return err;
2391 }
2392 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2393
2394 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2395 {
2396         /* Host Controller v3.00 defines preset value registers */
2397         if (host->version < SDHCI_SPEC_300)
2398                 return;
2399
2400         /*
2401          * Only touch the Preset Value Enable bit when the requested
2402          * state differs from the current one; otherwise bail out.
2403          */
2404         if (host->preset_enabled != enable) {
2405                 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2406
2407                 if (enable)
2408                         ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2409                 else
2410                         ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2411
2412                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2413
2414                 if (enable)
2415                         host->flags |= SDHCI_PV_ENABLED;
2416                 else
2417                         host->flags &= ~SDHCI_PV_ENABLED;
2418
2419                 host->preset_enabled = enable;
2420         }
2421 }
2422
2423 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2424                                 int err)
2425 {
2426         struct sdhci_host *host = mmc_priv(mmc);
2427         struct mmc_data *data = mrq->data;
2428
2429         if (data->host_cookie != COOKIE_UNMAPPED)
2430                 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2431                              mmc_get_dma_dir(data));
2432
2433         data->host_cookie = COOKIE_UNMAPPED;
2434 }
2435
2436 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2437 {
2438         struct sdhci_host *host = mmc_priv(mmc);
2439
2440         mrq->data->host_cookie = COOKIE_UNMAPPED;
2441
2442         /*
2443          * No pre-mapping in the pre hook if we're using the bounce buffer:
2444          * that would require two bounce buffers, since one buffer may be
2445          * in flight when this is called.
2446          */
2447         if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2448                 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2449 }
2450
2451 static inline bool sdhci_has_requests(struct sdhci_host *host)
2452 {
2453         return host->cmd || host->data_cmd;
2454 }
2455
2456 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2457 {
2458         if (host->data_cmd) {
2459                 host->data_cmd->error = err;
2460                 sdhci_finish_mrq(host, host->data_cmd->mrq);
2461         }
2462
2463         if (host->cmd) {
2464                 host->cmd->error = err;
2465                 sdhci_finish_mrq(host, host->cmd->mrq);
2466         }
2467 }
2468
2469 static void sdhci_card_event(struct mmc_host *mmc)
2470 {
2471         struct sdhci_host *host = mmc_priv(mmc);
2472         unsigned long flags;
2473         int present;
2474
2475         /* First check if client has provided their own card event */
2476         if (host->ops->card_event)
2477                 host->ops->card_event(host);
2478
2479         present = mmc->ops->get_cd(mmc);
2480
2481         spin_lock_irqsave(&host->lock, flags);
2482
2483         /* Check sdhci_has_requests() first in case we are runtime suspended */
2484         if (sdhci_has_requests(host) && !present) {
2485                 pr_err("%s: Card removed during transfer!\n",
2486                         mmc_hostname(host->mmc));
2487                 pr_err("%s: Resetting controller.\n",
2488                         mmc_hostname(host->mmc));
2489
2490                 sdhci_do_reset(host, SDHCI_RESET_CMD);
2491                 sdhci_do_reset(host, SDHCI_RESET_DATA);
2492
2493                 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2494         }
2495
2496         spin_unlock_irqrestore(&host->lock, flags);
2497 }
2498
2499 static const struct mmc_host_ops sdhci_ops = {
2500         .request        = sdhci_request,
2501         .post_req       = sdhci_post_req,
2502         .pre_req        = sdhci_pre_req,
2503         .set_ios        = sdhci_set_ios,
2504         .get_cd         = sdhci_get_cd,
2505         .get_ro         = sdhci_get_ro,
2506         .hw_reset       = sdhci_hw_reset,
2507         .enable_sdio_irq = sdhci_enable_sdio_irq,
2508         .start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
2509         .prepare_hs400_tuning           = sdhci_prepare_hs400_tuning,
2510         .execute_tuning                 = sdhci_execute_tuning,
2511         .card_event                     = sdhci_card_event,
2512         .card_busy      = sdhci_card_busy,
2513 };
2514
2515 /*****************************************************************************\
2516  *                                                                           *
2517  * Tasklets                                                                  *
2518  *                                                                           *
2519 \*****************************************************************************/
2520
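/*
 * Complete one entry from mrqs_done: unmap or bounce-copy any DMA
 * buffers, reset the controller if the request ended in error, and hand
 * the request back to the core. Returns true when the caller can stop
 * looping, either because nothing is queued or because the reset must
 * wait for an in-flight command to finish.
 */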
2521 static bool sdhci_request_done(struct sdhci_host *host)
2522 {
2523         unsigned long flags;
2524         struct mmc_request *mrq;
2525         int i;
2526
2527         spin_lock_irqsave(&host->lock, flags);
2528
2529         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2530                 mrq = host->mrqs_done[i];
2531                 if (mrq)
2532                         break;
2533         }
2534
2535         if (!mrq) {
2536                 spin_unlock_irqrestore(&host->lock, flags);
2537                 return true;
2538         }
2539
2540         sdhci_del_timer(host, mrq);
2541
2542         /*
2543          * Always unmap the data buffers if they were mapped by
2544          * sdhci_prepare_data() whenever we finish with a request.
2545          * This avoids leaking DMA mappings on error.
2546          */
2547         if (host->flags & SDHCI_REQ_USE_DMA) {
2548                 struct mmc_data *data = mrq->data;
2549
2550                 if (data && data->host_cookie == COOKIE_MAPPED) {
2551                         if (host->bounce_buffer) {
2552                                 /*
2553                                  * On reads, copy the bounced data into the
2554                                  * sglist
2555                                  */
2556                                 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2557                                         unsigned int length = data->bytes_xfered;
2558
2559                                         if (length > host->bounce_buffer_size) {
2560                                                 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2561                                                        mmc_hostname(host->mmc),
2562                                                        host->bounce_buffer_size,
2563                                                        data->bytes_xfered);
2564                                                 /* Cap it down and continue */
2565                                                 length = host->bounce_buffer_size;
2566                                         }
2567                                         dma_sync_single_for_cpu(
2568                                                 host->mmc->parent,
2569                                                 host->bounce_addr,
2570                                                 host->bounce_buffer_size,
2571                                                 DMA_FROM_DEVICE);
2572                                         sg_copy_from_buffer(data->sg,
2573                                                 data->sg_len,
2574                                                 host->bounce_buffer,
2575                                                 length);
2576                                 } else {
2577                                         /* No copying, just switch ownership */
2578                                         dma_sync_single_for_cpu(
2579                                                 host->mmc->parent,
2580                                                 host->bounce_addr,
2581                                                 host->bounce_buffer_size,
2582                                                 mmc_get_dma_dir(data));
2583                                 }
2584                         } else {
2585                                 /* Unmap the raw data */
2586                                 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2587                                              data->sg_len,
2588                                              mmc_get_dma_dir(data));
2589                         }
2590                         data->host_cookie = COOKIE_UNMAPPED;
2591                 }
2592         }
2593
2594         /*
2595          * The controller needs a reset of internal state machines
2596          * upon error conditions.
2597          */
2598         if (sdhci_needs_reset(host, mrq)) {
2599                 /*
2600                  * Do not finish until command and data lines are available for
2601                  * reset. Note there can only be one other mrq, so it cannot
2602                  * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2603                  * would both be null.
2604                  */
2605                 if (host->cmd || host->data_cmd) {
2606                         spin_unlock_irqrestore(&host->lock, flags);
2607                         return true;
2608                 }
2609
2610                 /* Some controllers need this kick or reset won't work here */
2611                 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2612                         /* This is to force an update */
2613                         host->ops->set_clock(host, host->clock);
2614
2615                 /* Spec says we should do both at the same time, but Ricoh
2616                    controllers do not like that. */
2617                 sdhci_do_reset(host, SDHCI_RESET_CMD);
2618                 sdhci_do_reset(host, SDHCI_RESET_DATA);
2619
2620                 host->pending_reset = false;
2621         }
2622
2623         if (!sdhci_has_requests(host))
2624                 sdhci_led_deactivate(host);
2625
2626         host->mrqs_done[i] = NULL;
2627
2628         mmiowb();
2629         spin_unlock_irqrestore(&host->lock, flags);
2630
2631         mmc_request_done(host->mmc, mrq);
2632
2633         return false;
2634 }
2635
2636 static void sdhci_tasklet_finish(unsigned long param)
2637 {
2638         struct sdhci_host *host = (struct sdhci_host *)param;
2639
2640         while (!sdhci_request_done(host))
2641                 ;
2642 }
2643
2644 static void sdhci_timeout_timer(struct timer_list *t)
2645 {
2646         struct sdhci_host *host;
2647         unsigned long flags;
2648
2649         host = from_timer(host, t, timer);
2650
2651         spin_lock_irqsave(&host->lock, flags);
2652
2653         if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2654                 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2655                        mmc_hostname(host->mmc));
2656                 sdhci_dumpregs(host);
2657
2658                 host->cmd->error = -ETIMEDOUT;
2659                 sdhci_finish_mrq(host, host->cmd->mrq);
2660         }
2661
2662         mmiowb();
2663         spin_unlock_irqrestore(&host->lock, flags);
2664 }
2665
2666 static void sdhci_timeout_data_timer(struct timer_list *t)
2667 {
2668         struct sdhci_host *host;
2669         unsigned long flags;
2670
2671         host = from_timer(host, t, data_timer);
2672
2673         spin_lock_irqsave(&host->lock, flags);
2674
2675         if (host->data || host->data_cmd ||
2676             (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2677                 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2678                        mmc_hostname(host->mmc));
2679                 sdhci_dumpregs(host);
2680
2681                 if (host->data) {
2682                         host->data->error = -ETIMEDOUT;
2683                         sdhci_finish_data(host);
2684                 } else if (host->data_cmd) {
2685                         host->data_cmd->error = -ETIMEDOUT;
2686                         sdhci_finish_mrq(host, host->data_cmd->mrq);
2687                 } else {
2688                         host->cmd->error = -ETIMEDOUT;
2689                         sdhci_finish_mrq(host, host->cmd->mrq);
2690                 }
2691         }
2692
2693         mmiowb();
2694         spin_unlock_irqrestore(&host->lock, flags);
2695 }
2696
2697 /*****************************************************************************\
2698  *                                                                           *
2699  * Interrupt handling                                                        *
2700  *                                                                           *
2701 \*****************************************************************************/
2702
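/*
 * Command interrupt handler, called with host->lock held. Auto-CMD
 * failures are promoted to the matching data error bits via *intmask_p
 * so the data path handles them; command timeouts become -ETIMEDOUT and
 * CRC/end-bit/index errors become -EILSEQ.
 */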
2703 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2704 {
2705         /* Handle auto-CMD12 error */
2706         if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
2707                 struct mmc_request *mrq = host->data_cmd->mrq;
2708                 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2709                 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2710                                    SDHCI_INT_DATA_TIMEOUT :
2711                                    SDHCI_INT_DATA_CRC;
2712
2713                 /* Treat auto-CMD12 error the same as data error */
2714                 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
2715                         *intmask_p |= data_err_bit;
2716                         return;
2717                 }
2718         }
2719
2720         if (!host->cmd) {
2721                 /*
2722                  * SDHCI recovers from errors by resetting the cmd and data
2723                  * circuits.  Until that is done, there very well might be more
2724                  * interrupts, so ignore them in that case.
2725                  */
2726                 if (host->pending_reset)
2727                         return;
2728                 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2729                        mmc_hostname(host->mmc), (unsigned)intmask);
2730                 sdhci_dumpregs(host);
2731                 return;
2732         }
2733
2734         if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2735                        SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2736                 if (intmask & SDHCI_INT_TIMEOUT)
2737                         host->cmd->error = -ETIMEDOUT;
2738                 else
2739                         host->cmd->error = -EILSEQ;
2740
2741                 /* Treat data command CRC error the same as data CRC error */
2742                 if (host->cmd->data &&
2743                     (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2744                      SDHCI_INT_CRC) {
2745                         host->cmd = NULL;
2746                         *intmask_p |= SDHCI_INT_DATA_CRC;
2747                         return;
2748                 }
2749
2750                 sdhci_finish_mrq(host, host->cmd->mrq);
2751                 return;
2752         }
2753
2754         /* Handle auto-CMD23 error */
2755         if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
2756                 struct mmc_request *mrq = host->cmd->mrq;
2757                 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2758                 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2759                           -ETIMEDOUT :
2760                           -EILSEQ;
2761
2762                 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
2763                         mrq->sbc->error = err;
2764                         sdhci_finish_mrq(host, mrq);
2765                         return;
2766                 }
2767         }
2768
2769         if (intmask & SDHCI_INT_RESPONSE)
2770                 sdhci_finish_command(host);
2771 }
2772
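/*
 * Dump the ADMA2 descriptor table for debugging, one descriptor per
 * line, stopping at the first descriptor carrying the ADMA2_END
 * attribute.
 */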
2773 static void sdhci_adma_show_error(struct sdhci_host *host)
2774 {
2775         void *desc = host->adma_table;
2776         dma_addr_t dma = host->adma_addr;
2777
2778         sdhci_dumpregs(host);
2779
2780         while (true) {
2781                 struct sdhci_adma2_64_desc *dma_desc = desc;
2782
2783                 if (host->flags & SDHCI_USE_64_BIT_DMA)
2784                         SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2785                             (unsigned long long)dma,
2786                             le32_to_cpu(dma_desc->addr_hi),
2787                             le32_to_cpu(dma_desc->addr_lo),
2788                             le16_to_cpu(dma_desc->len),
2789                             le16_to_cpu(dma_desc->cmd));
2790                 else
2791                         SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2792                             (unsigned long long)dma,
2793                             le32_to_cpu(dma_desc->addr_lo),
2794                             le16_to_cpu(dma_desc->len),
2795                             le16_to_cpu(dma_desc->cmd));
2796
2797                 desc += host->desc_sz;
2798                 dma += host->desc_sz;
2799
2800                 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2801                         break;
2802         }
2803 }
2804
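/*
 * Data interrupt handler, called with host->lock held. Tuning commands
 * (CMD19/CMD21) are special-cased first since they generate only Buffer
 * Read Ready; busy-end signalling, PIO transfers and SDMA boundary
 * restarts are handled further down.
 */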
2805 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2806 {
2807         u32 command;
2808
2809         /* CMD19 generates _only_ Buffer Read Ready interrupt */
2810         if (intmask & SDHCI_INT_DATA_AVAIL) {
2811                 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2812                 if (command == MMC_SEND_TUNING_BLOCK ||
2813                     command == MMC_SEND_TUNING_BLOCK_HS200) {
2814                         host->tuning_done = 1;
2815                         wake_up(&host->buf_ready_int);
2816                         return;
2817                 }
2818         }
2819
2820         if (!host->data) {
2821                 struct mmc_command *data_cmd = host->data_cmd;
2822
2823                 /*
2824                  * The "data complete" interrupt is also used to
2825                  * indicate that a busy state has ended. See comment
2826                  * above in sdhci_cmd_irq().
2827                  */
2828                 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2829                         if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2830                                 host->data_cmd = NULL;
2831                                 data_cmd->error = -ETIMEDOUT;
2832                                 sdhci_finish_mrq(host, data_cmd->mrq);
2833                                 return;
2834                         }
2835                         if (intmask & SDHCI_INT_DATA_END) {
2836                                 host->data_cmd = NULL;
2837                                 /*
2838                                  * Some cards handle busy-end interrupt
2839                                  * before the command has completed, so make
2840                                  * sure we do things in the proper order.
2841                                  */
2842                                 if (host->cmd == data_cmd)
2843                                         return;
2844
2845                                 sdhci_finish_mrq(host, data_cmd->mrq);
2846                                 return;
2847                         }
2848                 }
2849
2850                 /*
2851                  * SDHCI recovers from errors by resetting the cmd and data
2852                  * circuits. Until that is done, there very well might be more
2853                  * interrupts, so ignore them in that case.
2854                  */
2855                 if (host->pending_reset)
2856                         return;
2857
2858                 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2859                        mmc_hostname(host->mmc), (unsigned)intmask);
2860                 sdhci_dumpregs(host);
2861
2862                 return;
2863         }
2864
2865         if (intmask & SDHCI_INT_DATA_TIMEOUT)
2866                 host->data->error = -ETIMEDOUT;
2867         else if (intmask & SDHCI_INT_DATA_END_BIT)
2868                 host->data->error = -EILSEQ;
2869         else if ((intmask & SDHCI_INT_DATA_CRC) &&
2870                 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2871                         != MMC_BUS_TEST_R)
2872                 host->data->error = -EILSEQ;
2873         else if (intmask & SDHCI_INT_ADMA_ERROR) {
2874                 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
2875                        intmask);
2876                 sdhci_adma_show_error(host);
2877                 host->data->error = -EIO;
2878                 if (host->ops->adma_workaround)
2879                         host->ops->adma_workaround(host, intmask);
2880         }
2881
2882         if (host->data->error)
2883                 sdhci_finish_data(host);
2884         else {
2885                 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2886                         sdhci_transfer_pio(host);
2887
2888                 /*
2889                  * We currently don't do anything fancy with DMA
2890                  * boundaries, but as we can't disable the feature
2891                  * we need to at least restart the transfer.
2892                  *
2893                  * According to the spec, sdhci_readl(host, SDHCI_DMA_ADDRESS)
2894                  * should return a valid address to continue from, but as
2895                  * some controllers are faulty, don't trust them.
2896                  */
2897                 if (intmask & SDHCI_INT_DMA_END) {
2898                         u32 dmastart, dmanow;
2899
2900                         dmastart = sdhci_sdma_address(host);
2901                         dmanow = dmastart + host->data->bytes_xfered;
2902                         /*
2903                          * Force update to the next DMA block boundary.
2904                          */
2905                         dmanow = (dmanow &
2906                                 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2907                                 SDHCI_DEFAULT_BOUNDARY_SIZE;
2908                         host->data->bytes_xfered = dmanow - dmastart;
2909                         DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2910                             dmastart, host->data->bytes_xfered, dmanow);
2911                         sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2912                 }
2913
2914                 if (intmask & SDHCI_INT_DATA_END) {
2915                         if (host->cmd == host->data_cmd) {
2916                                 /*
2917                                  * Data managed to finish before the
2918                                  * command completed. Make sure we do
2919                                  * things in the proper order.
2920                                  */
2921                                 host->data_early = 1;
2922                         } else {
2923                                 sdhci_finish_data(host);
2924                         }
2925                 }
2926         }
2927 }
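
/*
 * Worked example of the boundary restart above (illustrative values;
 * SDHCI_DEFAULT_BOUNDARY_SIZE is 512 KiB, i.e. 0x80000):
 *
 *	dmastart = 0x12345000
 *	dmanow   = dmastart + 0x1000              = 0x12346000
 *	dmanow   = (dmanow & ~0x7ffff) + 0x80000  = 0x12380000
 *	bytes_xfered = dmanow - dmastart          = 0x3b000
 *
 * i.e. the transfer is restarted at the next 512 KiB boundary and
 * bytes_xfered is advanced to match, rather than trusting the address a
 * possibly faulty controller reports in SDHCI_DMA_ADDRESS.
 */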
2928
2929 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2930 {
2931         irqreturn_t result = IRQ_NONE;
2932         struct sdhci_host *host = dev_id;
2933         u32 intmask, mask, unexpected = 0;
2934         int max_loops = 16;
2935
2936         spin_lock(&host->lock);
2937
2938         if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2939                 spin_unlock(&host->lock);
2940                 return IRQ_NONE;
2941         }
2942
2943         intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2944         if (!intmask || intmask == 0xffffffff) {
2945                 result = IRQ_NONE;
2946                 goto out;
2947         }
2948
2949         do {
2950                 DBG("IRQ status 0x%08x\n", intmask);
2951
2952                 if (host->ops->irq) {
2953                         intmask = host->ops->irq(host, intmask);
2954                         if (!intmask)
2955                                 goto cont;
2956                 }
2957
2958                 /* Clear selected interrupts. */
2959                 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2960                                   SDHCI_INT_BUS_POWER);
2961                 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2962
2963                 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2964                         u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2965                                       SDHCI_CARD_PRESENT;
2966
2967                         /*
2968                          * There is an observation on i.MX eSDHC: the INSERT
2969                          * bit is immediately set again as soon as it gets
2970                          * cleared, as long as a card is inserted.  We have to
2971                          * mask the IRQ to prevent an interrupt storm that
2972                          * would freeze the system.  The REMOVE bit suffers
2973                          * from the same situation.
2974                          *
2975                          * More testing is needed here to ensure this works
2976                          * for other platforms though.
2977                          */
2978                         host->ier &= ~(SDHCI_INT_CARD_INSERT |
2979                                        SDHCI_INT_CARD_REMOVE);
2980                         host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2981                                                SDHCI_INT_CARD_INSERT;
2982                         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2983                         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2984
2985                         sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2986                                      SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2987
2988                         host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2989                                                        SDHCI_INT_CARD_REMOVE);
2990                         result = IRQ_WAKE_THREAD;
2991                 }
2992
2993                 if (intmask & SDHCI_INT_CMD_MASK)
2994                         sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
2995
2996                 if (intmask & SDHCI_INT_DATA_MASK)
2997                         sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2998
2999                 if (intmask & SDHCI_INT_BUS_POWER)
3000                         pr_err("%s: Card is consuming too much power!\n",
3001                                 mmc_hostname(host->mmc));
3002
3003                 if (intmask & SDHCI_INT_RETUNE)
3004                         mmc_retune_needed(host->mmc);
3005
3006                 if ((intmask & SDHCI_INT_CARD_INT) &&
3007                     (host->ier & SDHCI_INT_CARD_INT)) {
3008                         sdhci_enable_sdio_irq_nolock(host, false);
3009                         host->thread_isr |= SDHCI_INT_CARD_INT;
3010                         result = IRQ_WAKE_THREAD;
3011                 }
3012
3013                 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3014                              SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3015                              SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3016                              SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3017
3018                 if (intmask) {
3019                         unexpected |= intmask;
3020                         sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3021                 }
3022 cont:
3023                 if (result == IRQ_NONE)
3024                         result = IRQ_HANDLED;
3025
3026                 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3027         } while (intmask && --max_loops);
3028 out:
3029         spin_unlock(&host->lock);
3030
3031         if (unexpected) {
3032                 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3033                            mmc_hostname(host->mmc), unexpected);
3034                 sdhci_dumpregs(host);
3035         }
3036
3037         return result;
3038 }
3039
3040 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3041 {
3042         struct sdhci_host *host = dev_id;
3043         unsigned long flags;
3044         u32 isr;
3045
3046         spin_lock_irqsave(&host->lock, flags);
3047         isr = host->thread_isr;
3048         host->thread_isr = 0;
3049         spin_unlock_irqrestore(&host->lock, flags);
3050
3051         if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3052                 struct mmc_host *mmc = host->mmc;
3053
3054                 mmc->ops->card_event(mmc);
3055                 mmc_detect_change(mmc, msecs_to_jiffies(200));
3056         }
3057
3058         if (isr & SDHCI_INT_CARD_INT) {
3059                 sdio_run_irqs(host->mmc);
3060
3061                 spin_lock_irqsave(&host->lock, flags);
3062                 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3063                         sdhci_enable_sdio_irq_nolock(host, true);
3064                 spin_unlock_irqrestore(&host->lock, flags);
3065         }
3066
3067         return isr ? IRQ_HANDLED : IRQ_NONE;
3068 }
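
/*
 * The two handlers above form the usual request_threaded_irq() pair: the
 * hard handler runs under host->lock, latches deferred work into
 * host->thread_isr and returns IRQ_WAKE_THREAD, and the thread then does
 * the sleepable parts (card detect, SDIO) with interrupts enabled.  The
 * registration, as done by __sdhci_add_host() and sdhci_resume_host()
 * below, is:
 *
 *	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
 *				   IRQF_SHARED, mmc_hostname(host->mmc),
 *				   host);
 */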
3069
3070 /*****************************************************************************\
3071  *                                                                           *
3072  * Suspend/resume                                                            *
3073  *                                                                           *
3074 \*****************************************************************************/
3075
3076 #ifdef CONFIG_PM
3077
3078 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3079 {
3080         return mmc_card_is_removable(host->mmc) &&
3081                !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3082                !mmc_can_gpio_cd(host->mmc);
3083 }
3084
3085 /*
3086  * To enable wakeup events, the corresponding events have to be enabled in
3087  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3088  * Table' in the SD Host Controller Standard Specification.
3089  * It is useless to restore SDHCI_INT_ENABLE state in
3090  * sdhci_disable_irq_wakeups() since it will be set by
3091  * sdhci_enable_card_detection() or sdhci_init().
3092  */
3093 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3094 {
3095         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3096                   SDHCI_WAKE_ON_INT;
3097         u32 irq_val = 0;
3098         u8 wake_val = 0;
3099         u8 val;
3100
3101         if (sdhci_cd_irq_can_wakeup(host)) {
3102                 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3103                 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3104         }
3105
3106         if (mmc_card_wake_sdio_irq(host->mmc)) {
3107                 wake_val |= SDHCI_WAKE_ON_INT;
3108                 irq_val |= SDHCI_INT_CARD_INT;
3109         }
3110
3111         if (!irq_val)
3112                 return false;
3113
3114         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3115         val &= ~mask;
3116         val |= wake_val;
3117         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3118
3119         sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3120
3121         host->irq_wake_enabled = !enable_irq_wake(host->irq);
3122
3123         return host->irq_wake_enabled;
3124 }
3125
3126 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3127 {
3128         u8 val;
3129         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3130                         | SDHCI_WAKE_ON_INT;
3131
3132         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3133         val &= ~mask;
3134         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3135
3136         disable_irq_wake(host->irq);
3137
3138         host->irq_wake_enabled = false;
3139 }
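
/*
 * Worked example of the wakeup programming above, for a removable card
 * with no SDIO wakeup (illustrative):
 *
 *	wake_val = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
 *	irq_val  = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
 *
 * Per the spec's wakeup signal table, both the Wakeup Control bits and
 * the matching interrupt enable bits must be set, which is why irq_val
 * is also written to SDHCI_INT_ENABLE.
 */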
3140
3141 int sdhci_suspend_host(struct sdhci_host *host)
3142 {
3143         sdhci_disable_card_detection(host);
3144
3145         mmc_retune_timer_stop(host->mmc);
3146
3147         if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3148             !sdhci_enable_irq_wakeups(host)) {
3149                 host->ier = 0;
3150                 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3151                 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3152                 free_irq(host->irq, host);
3153         }
3154
3155         return 0;
3156 }
3157
3158 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3159
3160 int sdhci_resume_host(struct sdhci_host *host)
3161 {
3162         struct mmc_host *mmc = host->mmc;
3163         int ret = 0;
3164
3165         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3166                 if (host->ops->enable_dma)
3167                         host->ops->enable_dma(host);
3168         }
3169
3170         if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3171             (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3172                 /* Card keeps power but host controller does not */
3173                 sdhci_init(host, 0);
3174                 host->pwr = 0;
3175                 host->clock = 0;
3176                 host->reinit_uhs = true;
3177                 mmc->ops->set_ios(mmc, &mmc->ios);
3178         } else {
3179                 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3180                 mmiowb();
3181         }
3182
3183         if (host->irq_wake_enabled) {
3184                 sdhci_disable_irq_wakeups(host);
3185         } else {
3186                 ret = request_threaded_irq(host->irq, sdhci_irq,
3187                                            sdhci_thread_irq, IRQF_SHARED,
3188                                            mmc_hostname(host->mmc), host);
3189                 if (ret)
3190                         return ret;
3191         }
3192
3193         sdhci_enable_card_detection(host);
3194
3195         return ret;
3196 }
3197
3198 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3199
3200 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3201 {
3202         unsigned long flags;
3203
3204         mmc_retune_timer_stop(host->mmc);
3205
3206         spin_lock_irqsave(&host->lock, flags);
3207         host->ier &= SDHCI_INT_CARD_INT;
3208         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3209         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3210         spin_unlock_irqrestore(&host->lock, flags);
3211
3212         synchronize_hardirq(host->irq);
3213
3214         spin_lock_irqsave(&host->lock, flags);
3215         host->runtime_suspended = true;
3216         spin_unlock_irqrestore(&host->lock, flags);
3217
3218         return 0;
3219 }
3220 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3221
3222 int sdhci_runtime_resume_host(struct sdhci_host *host)
3223 {
3224         struct mmc_host *mmc = host->mmc;
3225         unsigned long flags;
3226         int host_flags = host->flags;
3227
3228         if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3229                 if (host->ops->enable_dma)
3230                         host->ops->enable_dma(host);
3231         }
3232
3233         sdhci_init(host, 0);
3234
3235         if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3236             mmc->ios.power_mode != MMC_POWER_OFF) {
3237                 /* Force clock and power re-program */
3238                 host->pwr = 0;
3239                 host->clock = 0;
3240                 host->reinit_uhs = true;
3241                 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3242                 mmc->ops->set_ios(mmc, &mmc->ios);
3243
3244                 if ((host_flags & SDHCI_PV_ENABLED) &&
3245                     !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3246                         spin_lock_irqsave(&host->lock, flags);
3247                         sdhci_enable_preset_value(host, true);
3248                         spin_unlock_irqrestore(&host->lock, flags);
3249                 }
3250
3251                 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3252                     mmc->ops->hs400_enhanced_strobe)
3253                         mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3254         }
3255
3256         spin_lock_irqsave(&host->lock, flags);
3257
3258         host->runtime_suspended = false;
3259
3260         /* Enable SDIO IRQ */
3261         if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3262                 sdhci_enable_sdio_irq_nolock(host, true);
3263
3264         /* Enable Card Detection */
3265         sdhci_enable_card_detection(host);
3266
3267         spin_unlock_irqrestore(&host->lock, flags);
3268
3269         return 0;
3270 }
3271 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3272
3273 #endif /* CONFIG_PM */
3274
3275 /*****************************************************************************\
3276  *                                                                           *
3277  * Command Queue Engine (CQE) helpers                                        *
3278  *                                                                           *
3279 \*****************************************************************************/
3280
3281 void sdhci_cqe_enable(struct mmc_host *mmc)
3282 {
3283         struct sdhci_host *host = mmc_priv(mmc);
3284         unsigned long flags;
3285         u8 ctrl;
3286
3287         spin_lock_irqsave(&host->lock, flags);
3288
3289         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3290         ctrl &= ~SDHCI_CTRL_DMA_MASK;
3291         if (host->flags & SDHCI_USE_64_BIT_DMA)
3292                 ctrl |= SDHCI_CTRL_ADMA64;
3293         else
3294                 ctrl |= SDHCI_CTRL_ADMA32;
3295         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3296
3297         sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3298                      SDHCI_BLOCK_SIZE);
3299
3300         /* Set maximum timeout */
3301         sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3302
3303         host->ier = host->cqe_ier;
3304
3305         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3306         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3307
3308         host->cqe_on = true;
3309
3310         pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3311                  mmc_hostname(mmc), host->ier,
3312                  sdhci_readl(host, SDHCI_INT_STATUS));
3313
3314         mmiowb();
3315         spin_unlock_irqrestore(&host->lock, flags);
3316 }
3317 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3318
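/*
 * Worked example of the block-size write in sdhci_cqe_enable() above:
 * SDHCI_MAKE_BLKSZ() packs the SDMA buffer boundary into bits 14:12 and
 * the block size into bits 11:0, so with the default boundary argument of
 * 7 (512 KiB) and 512-byte blocks the register value is (illustrative):
 *
 *	SDHCI_MAKE_BLKSZ(7, 512) == (7 << 12) | 512 == 0x7200
 */
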
3319 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3320 {
3321         struct sdhci_host *host = mmc_priv(mmc);
3322         unsigned long flags;
3323
3324         spin_lock_irqsave(&host->lock, flags);
3325
3326         sdhci_set_default_irqs(host);
3327
3328         host->cqe_on = false;
3329
3330         if (recovery) {
3331                 sdhci_do_reset(host, SDHCI_RESET_CMD);
3332                 sdhci_do_reset(host, SDHCI_RESET_DATA);
3333         }
3334
3335         pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3336                  mmc_hostname(mmc), host->ier,
3337                  sdhci_readl(host, SDHCI_INT_STATUS));
3338
3339         mmiowb();
3340         spin_unlock_irqrestore(&host->lock, flags);
3341 }
3342 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3343
3344 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3345                    int *data_error)
3346 {
3347         u32 mask;
3348
3349         if (!host->cqe_on)
3350                 return false;
3351
3352         if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3353                 *cmd_error = -EILSEQ;
3354         else if (intmask & SDHCI_INT_TIMEOUT)
3355                 *cmd_error = -ETIMEDOUT;
3356         else
3357                 *cmd_error = 0;
3358
3359         if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3360                 *data_error = -EILSEQ;
3361         else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3362                 *data_error = -ETIMEDOUT;
3363         else if (intmask & SDHCI_INT_ADMA_ERROR)
3364                 *data_error = -EIO;
3365         else
3366                 *data_error = 0;
3367
3368         /* Clear selected interrupts. */
3369         mask = intmask & host->cqe_ier;
3370         sdhci_writel(host, mask, SDHCI_INT_STATUS);
3371
3372         if (intmask & SDHCI_INT_BUS_POWER)
3373                 pr_err("%s: Card is consuming too much power!\n",
3374                        mmc_hostname(host->mmc));
3375
3376         intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3377         if (intmask) {
3378                 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3379                 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3380                        mmc_hostname(host->mmc), intmask);
3381                 sdhci_dumpregs(host);
3382         }
3383
3384         return true;
3385 }
3386 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3387
3388 /*****************************************************************************\
3389  *                                                                           *
3390  * Device allocation/registration                                            *
3391  *                                                                           *
3392 \*****************************************************************************/
3393
3394 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3395         size_t priv_size)
3396 {
3397         struct mmc_host *mmc;
3398         struct sdhci_host *host;
3399
3400         WARN_ON(dev == NULL);
3401
3402         mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3403         if (!mmc)
3404                 return ERR_PTR(-ENOMEM);
3405
3406         host = mmc_priv(mmc);
3407         host->mmc = mmc;
3408         host->mmc_host_ops = sdhci_ops;
3409         mmc->ops = &host->mmc_host_ops;
3410
3411         host->flags = SDHCI_SIGNALING_330;
3412
3413         host->cqe_ier     = SDHCI_CQE_INT_MASK;
3414         host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3415
3416         host->tuning_delay = -1;
3417
3418         host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3419
3420         return host;
3421 }
3422
3423 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3424
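/*
 * A minimal sketch of typical sdhci_alloc_host() usage by a glue driver;
 * the private structure and its name are illustrative, not from this file:
 *
 *	struct my_sdhci_priv { void __iomem *extra_regs; };
 *	struct my_sdhci_priv *priv;
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	priv = sdhci_priv(host);
 *
 * sdhci_priv() returns the priv_size area allocated after struct
 * sdhci_host.
 */
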
3425 static int sdhci_set_dma_mask(struct sdhci_host *host)
3426 {
3427         struct mmc_host *mmc = host->mmc;
3428         struct device *dev = mmc_dev(mmc);
3429         int ret = -EINVAL;
3430
3431         if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3432                 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3433
3434         /* Try 64-bit mask if the hardware is capable of it */
3435         if (host->flags & SDHCI_USE_64_BIT_DMA) {
3436                 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3437                 if (ret) {
3438                         pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3439                                 mmc_hostname(mmc));
3440                         host->flags &= ~SDHCI_USE_64_BIT_DMA;
3441                 }
3442         }
3443
3444         /* 32-bit mask as default & fallback */
3445         if (ret) {
3446                 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3447                 if (ret)
3448                         pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3449                                 mmc_hostname(mmc));
3450         }
3451
3452         return ret;
3453 }
3454
3455 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3456 {
3457         u16 v;
3458         u64 dt_caps_mask = 0;
3459         u64 dt_caps = 0;
3460
3461         if (host->read_caps)
3462                 return;
3463
3464         host->read_caps = true;
3465
3466         if (debug_quirks)
3467                 host->quirks = debug_quirks;
3468
3469         if (debug_quirks2)
3470                 host->quirks2 = debug_quirks2;
3471
3472         sdhci_do_reset(host, SDHCI_RESET_ALL);
3473
3474         of_property_read_u64(mmc_dev(host->mmc)->of_node,
3475                              "sdhci-caps-mask", &dt_caps_mask);
3476         of_property_read_u64(mmc_dev(host->mmc)->of_node,
3477                              "sdhci-caps", &dt_caps);
3478
3479         v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3480         host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3481
3482         if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3483                 return;
3484
3485         if (caps) {
3486                 host->caps = *caps;
3487         } else {
3488                 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3489                 host->caps &= ~lower_32_bits(dt_caps_mask);
3490                 host->caps |= lower_32_bits(dt_caps);
3491         }
3492
3493         if (host->version < SDHCI_SPEC_300)
3494                 return;
3495
3496         if (caps1) {
3497                 host->caps1 = *caps1;
3498         } else {
3499                 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3500                 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3501                 host->caps1 |= upper_32_bits(dt_caps);
3502         }
3503 }
3504 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3505
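/*
 * Worked example of the DT override in __sdhci_read_caps() above: the
 * "sdhci-caps-mask" and "sdhci-caps" properties are 64-bit values whose
 * upper 32 bits apply to SDHCI_CAPABILITIES_1 and whose lower 32 bits
 * apply to SDHCI_CAPABILITIES.  So, to clear the ADMA2 capability bit
 * (SDHCI_CAN_DO_ADMA2), a board's device tree could carry (illustrative):
 *
 *	sdhci-caps-mask = <0x00000000 0x00080000>;
 */
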
3506 static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3507 {
3508         struct mmc_host *mmc = host->mmc;
3509         unsigned int max_blocks;
3510         unsigned int bounce_size;
3511         int ret;
3512
3513         /*
3514          * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3515          * has diminishing returns, probably because SD/MMC cards are
3516          * usually optimized to handle requests of this size.
3517          */
3518         bounce_size = SZ_64K;
3519         /*
3520          * Adjust downwards to maximum request size if this is less
3521          * than our segment size, else hammer down the maximum
3522          * request size to the maximum buffer size.
3523          */
3524         if (mmc->max_req_size < bounce_size)
3525                 bounce_size = mmc->max_req_size;
3526         max_blocks = bounce_size / 512;
3527
3528         /*
3529          * When we support just one segment, we can get significant
3530          * speedups with the help of a bounce buffer that groups
3531          * scattered reads/writes together.
3532          */
3533         host->bounce_buffer = devm_kmalloc(mmc->parent,
3534                                            bounce_size,
3535                                            GFP_KERNEL);
3536         if (!host->bounce_buffer) {
3537                 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3538                        mmc_hostname(mmc),
3539                        bounce_size);
3540                 /*
3541                  * Exiting with zero here makes sure we proceed with
3542                  * mmc->max_segs == 1.
3543                  */
3544                 return 0;
3545         }
3546
3547         host->bounce_addr = dma_map_single(mmc->parent,
3548                                            host->bounce_buffer,
3549                                            bounce_size,
3550                                            DMA_BIDIRECTIONAL);
3551         ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3552         if (ret)
3553                 /* Again fall back to max_segs == 1 */
3554                 return 0;
3555         host->bounce_buffer_size = bounce_size;
3556
3557         /* Lie about this since we're bouncing */
3558         mmc->max_segs = max_blocks;
3559         mmc->max_seg_size = bounce_size;
3560         mmc->max_req_size = bounce_size;
3561
3562         pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
3563                 mmc_hostname(mmc), max_blocks, bounce_size);
3564
3565         return 0;
3566 }
3567
3568 int sdhci_setup_host(struct sdhci_host *host)
3569 {
3570         struct mmc_host *mmc;
3571         u32 max_current_caps;
3572         unsigned int ocr_avail;
3573         unsigned int override_timeout_clk;
3574         u32 max_clk;
3575         int ret;
3576
3577         WARN_ON(host == NULL);
3578         if (host == NULL)
3579                 return -EINVAL;
3580
3581         mmc = host->mmc;
3582
3583         /*
3584          * If there are external regulators, get them. Note this must be done
3585          * early before resetting the host and reading the capabilities so that
3586          * the host can take the appropriate action if regulators are not
3587          * available.
3588          */
3589         ret = mmc_regulator_get_supply(mmc);
3590         if (ret)
3591                 return ret;
3592
3593         DBG("Version:   0x%08x | Present:  0x%08x\n",
3594             sdhci_readw(host, SDHCI_HOST_VERSION),
3595             sdhci_readl(host, SDHCI_PRESENT_STATE));
3596         DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3597             sdhci_readl(host, SDHCI_CAPABILITIES),
3598             sdhci_readl(host, SDHCI_CAPABILITIES_1));
3599
3600         sdhci_read_caps(host);
3601
3602         override_timeout_clk = host->timeout_clk;
3603
3604         if (host->version > SDHCI_SPEC_300) {
3605                 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3606                        mmc_hostname(mmc), host->version);
3607         }
3608
3609         if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
3610                 mmc->caps2 &= ~MMC_CAP2_CQE;
3611
3612         if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3613                 host->flags |= SDHCI_USE_SDMA;
3614         else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3615                 DBG("Controller doesn't have SDMA capability\n");
3616         else
3617                 host->flags |= SDHCI_USE_SDMA;
3618
3619         if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3620                 (host->flags & SDHCI_USE_SDMA)) {
3621                 DBG("Disabling DMA as it is marked broken\n");
3622                 host->flags &= ~SDHCI_USE_SDMA;
3623         }
3624
3625         if ((host->version >= SDHCI_SPEC_200) &&
3626                 (host->caps & SDHCI_CAN_DO_ADMA2))
3627                 host->flags |= SDHCI_USE_ADMA;
3628
3629         if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3630                 (host->flags & SDHCI_USE_ADMA)) {
3631                 DBG("Disabling ADMA as it is marked broken\n");
3632                 host->flags &= ~SDHCI_USE_ADMA;
3633         }
3634
3635         /*
3636          * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3637          * and *must* do 64-bit DMA.  A driver has the opportunity to change
3638          * that during the first call to ->enable_dma().  Similarly
3639          * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3640          * implement.
3641          */
3642         if (host->caps & SDHCI_CAN_64BIT)
3643                 host->flags |= SDHCI_USE_64_BIT_DMA;
3644
3645         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3646                 ret = sdhci_set_dma_mask(host);
3647
3648                 if (!ret && host->ops->enable_dma)
3649                         ret = host->ops->enable_dma(host);
3650
3651                 if (ret) {
3652                         pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3653                                 mmc_hostname(mmc));
3654                         host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3655
3656                         ret = 0;
3657                 }
3658         }
3659
3660         /* SDMA does not support 64-bit DMA */
3661         if (host->flags & SDHCI_USE_64_BIT_DMA)
3662                 host->flags &= ~SDHCI_USE_SDMA;
3663
3664         if (host->flags & SDHCI_USE_ADMA) {
3665                 dma_addr_t dma;
3666                 void *buf;
3667
3668                 /*
3669                  * The DMA descriptor table size is calculated as the maximum
3670                  * number of segments times 2, to allow for an alignment
3671                  * descriptor for each segment, plus 1 for a nop end descriptor,
3672                  * all multiplied by the descriptor size.
3673                  */
3674                 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3675                         host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3676                                               SDHCI_ADMA2_64_DESC_SZ;
3677                         host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3678                 } else {
3679                         host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3680                                               SDHCI_ADMA2_32_DESC_SZ;
3681                         host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3682                 }
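
		/*
		 * Worked example, assuming SDHCI_MAX_SEGS is 128 and the
		 * descriptor sizes are 12 bytes (64-bit) and 8 bytes
		 * (32-bit): the table is (128 * 2 + 1) * 12 = 3084 bytes
		 * in the 64-bit case and (128 * 2 + 1) * 8 = 2056 bytes
		 * in the 32-bit case.
		 */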
3683
3684                 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3685                 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3686                                          host->adma_table_sz, &dma, GFP_KERNEL);
3687                 if (!buf) {
3688                         pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3689                                 mmc_hostname(mmc));
3690                         host->flags &= ~SDHCI_USE_ADMA;
3691                 } else if ((dma + host->align_buffer_sz) &
3692                            (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3693                         pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3694                                 mmc_hostname(mmc));
3695                         host->flags &= ~SDHCI_USE_ADMA;
3696                         dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3697                                           host->adma_table_sz, buf, dma);
3698                 } else {
3699                         host->align_buffer = buf;
3700                         host->align_addr = dma;
3701
3702                         host->adma_table = buf + host->align_buffer_sz;
3703                         host->adma_addr = dma + host->align_buffer_sz;
3704                 }
3705         }
3706
3707         /*
3708          * If we use DMA, then it's up to the caller to set the DMA
3709          * mask, but PIO does not need the hw shim so we set a new
3710          * mask here in that case.
3711          */
3712         if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3713                 host->dma_mask = DMA_BIT_MASK(64);
3714                 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3715         }
3716
3717         if (host->version >= SDHCI_SPEC_300)
3718                 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3719                         >> SDHCI_CLOCK_BASE_SHIFT;
3720         else
3721                 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3722                         >> SDHCI_CLOCK_BASE_SHIFT;
3723
3724         host->max_clk *= 1000000;
3725         if (host->max_clk == 0 || host->quirks &
3726                         SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3727                 if (!host->ops->get_max_clock) {
3728                         pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3729                                mmc_hostname(mmc));
3730                         ret = -ENODEV;
3731                         goto undma;
3732                 }
3733                 host->max_clk = host->ops->get_max_clock(host);
3734         }
3735
3736         /*
3737          * For a v3.00 Host Controller, find out whether the clock
3738          * multiplier is supported.
3739          */
3740         host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3741                         SDHCI_CLOCK_MUL_SHIFT;
3742
3743         /*
3744          * If the value of Clock Multiplier is 0, programmable clock
3745          * mode is not supported; otherwise the actual clock multiplier
3746          * is one more than the value of Clock Multiplier in the
3747          * Capabilities Register.
3748          */
3749         if (host->clk_mul)
3750                 host->clk_mul += 1;
3751
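	/*
	 * Worked example (illustrative): if the Clock Multiplier field
	 * reads 3, the effective multiplier is 4, so with a 52 MHz base
	 * clock the maximum programmable clock is 52 MHz * 4 = 208 MHz
	 * before the divider is applied.
	 */
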
3752         /*
3753          * Set host parameters.
3754          */
3755         max_clk = host->max_clk;
3756
3757         if (host->ops->get_min_clock)
3758                 mmc->f_min = host->ops->get_min_clock(host);
3759         else if (host->version >= SDHCI_SPEC_300) {
3760                 if (host->clk_mul)
3761                         max_clk = host->max_clk * host->clk_mul;
3762                 /*
3763                  * Divided Clock Mode minimum clock rate is always less than
3764                  * Programmable Clock Mode minimum clock rate.
3765                  */
3766                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3767         } else
3768                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3769
3770         if (!mmc->f_max || mmc->f_max > max_clk)
3771                 mmc->f_max = max_clk;
3772
3773         if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3774                 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3775                                         SDHCI_TIMEOUT_CLK_SHIFT;
3776
3777                 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3778                         host->timeout_clk *= 1000;
3779
3780                 if (host->timeout_clk == 0) {
3781                         if (!host->ops->get_timeout_clock) {
3782                                 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3783                                         mmc_hostname(mmc));
3784                                 ret = -ENODEV;
3785                                 goto undma;
3786                         }
3787
3788                         host->timeout_clk =
3789                                 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3790                                              1000);
3791                 }
3792
3793                 if (override_timeout_clk)
3794                         host->timeout_clk = override_timeout_clk;
3795
3796                 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3797                         host->ops->get_max_timeout_count(host) : 1 << 27;
3798                 mmc->max_busy_timeout /= host->timeout_clk;
3799         }
3800
3801         if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3802             !host->ops->get_max_timeout_count)
3803                 mmc->max_busy_timeout = 0;
3804
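	/*
	 * Worked example of the busy-timeout computation above
	 * (illustrative): host->timeout_clk is kept in kHz, so with a
	 * 48 MHz timeout clock (timeout_clk == 48000) and the default
	 * maximum count of 1 << 27 timeout-clock cycles:
	 *
	 *	max_busy_timeout = (1 << 27) / 48000 ~= 2796 ms
	 */
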
3805         mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3806         mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3807
3808         if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3809                 host->flags |= SDHCI_AUTO_CMD12;
3810
3811         /* Auto-CMD23 stuff only works in ADMA or PIO. */
3812         if ((host->version >= SDHCI_SPEC_300) &&
3813             ((host->flags & SDHCI_USE_ADMA) ||
3814              !(host->flags & SDHCI_USE_SDMA)) &&
3815              !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3816                 host->flags |= SDHCI_AUTO_CMD23;
3817                 DBG("Auto-CMD23 available\n");
3818         } else {
3819                 DBG("Auto-CMD23 unavailable\n");
3820         }
3821
3822         /*
3823          * A controller may support 8-bit width, but the board itself
3824          * might not have the pins brought out.  Boards that support
3825          * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3826          * their platform code before calling sdhci_add_host(), and we
3827          * won't assume 8-bit width for hosts without that CAP.
3828          */
3829         if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3830                 mmc->caps |= MMC_CAP_4_BIT_DATA;
3831
3832         if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3833                 mmc->caps &= ~MMC_CAP_CMD23;
3834
3835         if (host->caps & SDHCI_CAN_DO_HISPD)
3836                 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3837
3838         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3839             mmc_card_is_removable(mmc) &&
3840             mmc_gpio_get_cd(host->mmc) < 0)
3841                 mmc->caps |= MMC_CAP_NEEDS_POLL;
3842
3843         if (!IS_ERR(mmc->supply.vqmmc)) {
3844                 ret = regulator_enable(mmc->supply.vqmmc);
3845
3846                 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
3847                 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3848                                                     1950000))
3849                         host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3850                                          SDHCI_SUPPORT_SDR50 |
3851                                          SDHCI_SUPPORT_DDR50);
3852
3853                 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
3854                 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
3855                                                     3600000))
3856                         host->flags &= ~SDHCI_SIGNALING_330;
3857
3858                 if (ret) {
3859                         pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3860                                 mmc_hostname(mmc), ret);
3861                         mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3862                 }
3863         }
3864
3865         if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3866                 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3867                                  SDHCI_SUPPORT_DDR50);
3868                 /*
3869                  * The SDHCI controller in a SoC might support HS200/HS400
3870                  * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
3871                  * but if the board is modeled such that the IO lines are not
3872                  * connected to 1.8v then HS200/HS400 cannot be supported.
3873                  * Disable HS200/HS400 if the board does not have 1.8v connected
3874                  * to the IO lines. (Applicable for other modes in 1.8v)
3875                  */
3876                 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
3877                 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
3878         }
3879
3880         /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3881         if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3882                            SDHCI_SUPPORT_DDR50))
3883                 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3884
3885         /* SDR104 support also implies SDR50 support */
3886         if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3887                 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3888                 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3889                  * field can be promoted to support HS200.
3890                  */
3891                 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3892                         mmc->caps2 |= MMC_CAP2_HS200;
3893         } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3894                 mmc->caps |= MMC_CAP_UHS_SDR50;
3895         }
3896
3897         if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3898             (host->caps1 & SDHCI_SUPPORT_HS400))
3899                 mmc->caps2 |= MMC_CAP2_HS400;
3900
3901         if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3902             (IS_ERR(mmc->supply.vqmmc) ||
3903              !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3904                                              1300000)))
3905                 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3906
3907         if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3908             !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3909                 mmc->caps |= MMC_CAP_UHS_DDR50;
3910
3911         /* Does the host need tuning for SDR50? */
3912         if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3913                 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3914
3915         /* Driver Type(s) (A, C, D) supported by the host */
3916         if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3917                 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3918         if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3919                 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3920         if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3921                 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3922
3923         /* Initial value for re-tuning timer count */
3924         host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3925                              SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3926
3927         /*
3928          * If the Re-tuning Timer is not disabled, the actual re-tuning
3929          * interval will be 2 ^ (n - 1) seconds.
3930          */
3931         if (host->tuning_count)
3932                 host->tuning_count = 1 << (host->tuning_count - 1);
3933
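	/*
	 * Worked example (illustrative): a raw Re-tuning Timer Count of 4
	 * yields a re-tuning interval of 1 << (4 - 1) = 8 seconds, while a
	 * raw value of 0 leaves re-tuning disabled.
	 */
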
3934         /* Re-tuning mode supported by the Host Controller */
3935         host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3936                              SDHCI_RETUNING_MODE_SHIFT;
3937
3938         ocr_avail = 0;
3939
3940         /*
3941          * According to SD Host Controller spec v3.00, if the Host System
3942          * can afford more than 150mA, the Host Driver should set XPC to 1.
3943          * Also, the value is meaningful only if Voltage Support in the
3944          * Capabilities register is set. The actual current value is 4 times
3945          * the register value.
3946          */
3947         max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3948         if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3949                 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3950                 if (curr > 0) {
3951
3952                         /* convert to SDHCI_MAX_CURRENT format */
3953                         curr = curr/1000;  /* convert to mA */
3954                         curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3955
3956                         curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3957                         max_current_caps =
3958                                 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3959                                 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3960                                 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3961                 }
3962         }
3963
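	/*
	 * Worked example of the conversion above (illustrative): a vmmc
	 * regulator limited to 1,500,000 uA is 1500 mA; divided by
	 * SDHCI_MAX_CURRENT_MULTIPLIER (4) that gives 375, which is then
	 * clamped to SDHCI_MAX_CURRENT_LIMIT (255), so each per-voltage
	 * field reports 255, i.e. a 255 * 4 = 1020 mA limit.
	 */
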
3964         if (host->caps & SDHCI_CAN_VDD_330) {
3965                 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3966
3967                 mmc->max_current_330 = ((max_current_caps &
3968                                    SDHCI_MAX_CURRENT_330_MASK) >>
3969                                    SDHCI_MAX_CURRENT_330_SHIFT) *
3970                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3971         }
3972         if (host->caps & SDHCI_CAN_VDD_300) {
3973                 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3974
3975                 mmc->max_current_300 = ((max_current_caps &
3976                                    SDHCI_MAX_CURRENT_300_MASK) >>
3977                                    SDHCI_MAX_CURRENT_300_SHIFT) *
3978                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3979         }
3980         if (host->caps & SDHCI_CAN_VDD_180) {
3981                 ocr_avail |= MMC_VDD_165_195;
3982
3983                 mmc->max_current_180 = ((max_current_caps &
3984                                    SDHCI_MAX_CURRENT_180_MASK) >>
3985                                    SDHCI_MAX_CURRENT_180_SHIFT) *
3986                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3987         }
3988
3989         /* If OCR set by host, use it instead. */
3990         if (host->ocr_mask)
3991                 ocr_avail = host->ocr_mask;
3992
3993         /* If OCR set by external regulators, give it highest prio. */
3994         if (mmc->ocr_avail)
3995                 ocr_avail = mmc->ocr_avail;
3996
3997         mmc->ocr_avail = ocr_avail;
3998         mmc->ocr_avail_sdio = ocr_avail;
3999         if (host->ocr_avail_sdio)
4000                 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4001         mmc->ocr_avail_sd = ocr_avail;
4002         if (host->ocr_avail_sd)
4003                 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4004         else /* normal SD controllers don't support 1.8V */
4005                 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4006         mmc->ocr_avail_mmc = ocr_avail;
4007         if (host->ocr_avail_mmc)
4008                 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4009
4010         if (mmc->ocr_avail == 0) {
4011                 pr_err("%s: Hardware doesn't report any supported voltages.\n",
4012                        mmc_hostname(mmc));
4013                 ret = -ENODEV;
4014                 goto unreg;
4015         }
4016
4017         if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4018                           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4019                           MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4020             (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4021                 host->flags |= SDHCI_SIGNALING_180;
4022
4023         if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4024                 host->flags |= SDHCI_SIGNALING_120;
4025
4026         spin_lock_init(&host->lock);
4027
4028         /*
4029          * Maximum number of sectors in one transfer. Limited by SDMA boundary
4030          * size (512KiB). Note some tuning modes impose a 4MiB limit, but
4031          * 512KiB is the smaller limit anyway.
4032          */
4033         mmc->max_req_size = 524288;
4034
4035         /*
4036          * Maximum number of segments. Depends on if the hardware
4037          * can do scatter/gather or not.
4038          */
4039         if (host->flags & SDHCI_USE_ADMA) {
4040                 mmc->max_segs = SDHCI_MAX_SEGS;
4041         } else if (host->flags & SDHCI_USE_SDMA) {
4042                 mmc->max_segs = 1;
4043                 if (swiotlb_max_segment()) {
4044                         unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4045                                                 IO_TLB_SEGSIZE;
4046                         mmc->max_req_size = min(mmc->max_req_size,
4047                                                 max_req_size);
4048                 }
4049         } else { /* PIO */
4050                 mmc->max_segs = SDHCI_MAX_SEGS;
4051         }
4052
4053         /*
4054          * Maximum segment size. Could be one segment with the maximum number
4055          * of bytes. When doing hardware scatter/gather, each entry cannot
4056          * be larger than 64 KiB though.
4057          */
4058         if (host->flags & SDHCI_USE_ADMA) {
4059                 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4060                         mmc->max_seg_size = 65535;
4061                 else
4062                         mmc->max_seg_size = 65536;
4063         } else {
4064                 mmc->max_seg_size = mmc->max_req_size;
4065         }
4066
4067         /*
4068          * Maximum block size. This varies from controller to controller and
4069          * is specified in the capabilities register.
4070          */
4071         if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4072                 mmc->max_blk_size = 2;
4073         } else {
4074                 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4075                                 SDHCI_MAX_BLOCK_SHIFT;
4076                 if (mmc->max_blk_size >= 3) {
4077                         pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4078                                 mmc_hostname(mmc));
4079                         mmc->max_blk_size = 0;
4080                 }
4081         }
4082
4083         mmc->max_blk_size = 512 << mmc->max_blk_size;
4084
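	/*
	 * Worked example (illustrative): a Max Block Length field of 2
	 * gives 512 << 2 = 2048 bytes, while the reserved value 3 is
	 * rejected above and falls back to 512 << 0 = 512 bytes.
	 */
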
4085         /*
4086          * Maximum block count.
4087          */
4088         mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4089
4090         if (mmc->max_segs == 1) {
4091                 /* This may alter mmc->*_blk_* parameters */
4092                 ret = sdhci_allocate_bounce_buffer(host);
4093                 if (ret)
4094                         return ret;
4095         }
4096
4097         return 0;
4098
4099 unreg:
4100         if (!IS_ERR(mmc->supply.vqmmc))
4101                 regulator_disable(mmc->supply.vqmmc);
4102 undma:
4103         if (host->align_buffer)
4104                 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4105                                   host->adma_table_sz, host->align_buffer,
4106                                   host->align_addr);
4107         host->adma_table = NULL;
4108         host->align_buffer = NULL;
4109
4110         return ret;
4111 }
4112 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4113
4114 void sdhci_cleanup_host(struct sdhci_host *host)
4115 {
4116         struct mmc_host *mmc = host->mmc;
4117
4118         if (!IS_ERR(mmc->supply.vqmmc))
4119                 regulator_disable(mmc->supply.vqmmc);
4120
4121         if (host->align_buffer)
4122                 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4123                                   host->adma_table_sz, host->align_buffer,
4124                                   host->align_addr);
4125         host->adma_table = NULL;
4126         host->align_buffer = NULL;
4127 }
4128 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4129
4130 int __sdhci_add_host(struct sdhci_host *host)
4131 {
4132         struct mmc_host *mmc = host->mmc;
4133         int ret;
4134
4135         /*
4136          * Init tasklets.
4137          */
4138         tasklet_init(&host->finish_tasklet,
4139                 sdhci_tasklet_finish, (unsigned long)host);
4140
4141         timer_setup(&host->timer, sdhci_timeout_timer, 0);
4142         timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4143
4144         init_waitqueue_head(&host->buf_ready_int);
4145
4146         sdhci_init(host, 0);
4147
4148         ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4149                                    IRQF_SHARED, mmc_hostname(mmc), host);
4150         if (ret) {
4151                 pr_err("%s: Failed to request IRQ %d: %d\n",
4152                        mmc_hostname(mmc), host->irq, ret);
4153                 goto untasklet;
4154         }
4155
4156         ret = sdhci_led_register(host);
4157         if (ret) {
4158                 pr_err("%s: Failed to register LED device: %d\n",
4159                        mmc_hostname(mmc), ret);
4160                 goto unirq;
4161         }
4162
4163         mmiowb();
4164
4165         ret = mmc_add_host(mmc);
4166         if (ret)
4167                 goto unled;
4168
4169         pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4170                 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4171                 (host->flags & SDHCI_USE_ADMA) ?
4172                 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4173                 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4174
4175         sdhci_enable_card_detection(host);
4176
4177         return 0;
4178
4179 unled:
4180         sdhci_led_unregister(host);
4181 unirq:
4182         sdhci_do_reset(host, SDHCI_RESET_ALL);
4183         sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4184         sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4185         free_irq(host->irq, host);
4186 untasklet:
4187         tasklet_kill(&host->finish_tasklet);
4188
4189         return ret;
4190 }
4191 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4192
4193 int sdhci_add_host(struct sdhci_host *host)
4194 {
4195         int ret;
4196
4197         ret = sdhci_setup_host(host);
4198         if (ret)
4199                 return ret;
4200
4201         ret = __sdhci_add_host(host);
4202         if (ret)
4203                 goto cleanup;
4204
4205         return 0;
4206
4207 cleanup:
4208         sdhci_cleanup_host(host);
4209
4210         return ret;
4211 }
4212 EXPORT_SYMBOL_GPL(sdhci_add_host);
4213
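/*
 * A minimal sketch of how a platform glue driver typically uses the
 * helpers above; the names, resource index and abbreviated error handling
 * are illustrative, not from this file:
 *
 *	host = sdhci_alloc_host(&pdev->dev, 0);
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *
 *	host->hw_name = "example-sdhci";
 *	host->irq = platform_get_irq(pdev, 0);
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *	return ret;
 */
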
4214 void sdhci_remove_host(struct sdhci_host *host, int dead)
4215 {
4216         struct mmc_host *mmc = host->mmc;
4217         unsigned long flags;
4218
4219         if (dead) {
4220                 spin_lock_irqsave(&host->lock, flags);
4221
4222                 host->flags |= SDHCI_DEVICE_DEAD;
4223
4224                 if (sdhci_has_requests(host)) {
4225                         pr_err("%s: Controller removed during "
4226                                "transfer!\n", mmc_hostname(mmc));
4227                         sdhci_error_out_mrqs(host, -ENOMEDIUM);
4228                 }
4229
4230                 spin_unlock_irqrestore(&host->lock, flags);
4231         }
4232
4233         sdhci_disable_card_detection(host);
4234
4235         mmc_remove_host(mmc);
4236
4237         sdhci_led_unregister(host);
4238
4239         if (!dead)
4240                 sdhci_do_reset(host, SDHCI_RESET_ALL);
4241
4242         sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4243         sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4244         free_irq(host->irq, host);
4245
4246         del_timer_sync(&host->timer);
4247         del_timer_sync(&host->data_timer);
4248
4249         tasklet_kill(&host->finish_tasklet);
4250
4251         if (!IS_ERR(mmc->supply.vqmmc))
4252                 regulator_disable(mmc->supply.vqmmc);
4253
4254         if (host->align_buffer)
4255                 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4256                                   host->adma_table_sz, host->align_buffer,
4257                                   host->align_addr);
4258
4259         host->adma_table = NULL;
4260         host->align_buffer = NULL;
4261 }
4262
4263 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4264
4265 void sdhci_free_host(struct sdhci_host *host)
4266 {
4267         mmc_free_host(host->mmc);
4268 }
4269
4270 EXPORT_SYMBOL_GPL(sdhci_free_host);
4271
4272 /*****************************************************************************\
4273  *                                                                           *
4274  * Driver init/exit                                                          *
4275  *                                                                           *
4276 \*****************************************************************************/
4277
4278 static int __init sdhci_drv_init(void)
4279 {
4280         pr_info(DRIVER_NAME
4281                 ": Secure Digital Host Controller Interface driver\n");
4282         pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4283
4284         return 0;
4285 }
4286
4287 static void __exit sdhci_drv_exit(void)
4288 {
4289 }
4290
4291 module_init(sdhci_drv_init);
4292 module_exit(sdhci_drv_exit);
4293
4294 module_param(debug_quirks, uint, 0444);
4295 module_param(debug_quirks2, uint, 0444);
4296
4297 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4298 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4299 MODULE_LICENSE("GPL");
4300
4301 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4302 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");