 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2016 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-16 Renesas Electronics Corporation
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
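
/*
 * Advance to the next scatterlist entry. Returns the number of entries
 * still remaining after the advance (0 once the list is exhausted).
 */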
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
	host->sg_ptr = sg_next(host->sg_ptr);
	return --host->sg_len;
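
/*
 * Request watchdog timeout in milliseconds; used to schedule and check
 * delayed_reset_work when a command does not complete.
 */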
#define CMDREQ_TIMEOUT	5000

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	if (status & TMIO_STAT_##a) { \

static void pr_debug_status(u32 status)
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);

#define pr_debug_status(s)  do { } while (0)

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));
		host->sdio_irq_enabled = true;

		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);

static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
			       unsigned int new_clock)
	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
		clock = host->mmc->f_min;
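
	/*
	 * Descriptive note: "clk" encodes the divider setting. It starts at
	 * 0x80000080 (low byte 0x80, the slowest divide-by-512 rate) and is
	 * shifted right once per step towards the requested frequency; the
	 * low byte is later written through CLK_CTL_DIV_MASK. Once the shift
	 * moves past the low byte, bit 22 flags the undivided 1/1 clock,
	 * honoured only if the platform sets TMIO_MMC_CLK_ACTUAL.
	 */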
	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)

	/* 1/1 clock is an option */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))

	tmio_mmc_clk_start(host);

static void tmio_mmc_reset(struct tmio_mmc_host *host)
	/* FIXME - should we set the stop clock reg here? */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);

static void tmio_mmc_reset_work(struct work_struct *work)
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
				     msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		host->data->error = -ETIMEDOUT;
		host->cmd->error = -ETIMEDOUT;
		mrq->cmd->error = -ETIMEDOUT;

	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);

/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);

	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);

	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);

static void tmio_mmc_done_work(struct work_struct *work)
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
	tmio_mmc_finish_request(host);

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
	struct mmc_data *data = host->data;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1_NO_CRC:
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));

	/* FIXME - this seems to be OK commented out, but the spec suggests
	 * this bit should be set when issuing app commands.
	 *	if (cmd->flags & MMC_FLAG_ACMD)
	 */
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED during a
			 * multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED))
		if (data->flags & MMC_DATA_READ)

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
	int is_read = host->data->flags & MMC_DATA_READ;

		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was an even number */

	/* if count was an odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * The driver and this function assume that
	 * it is used as little endian
	 */
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
	struct mmc_data *data = host->data;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		pr_debug("Spurious PIO IRQ\n");

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
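
/*
 * For reads that went through the DMA bounce buffer, copy the received data
 * back into the caller's original scatterlist entry.
 */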
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
	if (host->sg_ptr == &host->bounce_sg) {
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");

	/* FIXME - return correct transfer count on errors */
		data->bytes_xfered = data->blocks * data->blksz;
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto-generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",

		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);

	schedule_work(&host->done);

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
	struct mmc_data *data;

	spin_lock(&host->lock);

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set; in these cases,
		 * waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
			if (!(status & TMIO_STAT_CMD_BUSY))

			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);

	spin_unlock(&host->lock);

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	struct mmc_command *cmd = host->cmd;

	spin_lock(&host->lock);

		pr_debug("Spurious CMD irq\n");

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
				tasklet_schedule(&host->dma_issue);
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
				tasklet_schedule(&host->dma_issue);
		schedule_work(&host->done);

	spin_unlock(&host->lock);

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
				      TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);

	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);

static bool tmio_mmc_sdio_irq(int irq, void *devid)
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

irqreturn_t tmio_mmc_irq(int irq, void *devid)
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
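
	/* Dispatch: card-detect events first, then command/data interrupts,
	 * then SDIO interrupts. */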
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
	if (tmio_mmc_sdio_irq(irq, devid))

EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);

	tmio_mmc_init_sg(host, data);

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
	struct tmio_mmc_host *host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);

	host->last_req_ts = jiffies;

	spin_unlock_irqrestore(&host->lock, flags);

		ret = tmio_mmc_start_data(host, mrq->data);

	ret = tmio_mmc_start_command(host, mrq->cmd);
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(CMDREQ_TIMEOUT));

	host->force_pio = false;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);

static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
	if (!host->clk_enable)

	return host->clk_enable(host);

static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
	struct mmc_host *mmc = host->mmc;

	/* .set_ios() returns void, so there is no chance to report an error */
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empirical value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us was not enough. Is this the same 140us delay as in
		 * tmio_mmc_set_ios()?
		 */

	/*
	 * It seems VccQ should be switched on after Vcc; this is also what the
	 * omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);

		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		host->set_pwr(host->pdev, 0);

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
		if (IS_ERR(host->mrq)) {
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);

	/* Let things settle. Delay taken from the WinCE driver. */

	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);

static int tmio_mmc_get_ro(struct mmc_host *mmc)
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);

	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

static struct mmc_host_ops tmio_mmc_ops = {
	.request		= tmio_mmc_request,
	.set_ios		= tmio_mmc_set_ios,
	.get_ro			= tmio_mmc_get_ro,
	.get_cd			= mmc_gpio_get_cd,
	.enable_sdio_irq	= tmio_mmc_enable_sdio_irq,
	.multi_io_quirk		= tmio_multi_io_quirk,

static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* use ocr_mask if no regulator */
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * There is a possibility that the regulator has not been probed
	 */
		return -EPROBE_DEFER;

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
	const struct device_node *np = pdev->dev.of_node;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;

struct tmio_mmc_host*
tmio_mmc_host_alloc(struct platform_device *pdev)
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);

	host = mmc_priv(mmc);

EXPORT_SYMBOL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
	mmc_free_host(host->mmc);

EXPORT_SYMBOL(tmio_mmc_host_free);

int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
			struct tmio_mmc_data *pdata)
	struct platform_device *pdev = _host->pdev;
	struct mmc_host *mmc = _host->mmc;
	struct resource *res_ctl;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = mmc_of_parse(mmc);

	_host->pdata = pdata;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);

	_host->ctl = devm_ioremap(&pdev->dev,
				  res_ctl->start, resource_size(res_ctl));

	tmio_mmc_ops.card_busy = _host->card_busy;
	tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
	mmc->ops = &tmio_mmc_ops;

	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
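
	/*
	 * Request geometry: the controller transfers blocks of at most 512
	 * bytes; the maximum request and segment sizes are derived from the
	 * block size and block count below.
	 */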
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
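
	/*
	 * Use the controller's own card-detect logic only when no other
	 * detection method is in use: no GPIO card detect, no polling, a
	 * removable card, and no slot card-detect IRQ.
	 */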
	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc) ||
				  mmc->slot.cd_irq >= 0);

	/*
	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
	 * hotplug gets disabled. It seems RuntimePM related, yet we need
	 * further research. Since we are planning a PM overhaul anyway, let's
	 * enforce for now that the device stays active by always enabling
	 * native hotplug.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	if (tmio_mmc_clk_enable(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0) {

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
		sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
		tmio_mmc_host_remove(_host);

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
			tmio_mmc_host_remove(_host);
		mmc_gpiod_request_cd_irq(mmc);

EXPORT_SYMBOL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

EXPORT_SYMBOL(tmio_mmc_host_remove);

int tmio_mmc_host_runtime_suspend(struct device *dev)
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	if (host->clk_disable)
		host->clk_disable(host);

EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
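
	/*
	 * Re-initialise the controller on resume: reset it, re-enable the
	 * clock, restore the cached clock rate and re-enable DMA, since the
	 * hardware may have lost its state while powered down.
	 */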
	tmio_mmc_reset(host);
	tmio_mmc_clk_enable(host);

	if (host->clk_cache)
		tmio_mmc_set_clock(host, host->clk_cache);

	tmio_mmc_enable_dma(host, true);

EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);

MODULE_LICENSE("GPL v2");