/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base +
		(cq_host->mmc->max_segs * tag *
		 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base +
		(cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}

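/*
 * Worked example (illustrative sketch, assuming the descriptor attribute
 * layout in cqhci.h: VALID bit 0, END bit 1, INT bit 2, ACT bits 5:3):
 * the DCMD slot's terminating link descriptor above encodes as 0x2 (END
 * only), while a normal slot's chain link descriptor encodes as 0x31
 * (VALID | ACT = 0x6), followed at byte offset 4 by the DMA address of
 * that slot's transfer-descriptor list.
 */
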
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */

static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
		cq_host->mmc->cqe_qdepth;

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size,
		 cq_host->data_size, cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base,
		 cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < (cq_host->num_slots); i++)
		setup_trans_desc(cq_host, i);

	return 0;
}

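/*
 * Sizing example (an illustrative sketch, not taken from this file): with
 * 64-bit DMA, 64-bit task descriptors and no quirks, slot_sz = 8 + 16 =
 * 24 bytes and desc_size = 24 * 32 = 768 bytes for 32 slots. Assuming a
 * hypothetical host with max_segs = 128 and cqe_qdepth = 32, data_size =
 * 16 * 128 * 32 = 64 KiB of transfer descriptors.
 */
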
static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
		cqhci_writel(cq_host, 0, CQHCI_CTL);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_suspend(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_suspend);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err)
		return err;

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 u64 *data, bool intr)
{
	u32 req_flags = mrq->data->flags;

	*data = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(intr) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
		 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}

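/*
 * For illustration (a sketch based on the field macros in cqhci.h): a
 * single-block read with interrupt-on-completion sets VALID, END, INT,
 * ACT = 0x5 (task descriptor), DATA_DIR = 1 (read) and BLK_COUNT = 1,
 * with the 32-bit block address in the upper half of the 64-bit word.
 */
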
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i+1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	u64 data = 0;
	u64 *task_desc = NULL;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		task_desc = (__le64 __force *)get_desc(cq_host, tag);
		cqhci_prep_task_desc(mrq, &data, 1);
		*task_desc = cpu_to_le64(data);
		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;

	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & CQHCI_IS_RED) || cmd_error || data_error)
		cqhci_error_irq(mmc, status, cmd_error, data_error);

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

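/*
 * Example (illustrative sketch, not part of this driver): a host
 * controller driver forwards command-queue interrupts here from its own
 * handler. The foo_* names below are hypothetical:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *		u32 intmask = foo_read_int_status(mmc);
 *		int cmd_error = 0, data_error = 0;
 *
 *		foo_decode_errors(mmc, intmask, &cmd_error, &data_error);
 *		return cqhci_irq(mmc, intmask, cmd_error, data_error);
 *	}
 */
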
static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d\n",
		       mmc_hostname(mmc), tag);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to clear tasks\n",
			 mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret a
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT	5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/*
	 * The specification contradicts itself: it says tasks cannot be
	 * cleared if CQHCI does not halt, but also that if CQHCI does not
	 * halt it should be disabled and re-enabled, without disabling it
	 * before clearing tasks. Have a go anyway.
	 */
	if (!ok) {
		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		cqcfg |= CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
		/* Be sure that there are no tasks */
		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
			ok = false;
		WARN_ON(!ok);
	}

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);

	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci_mem");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

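/*
 * Example (illustrative sketch): a platform host driver's probe would
 * typically call this against a platform device that carries an MMIO
 * resource named "cqhci_mem":
 *
 *	cq_host = cqhci_pltfm_init(pdev);
 *	if (IS_ERR(cq_host))
 *		return PTR_ERR(cq_host);
 */
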
static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);

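/*
 * Example (illustrative sketch, hypothetical foo_* names): after
 * cqhci_pltfm_init(), a host driver supplies its ops and hands the
 * mmc_host's CQE callbacks over to this library:
 *
 *	cq_host->ops = &foo_cqhci_host_ops;
 *	err = cqhci_init(cq_host, mmc, foo_supports_64bit_dma(host));
 *	if (err)
 *		return err;
 */
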
MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");