/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"
#define MMC_OPS_TIMEOUT_MS		(10 * 60 * 1000) /* 10 min */
#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120 s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30 s */
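/*
 * Fixed tuning block patterns, as defined by the SD (CMD19) and eMMC
 * (CMD21) specifications. mmc_send_tuning() below reads one block from
 * the card and compares it against the pattern for the active bus width.
 */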
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
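/*
 * Illustrative (not compiled) usage sketch: callers typically poll
 * mmc_send_status() and decode the R1 state field, e.g.:
 *
 *	u32 status;
 *	if (!mmc_send_status(card, &status) &&
 *	    R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		;	// card is back in the transfer state
 */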
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
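/*
 * Note (assumed context): host->dsr is expected to be populated from
 * platform data or the devicetree "dsr" property before this is called;
 * the low 16 bits of the CMD4 argument are stuff bits per the spec.
 */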
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
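/*
 * For reference, the typical (e)MMC probe flow: mmc_go_idle() resets the
 * card with CMD0, then mmc_send_op_cond() is issued in a negotiation loop
 * (CMD1) until the card reports power-up complete in the OCR.
 */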
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
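/* R2 responses carry the full 128-bit CSD/CID, returned in cmd.resp[0..3]. */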
/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}
static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
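/*
 * Illustrative usage sketch: the caller owns the returned buffer and must
 * free it, e.g.:
 *
 *	u8 *ext_csd;
 *	if (!mmc_get_ext_csd(card, &ext_csd)) {
 *		pr_info("ext_csd rev %u\n", ext_csd[EXT_CSD_REV]);
 *		kfree(ext_csd);
 *	}
 */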
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}
/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}
int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using ->card_busy(), then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
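/*
 * Illustrative usage sketch, e.g. enabling the card's volatile cache:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
 *			 1, card->ext_csd.generic_cmd6_time);
 */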
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should
	 * be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
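/*
 * Host drivers typically call mmc_send_tuning() from their
 * ->execute_tuning() callback, once per sampling point, until the block
 * read back matches the expected pattern.
 */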
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification specifies that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC (CMD21).
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/* BUS_TEST_R returns the inverted pattern, so XOR must be 0xff */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card didn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}
/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, then poll the card status
 *	until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is EINVAL, we can't issue an HPI.
	 * It should complete the BKOPS.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_TIMEOUT_MS;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, 0,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is executed
	 * synchronously; otherwise the operation is still in progress.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1,
				MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}
int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
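/*
 * Note: besides issuing CMD6, the switch above caches the result in
 * card->ext_csd.cmdq_en; callers such as the block layer can temporarily
 * disable command queueing (e.g. around ioctl or RPMB access) and
 * re-enable it afterwards.
 */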