1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Synopsys DesignWare Multimedia Card Interface driver
4 * (Based on NXP driver for lpc 31xx)
6 * Copyright (C) 2009 NXP Semiconductors
7 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
10 #include <linux/blkdev.h>
11 #include <linux/clk.h>
12 #include <linux/debugfs.h>
13 #include <linux/device.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/iopoll.h>
19 #include <linux/ioport.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/seq_file.h>
24 #include <linux/slab.h>
25 #include <linux/stat.h>
26 #include <linux/delay.h>
27 #include <linux/irq.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/mmc.h>
31 #include <linux/mmc/sd.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/bitops.h>
34 #include <linux/regulator/consumer.h>
36 #include <linux/of_gpio.h>
37 #include <linux/mmc/slot-gpio.h>
41 /* Common flag combinations */
42 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
43 SDMMC_INT_HTO | SDMMC_INT_SBE | \
44 SDMMC_INT_EBE | SDMMC_INT_HLE)
45 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
46 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
47 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
48 DW_MCI_CMD_ERROR_FLAGS)
49 #define DW_MCI_SEND_STATUS 1
50 #define DW_MCI_RECV_STATUS 2
51 #define DW_MCI_DMA_THRESHOLD 16
53 #define DW_MCI_FREQ_MAX 200000000 /* unit: Hz */
54 #define DW_MCI_FREQ_MIN 100000 /* unit: Hz */
56 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
57 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
58 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
61 #define DESC_RING_BUF_SZ PAGE_SIZE
63 struct idmac_desc_64addr {
64 u32 des0; /* Control Descriptor */
65 #define IDMAC_OWN_CLR64(x) \
66 !((x) & cpu_to_le32(IDMAC_DES0_OWN))
68 u32 des1; /* Reserved */
70 u32 des2; /* Buffer sizes */
71 #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
72 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
73 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
75 u32 des3; /* Reserved */
77 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
78 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
80 u32 des6; /* Lower 32-bits of Next Descriptor Address */
81 u32 des7; /* Upper 32-bits of Next Descriptor Address */
85 __le32 des0; /* Control Descriptor */
86 #define IDMAC_DES0_DIC BIT(1)
87 #define IDMAC_DES0_LD BIT(2)
88 #define IDMAC_DES0_FD BIT(3)
89 #define IDMAC_DES0_CH BIT(4)
90 #define IDMAC_DES0_ER BIT(5)
91 #define IDMAC_DES0_CES BIT(30)
92 #define IDMAC_DES0_OWN BIT(31)
94 __le32 des1; /* Buffer sizes */
95 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
96 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
98 __le32 des2; /* buffer 1 physical address */
100 __le32 des3; /* buffer 2 physical address */
103 /* Each descriptor can transfer up to 4KB of data in chained mode */
104 #define DW_MCI_DESC_DATA_LENGTH 0x1000
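
/*
 * As an illustration of the sizing above (not part of the original
 * driver): since each chained descriptor moves at most
 * DW_MCI_DESC_DATA_LENGTH bytes, a hypothetical helper like this sketch
 * gives the number of IDMAC descriptors a transfer needs, e.g. a 64 KiB
 * request -> 16 descriptors.
 */
static inline unsigned int example_idmac_descs_needed(unsigned int bytes)
{
	/* one descriptor carries at most DW_MCI_DESC_DATA_LENGTH bytes */
	return DIV_ROUND_UP(bytes, DW_MCI_DESC_DATA_LENGTH);
}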
106 #if defined(CONFIG_DEBUG_FS)
107 static int dw_mci_req_show(struct seq_file *s, void *v)
109 struct dw_mci_slot *slot = s->private;
110 struct mmc_request *mrq;
111 struct mmc_command *cmd;
112 struct mmc_command *stop;
113 struct mmc_data *data;
115 /* Make sure we get a consistent snapshot */
116 spin_lock_bh(&slot->host->lock);
126 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
127 cmd->opcode, cmd->arg, cmd->flags,
128 cmd->resp[0], cmd->resp[1], cmd->resp[2],
129 cmd->resp[3], cmd->error);
131 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
132 data->bytes_xfered, data->blocks,
133 data->blksz, data->flags, data->error);
136 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137 stop->opcode, stop->arg, stop->flags,
138 stop->resp[0], stop->resp[1], stop->resp[2],
139 stop->resp[3], stop->error);
142 spin_unlock_bh(&slot->host->lock);
146 DEFINE_SHOW_ATTRIBUTE(dw_mci_req);
148 static int dw_mci_regs_show(struct seq_file *s, void *v)
150 struct dw_mci *host = s->private;
152 pm_runtime_get_sync(host->dev);
154 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
155 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
156 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
157 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
158 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
159 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
161 pm_runtime_put_autosuspend(host->dev);
165 DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);
167 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
169 struct mmc_host *mmc = slot->mmc;
170 struct dw_mci *host = slot->host;
174 root = mmc->debugfs_root;
178 node = debugfs_create_file("regs", S_IRUSR, root, host,
183 node = debugfs_create_file("req", S_IRUSR, root, slot,
188 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
192 node = debugfs_create_x32("pending_events", S_IRUSR, root,
193 (u32 *)&host->pending_events);
197 node = debugfs_create_x32("completed_events", S_IRUSR, root,
198 (u32 *)&host->completed_events);
205 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
207 #endif /* defined(CONFIG_DEBUG_FS) */
209 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
213 ctrl = mci_readl(host, CTRL);
215 mci_writel(host, CTRL, ctrl);
217 /* wait till resets clear */
218 if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
220 1, 500 * USEC_PER_MSEC)) {
222 "Timeout resetting block (ctrl reset %#x)\n",
230 static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
235 * Databook says that before issuing a new data transfer command
236 * we need to check to see if the card is busy. Data transfer commands
237 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
239 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
242 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
243 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
244 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
246 !(status & SDMMC_STATUS_BUSY),
247 10, 500 * USEC_PER_MSEC))
248 dev_err(host->dev, "Busy; trying anyway\n");
252 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
254 struct dw_mci *host = slot->host;
255 unsigned int cmd_status = 0;
257 mci_writel(host, CMDARG, arg);
258 wmb(); /* drain writebuffer */
259 dw_mci_wait_while_busy(host, cmd);
260 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
262 if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
263 !(cmd_status & SDMMC_CMD_START),
264 1, 500 * USEC_PER_MSEC))
265 dev_err(&slot->mmc->class_dev,
266 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
267 cmd, arg, cmd_status);
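
/*
 * A minimal usage sketch (assumed, mirroring calls made elsewhere in
 * this file): clock programming never goes out to the card; it is a
 * register-only command built from SDMMC_CMD_UPD_CLK, e.g.:
 *
 *	mci_writel(host, CLKDIV, div);
 *	mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 */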
270 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
272 struct dw_mci_slot *slot = mmc_priv(mmc);
273 struct dw_mci *host = slot->host;
276 cmd->error = -EINPROGRESS;
279 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
280 cmd->opcode == MMC_GO_IDLE_STATE ||
281 cmd->opcode == MMC_GO_INACTIVE_STATE ||
282 (cmd->opcode == SD_IO_RW_DIRECT &&
283 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
284 cmdr |= SDMMC_CMD_STOP;
285 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
286 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
288 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
291 /* Special bit makes CMD11 not die */
292 cmdr |= SDMMC_CMD_VOLT_SWITCH;
294 /* Change state to continue to handle CMD11 weirdness */
295 WARN_ON(slot->host->state != STATE_SENDING_CMD);
296 slot->host->state = STATE_SENDING_CMD11;
299 * We need to disable low power mode (automatic clock stop)
300 * while doing voltage switch so we don't confuse the card,
301 * since stopping the clock is a specific part of the UHS
302 * voltage change dance.
304 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
305 * unconditionally turned back on in dw_mci_setup_bus() if it's
306 * ever called with a non-zero clock. That shouldn't happen
307 * until the voltage change is all done.
309 clk_en_a = mci_readl(host, CLKENA);
310 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
311 mci_writel(host, CLKENA, clk_en_a);
312 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
313 SDMMC_CMD_PRV_DAT_WAIT, 0);
316 if (cmd->flags & MMC_RSP_PRESENT) {
317 /* We expect a response, so set this bit */
318 cmdr |= SDMMC_CMD_RESP_EXP;
319 if (cmd->flags & MMC_RSP_136)
320 cmdr |= SDMMC_CMD_RESP_LONG;
323 if (cmd->flags & MMC_RSP_CRC)
324 cmdr |= SDMMC_CMD_RESP_CRC;
327 cmdr |= SDMMC_CMD_DAT_EXP;
328 if (cmd->data->flags & MMC_DATA_WRITE)
329 cmdr |= SDMMC_CMD_DAT_WR;
332 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
333 cmdr |= SDMMC_CMD_USE_HOLD_REG;
338 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
340 struct mmc_command *stop;
346 stop = &host->stop_abort;
348 memset(stop, 0, sizeof(struct mmc_command));
350 if (cmdr == MMC_READ_SINGLE_BLOCK ||
351 cmdr == MMC_READ_MULTIPLE_BLOCK ||
352 cmdr == MMC_WRITE_BLOCK ||
353 cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
354 cmdr == MMC_SEND_TUNING_BLOCK ||
355 cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
356 stop->opcode = MMC_STOP_TRANSMISSION;
358 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
359 } else if (cmdr == SD_IO_RW_EXTENDED) {
360 stop->opcode = SD_IO_RW_DIRECT;
361 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
362 ((cmd->arg >> 28) & 0x7);
363 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
368 cmdr = stop->opcode | SDMMC_CMD_STOP |
369 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
371 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
372 cmdr |= SDMMC_CMD_USE_HOLD_REG;
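
/*
 * Worked example of the CMD52 abort argument built above (illustrative
 * values): aborting a CMD53 on function 2 (cmd->arg bits 30:28 = 0x2)
 * yields
 *
 *	  (1 << 31)		write flag
 *	| (SDIO_CCCR_ABORT << 9)	register address 0x06 -> 0x0c00
 *	| 0x2			function number in the data bits
 *	= 0x80000c02
 */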
377 static inline void dw_mci_set_cto(struct dw_mci *host)
379 unsigned int cto_clks;
380 unsigned int cto_div;
382 unsigned long irqflags;
384 cto_clks = mci_readl(host, TMOUT) & 0xff;
385 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
389 cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
392 /* add a bit of spare time */
396 * The durations we're working with are fairly short so we have to be
397 * extra careful about synchronization here. Specifically in hardware a
398 * command timeout is _at most_ 5.1 ms, so that means we expect an
399 * interrupt (either command done or timeout) to come rather quickly
400 * after the mci_writel. ...but just in case we have a long interrupt
401 * latency let's add a bit of paranoia.
403 * In general we'll assume that at least an interrupt will be asserted
404 * in hardware by the time the cto_timer runs. ...and if it hasn't
405 * been asserted in hardware by that time then we'll assume it'll never
408 spin_lock_irqsave(&host->irq_lock, irqflags);
409 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
410 mod_timer(&host->cto_timer,
411 jiffies + msecs_to_jiffies(cto_ms) + 1);
412 spin_unlock_irqrestore(&host->irq_lock, irqflags);
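
/*
 * Illustration of the timeout arithmetic above, with assumed values:
 * cto_clks = 0x40 (64 card clocks), CLKDIV = 4 so cto_div = 8, and
 * bus_hz = 50 MHz:
 *
 *	cto_ms = DIV_ROUND_UP_ULL(1000 * 64 * 8, 50000000)
 *	       = DIV_ROUND_UP_ULL(512000, 50000000) = 1 ms
 *
 * so the software timer is armed for cto_ms plus the small spare
 * margin added just above, plus one jiffy.
 */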
415 static void dw_mci_start_command(struct dw_mci *host,
416 struct mmc_command *cmd, u32 cmd_flags)
420 "start command: ARGR=0x%08x CMDR=0x%08x\n",
421 cmd->arg, cmd_flags);
423 mci_writel(host, CMDARG, cmd->arg);
424 wmb(); /* drain writebuffer */
425 dw_mci_wait_while_busy(host, cmd_flags);
427 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
429 /* only for commands that expect a response */
430 if (cmd_flags & SDMMC_CMD_RESP_EXP)
431 dw_mci_set_cto(host);
434 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
436 struct mmc_command *stop = &host->stop_abort;
438 dw_mci_start_command(host, stop, host->stop_cmdr);
441 /* DMA interface functions */
442 static void dw_mci_stop_dma(struct dw_mci *host)
444 if (host->using_dma) {
445 host->dma_ops->stop(host);
446 host->dma_ops->cleanup(host);
449 /* Data transfer was stopped by the interrupt handler */
450 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
453 static void dw_mci_dma_cleanup(struct dw_mci *host)
455 struct mmc_data *data = host->data;
457 if (data && data->host_cookie == COOKIE_MAPPED) {
458 dma_unmap_sg(host->dev,
461 mmc_get_dma_dir(data));
462 data->host_cookie = COOKIE_UNMAPPED;
466 static void dw_mci_idmac_reset(struct dw_mci *host)
468 u32 bmod = mci_readl(host, BMOD);
469 /* Software reset of DMA */
470 bmod |= SDMMC_IDMAC_SWRESET;
471 mci_writel(host, BMOD, bmod);
474 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
478 /* Disable and reset the IDMAC interface */
479 temp = mci_readl(host, CTRL);
480 temp &= ~SDMMC_CTRL_USE_IDMAC;
481 temp |= SDMMC_CTRL_DMA_RESET;
482 mci_writel(host, CTRL, temp);
484 /* Stop the IDMAC running */
485 temp = mci_readl(host, BMOD);
486 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
487 temp |= SDMMC_IDMAC_SWRESET;
488 mci_writel(host, BMOD, temp);
491 static void dw_mci_dmac_complete_dma(void *arg)
493 struct dw_mci *host = arg;
494 struct mmc_data *data = host->data;
496 dev_vdbg(host->dev, "DMA complete\n");
498 if ((host->use_dma == TRANS_MODE_EDMAC) &&
499 data && (data->flags & MMC_DATA_READ))
500 /* Invalidate cache after read */
501 dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
506 host->dma_ops->cleanup(host);
509 * If the card was removed, data will be NULL. No point in trying to
510 * send the stop command or waiting for NBUSY in this case.
513 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
514 tasklet_schedule(&host->tasklet);
518 static int dw_mci_idmac_init(struct dw_mci *host)
522 if (host->dma_64bit_address == 1) {
523 struct idmac_desc_64addr *p;
524 /* Number of descriptors in the ring buffer */
526 DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
528 /* Forward link the descriptor list */
529 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
531 p->des6 = (host->sg_dma +
532 (sizeof(struct idmac_desc_64addr) *
533 (i + 1))) & 0xffffffff;
535 p->des7 = (u64)(host->sg_dma +
536 (sizeof(struct idmac_desc_64addr) *
538 /* Initialize reserved and buffer size fields to "0" */
545 /* Set the last descriptor as the end-of-ring descriptor */
546 p->des6 = host->sg_dma & 0xffffffff;
547 p->des7 = (u64)host->sg_dma >> 32;
548 p->des0 = IDMAC_DES0_ER;
551 struct idmac_desc *p;
552 /* Number of descriptors in the ring buffer */
554 DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
556 /* Forward link the descriptor list */
557 for (i = 0, p = host->sg_cpu;
558 i < host->ring_size - 1;
560 p->des3 = cpu_to_le32(host->sg_dma +
561 (sizeof(struct idmac_desc) * (i + 1)));
566 /* Set the last descriptor as the end-of-ring descriptor */
567 p->des3 = cpu_to_le32(host->sg_dma);
568 p->des0 = cpu_to_le32(IDMAC_DES0_ER);
571 dw_mci_idmac_reset(host);
573 if (host->dma_64bit_address == 1) {
574 /* Mask out interrupts - get Tx & Rx complete only */
575 mci_writel(host, IDSTS64, IDMAC_INT_CLR);
576 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
577 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
579 /* Set the descriptor base address */
580 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
581 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
584 /* Mask out interrupts - get Tx & Rx complete only */
585 mci_writel(host, IDSTS, IDMAC_INT_CLR);
586 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
587 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
589 /* Set the descriptor base address */
590 mci_writel(host, DBADDR, host->sg_dma);
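
/*
 * Descriptor ring sizing, worked through (illustrative, assuming a
 * 4 KiB PAGE_SIZE): DESC_RING_BUF_SZ = 4096 with the 16-byte
 * struct idmac_desc gives ring_size = 256 descriptors, while the
 * 32-byte struct idmac_desc_64addr gives 128. At 4 KiB per descriptor
 * (DW_MCI_DESC_DATA_LENGTH), one ring therefore covers up to 1 MiB or
 * 512 KiB per request, respectively.
 */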
596 static inline int dw_mci_prepare_desc64(struct dw_mci *host,
597 struct mmc_data *data,
600 unsigned int desc_len;
601 struct idmac_desc_64addr *desc_first, *desc_last, *desc;
605 desc_first = desc_last = desc = host->sg_cpu;
607 for (i = 0; i < sg_len; i++) {
608 unsigned int length = sg_dma_len(&data->sg[i]);
610 u64 mem_addr = sg_dma_address(&data->sg[i]);
612 for ( ; length ; desc++) {
613 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
614 length : DW_MCI_DESC_DATA_LENGTH;
619 * Wait for the previous OWN-bit-clear operation of
620 * the IDMAC to finish, so we know this descriptor
621 * isn't still owned by the IDMAC; the IDMAC's write
622 * ops and the CPU's read ops are asynchronous.
624 if (readl_poll_timeout_atomic(&desc->des0, val,
625 !(val & IDMAC_DES0_OWN),
626 10, 100 * USEC_PER_MSEC))
630 * Set the OWN bit and disable interrupts
631 * for this descriptor
633 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
637 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
639 /* Physical address to DMA to/from */
640 desc->des4 = mem_addr & 0xffffffff;
641 desc->des5 = mem_addr >> 32;
643 /* Update physical address for the next desc */
644 mem_addr += desc_len;
646 /* Save pointer to the last descriptor */
651 /* Set first descriptor */
652 desc_first->des0 |= IDMAC_DES0_FD;
654 /* Set last descriptor */
655 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
656 desc_last->des0 |= IDMAC_DES0_LD;
660 /* restore the descriptor chain since it has been corrupted */
661 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
662 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
663 dw_mci_idmac_init(host);
668 static inline int dw_mci_prepare_desc32(struct dw_mci *host,
669 struct mmc_data *data,
672 unsigned int desc_len;
673 struct idmac_desc *desc_first, *desc_last, *desc;
677 desc_first = desc_last = desc = host->sg_cpu;
679 for (i = 0; i < sg_len; i++) {
680 unsigned int length = sg_dma_len(&data->sg[i]);
682 u32 mem_addr = sg_dma_address(&data->sg[i]);
684 for ( ; length ; desc++) {
685 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
686 length : DW_MCI_DESC_DATA_LENGTH;
691 * Wait for the previous OWN-bit-clear operation of
692 * the IDMAC to finish, so we know this descriptor
693 * isn't still owned by the IDMAC; the IDMAC's write
694 * ops and the CPU's read ops are asynchronous.
696 if (readl_poll_timeout_atomic(&desc->des0, val,
697 IDMAC_OWN_CLR64(val),
699 100 * USEC_PER_MSEC))
703 * Set the OWN bit and disable interrupts
704 * for this descriptor
706 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
711 IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
713 /* Physical address to DMA to/from */
714 desc->des2 = cpu_to_le32(mem_addr);
716 /* Update physical address for the next desc */
717 mem_addr += desc_len;
719 /* Save pointer to the last descriptor */
724 /* Set first descriptor */
725 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
727 /* Set last descriptor */
728 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
730 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
734 /* restore the descriptor chain since it has been corrupted */
735 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
736 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
737 dw_mci_idmac_init(host);
741 static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
746 if (host->dma_64bit_address == 1)
747 ret = dw_mci_prepare_desc64(host, host->data, sg_len);
749 ret = dw_mci_prepare_desc32(host, host->data, sg_len);
754 /* drain writebuffer */
757 /* Make sure to reset DMA in case we did PIO before this */
758 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
759 dw_mci_idmac_reset(host);
761 /* Select IDMAC interface */
762 temp = mci_readl(host, CTRL);
763 temp |= SDMMC_CTRL_USE_IDMAC;
764 mci_writel(host, CTRL, temp);
766 /* drain writebuffer */
769 /* Enable the IDMAC */
770 temp = mci_readl(host, BMOD);
771 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
772 mci_writel(host, BMOD, temp);
774 /* Start it running */
775 mci_writel(host, PLDMND, 1);
781 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
782 .init = dw_mci_idmac_init,
783 .start = dw_mci_idmac_start_dma,
784 .stop = dw_mci_idmac_stop_dma,
785 .complete = dw_mci_dmac_complete_dma,
786 .cleanup = dw_mci_dma_cleanup,
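
/*
 * A sketch of how these ops tables are meant to be consumed (assumed;
 * the selection code lives elsewhere in the driver): host->dma_ops is
 * pointed at either the IDMAC or the eDMAC table, and the rest of the
 * code only ever calls through the indirection, e.g.:
 *
 *	if (host->use_dma == TRANS_MODE_IDMAC)
 *		host->dma_ops = &dw_mci_idmac_ops;
 *	...
 *	if (host->dma_ops->init(host))
 *		goto no_dma;
 */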
789 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
791 dmaengine_terminate_async(host->dms->ch);
794 static int dw_mci_edmac_start_dma(struct dw_mci *host,
797 struct dma_slave_config cfg;
798 struct dma_async_tx_descriptor *desc = NULL;
799 struct scatterlist *sgl = host->data->sg;
800 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
801 u32 sg_elems = host->data->sg_len;
803 u32 fifo_offset = host->fifo_reg - host->regs;
806 /* Set external dma config: burst size, burst width */
807 cfg.dst_addr = host->phy_regs + fifo_offset;
808 cfg.src_addr = cfg.dst_addr;
809 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
810 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
812 /* Match burst msize with external dma config */
813 fifoth_val = mci_readl(host, FIFOTH);
814 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
815 cfg.src_maxburst = cfg.dst_maxburst;
817 if (host->data->flags & MMC_DATA_WRITE)
818 cfg.direction = DMA_MEM_TO_DEV;
820 cfg.direction = DMA_DEV_TO_MEM;
822 ret = dmaengine_slave_config(host->dms->ch, &cfg);
824 dev_err(host->dev, "Failed to config edmac.\n");
828 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
829 sg_len, cfg.direction,
830 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
832 dev_err(host->dev, "Can't prepare slave sg.\n");
836 /* Set dw_mci_dmac_complete_dma as callback */
837 desc->callback = dw_mci_dmac_complete_dma;
838 desc->callback_param = (void *)host;
839 dmaengine_submit(desc);
841 /* Flush cache before write */
842 if (host->data->flags & MMC_DATA_WRITE)
843 dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
844 sg_elems, DMA_TO_DEVICE);
846 dma_async_issue_pending(host->dms->ch);
851 static int dw_mci_edmac_init(struct dw_mci *host)
853 /* Request external dma channel */
854 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
858 host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
859 if (!host->dms->ch) {
860 dev_err(host->dev, "Failed to get external DMA channel.\n");
869 static void dw_mci_edmac_exit(struct dw_mci *host)
873 dma_release_channel(host->dms->ch);
874 host->dms->ch = NULL;
881 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
882 .init = dw_mci_edmac_init,
883 .exit = dw_mci_edmac_exit,
884 .start = dw_mci_edmac_start_dma,
885 .stop = dw_mci_edmac_stop_dma,
886 .complete = dw_mci_dmac_complete_dma,
887 .cleanup = dw_mci_dma_cleanup,
890 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
891 struct mmc_data *data,
894 struct scatterlist *sg;
895 unsigned int i, sg_len;
897 if (data->host_cookie == COOKIE_PRE_MAPPED)
901 * We don't do DMA on "complex" transfers, i.e. with
902 * non-word-aligned buffers or lengths. Also, we don't bother
903 * with all the DMA setup overhead for short transfers.
905 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
911 for_each_sg(data->sg, sg, data->sg_len, i) {
912 if (sg->offset & 3 || sg->length & 3)
916 sg_len = dma_map_sg(host->dev,
919 mmc_get_dma_dir(data));
923 data->host_cookie = cookie;
928 static void dw_mci_pre_req(struct mmc_host *mmc,
929 struct mmc_request *mrq)
931 struct dw_mci_slot *slot = mmc_priv(mmc);
932 struct mmc_data *data = mrq->data;
934 if (!slot->host->use_dma || !data)
937 /* This data might be unmapped at this time */
938 data->host_cookie = COOKIE_UNMAPPED;
940 if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
941 COOKIE_PRE_MAPPED) < 0)
942 data->host_cookie = COOKIE_UNMAPPED;
945 static void dw_mci_post_req(struct mmc_host *mmc,
946 struct mmc_request *mrq,
949 struct dw_mci_slot *slot = mmc_priv(mmc);
950 struct mmc_data *data = mrq->data;
952 if (!slot->host->use_dma || !data)
955 if (data->host_cookie != COOKIE_UNMAPPED)
956 dma_unmap_sg(slot->host->dev,
959 mmc_get_dma_dir(data));
960 data->host_cookie = COOKIE_UNMAPPED;
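
/*
 * Cookie lifecycle, summarized (illustrative): the mmc core may call
 * pre_req for the next request while the current one is still in
 * flight, so the mapping state travels with the data:
 *
 *	COOKIE_UNMAPPED   -> dw_mci_pre_req() maps -> COOKIE_PRE_MAPPED
 *	COOKIE_PRE_MAPPED -> dw_mci_pre_dma_transfer() reuses the mapping
 *	dw_mci_post_req() -> unmaps                -> COOKIE_UNMAPPED
 */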
963 static int dw_mci_get_cd(struct mmc_host *mmc)
966 struct dw_mci_slot *slot = mmc_priv(mmc);
967 struct dw_mci *host = slot->host;
968 int gpio_cd = mmc_gpio_get_cd(mmc);
970 /* Use platform get_cd function, else try onboard card detect */
971 if (((mmc->caps & MMC_CAP_NEEDS_POLL)
972 || !mmc_card_is_removable(mmc))) {
975 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
976 if (mmc->caps & MMC_CAP_NEEDS_POLL) {
977 dev_info(&mmc->class_dev,
978 "card is polling.\n");
980 dev_info(&mmc->class_dev,
981 "card is non-removable.\n");
983 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
987 } else if (gpio_cd >= 0)
990 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
993 spin_lock_bh(&host->lock);
994 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
995 dev_dbg(&mmc->class_dev, "card is present\n");
997 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
998 dev_dbg(&mmc->class_dev, "card is not present\n");
999 spin_unlock_bh(&host->lock);
1004 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
1006 unsigned int blksz = data->blksz;
1007 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
1008 u32 fifo_width = 1 << host->data_shift;
1009 u32 blksz_depth = blksz / fifo_width, fifoth_val;
1010 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
1011 int idx = ARRAY_SIZE(mszs) - 1;
1013 /* skip this for PIO transfers */
1017 tx_wmark = (host->fifo_depth) / 2;
1018 tx_wmark_invers = host->fifo_depth - tx_wmark;
1022 * if blksz is not a multiple of the FIFO width
1024 if (blksz % fifo_width)
1028 if (!((blksz_depth % mszs[idx]) ||
1029 (tx_wmark_invers % mszs[idx]))) {
1031 rx_wmark = mszs[idx] - 1;
1034 } while (--idx > 0);
1036 * If idx is '0', it won't be tried;
1037 * thus, the initial values are used
1040 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
1041 mci_writel(host, FIFOTH, fifoth_val);
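
/*
 * Worked example for the watermark search above (assumed values):
 * fifo_depth = 64 and blksz = 512 with a 4-byte FIFO width give
 * blksz_depth = 128, tx_wmark = 32 and tx_wmark_invers = 32. Scanning
 * mszs[] downward, bursts of 256, 128 and 64 all fail the divisibility
 * test, but index 4 (burst of 32) divides both 128 and 32, so the
 * result is msize = 4, rx_wmark = 31, tx_wmark = 32.
 */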
1044 static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1046 unsigned int blksz = data->blksz;
1047 u32 blksz_depth, fifo_depth;
1052 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
1053 * in the FIFO region, so we really shouldn't access it).
1055 if (host->verid < DW_MMC_240A ||
1056 (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
1060 * The card write threshold was introduced in 2.80a;
1061 * it's used when HS400 mode is enabled.
1063 if (data->flags & MMC_DATA_WRITE &&
1064 host->timing != MMC_TIMING_MMC_HS400)
1067 if (data->flags & MMC_DATA_WRITE)
1068 enable = SDMMC_CARD_WR_THR_EN;
1070 enable = SDMMC_CARD_RD_THR_EN;
1072 if (host->timing != MMC_TIMING_MMC_HS200 &&
1073 host->timing != MMC_TIMING_UHS_SDR104 &&
1074 host->timing != MMC_TIMING_MMC_HS400)
1077 blksz_depth = blksz / (1 << host->data_shift);
1078 fifo_depth = host->fifo_depth;
1080 if (blksz_depth > fifo_depth)
1084 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
1085 * If (blksz_depth) <  (fifo_depth >> 1), should be 'thld_size = blksz'
1086 * Currently just choose blksz.
1089 mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
1093 mci_writel(host, CDTHRCTL, 0);
1096 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
1098 unsigned long irqflags;
1102 host->using_dma = 0;
1104 /* If we don't have a channel, we can't do DMA */
1108 sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1110 host->dma_ops->stop(host);
1114 host->using_dma = 1;
1116 if (host->use_dma == TRANS_MODE_IDMAC)
1118 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
1119 (unsigned long)host->sg_cpu,
1120 (unsigned long)host->sg_dma,
1124 * Decide the MSIZE and RX/TX watermark.
1125 * If the current block size is the same as the previous size,
1126 * there is no need to update fifoth.
1128 if (host->prev_blksz != data->blksz)
1129 dw_mci_adjust_fifoth(host, data);
1131 /* Enable the DMA interface */
1132 temp = mci_readl(host, CTRL);
1133 temp |= SDMMC_CTRL_DMA_ENABLE;
1134 mci_writel(host, CTRL, temp);
1136 /* Disable RX/TX IRQs, let DMA handle it */
1137 spin_lock_irqsave(&host->irq_lock, irqflags);
1138 temp = mci_readl(host, INTMASK);
1139 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
1140 mci_writel(host, INTMASK, temp);
1141 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1143 if (host->dma_ops->start(host, sg_len)) {
1144 host->dma_ops->stop(host);
1145 /* We can't do DMA, try PIO for this one */
1147 "%s: fall back to PIO mode for current transfer\n",
1155 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1157 unsigned long irqflags;
1158 int flags = SG_MITER_ATOMIC;
1161 data->error = -EINPROGRESS;
1163 WARN_ON(host->data);
1167 if (data->flags & MMC_DATA_READ)
1168 host->dir_status = DW_MCI_RECV_STATUS;
1170 host->dir_status = DW_MCI_SEND_STATUS;
1172 dw_mci_ctrl_thld(host, data);
1174 if (dw_mci_submit_data_dma(host, data)) {
1175 if (host->data->flags & MMC_DATA_READ)
1176 flags |= SG_MITER_TO_SG;
1178 flags |= SG_MITER_FROM_SG;
1180 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1181 host->sg = data->sg;
1182 host->part_buf_start = 0;
1183 host->part_buf_count = 0;
1185 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1187 spin_lock_irqsave(&host->irq_lock, irqflags);
1188 temp = mci_readl(host, INTMASK);
1189 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1190 mci_writel(host, INTMASK, temp);
1191 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1193 temp = mci_readl(host, CTRL);
1194 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1195 mci_writel(host, CTRL, temp);
1198 * Use the initial fifoth_val for PIO mode. If wm_aligned
1199 * is set, we set the watermark to the data size.
1200 * If the next transfer may be done in DMA mode,
1201 * prev_blksz should be invalidated.
1203 if (host->wm_aligned)
1204 dw_mci_adjust_fifoth(host, data);
1206 mci_writel(host, FIFOTH, host->fifoth_val);
1207 host->prev_blksz = 0;
1210 * Keep the current block size.
1211 * It will be used to decide whether to update the
1212 * fifoth register next time.
1214 host->prev_blksz = data->blksz;
1218 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1220 struct dw_mci *host = slot->host;
1221 unsigned int clock = slot->clock;
1224 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
1226 /* We must continue to set bit 28 in CMD until the change is complete */
1227 if (host->state == STATE_WAITING_CMD11_DONE)
1228 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
1230 slot->mmc->actual_clock = 0;
1233 mci_writel(host, CLKENA, 0);
1234 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1235 } else if (clock != host->current_speed || force_clkinit) {
1236 div = host->bus_hz / clock;
1237 if (host->bus_hz % clock && host->bus_hz > clock)
1239 * move the + 1 after the divide to prevent
1240 * over-clocking the card.
1244 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1246 if ((clock != slot->__clk_old &&
1247 !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
1249 /* Silence the verbose log if called from a PM context */
1251 dev_info(&slot->mmc->class_dev,
1252 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1253 slot->id, host->bus_hz, clock,
1254 div ? ((host->bus_hz / div) >> 1) :
1258 * If the card is polled, display the message only
1259 * once at boot time.
1261 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
1262 slot->mmc->f_min == clock)
1263 set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
1267 mci_writel(host, CLKENA, 0);
1268 mci_writel(host, CLKSRC, 0);
1271 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1273 /* set clock to desired speed */
1274 mci_writel(host, CLKDIV, div);
1277 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1279 /* enable clock; only low power if no SDIO */
1280 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1281 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
1282 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1283 mci_writel(host, CLKENA, clk_en_a);
1286 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1288 /* keep the last clock value that was requested from core */
1289 slot->__clk_old = clock;
1290 slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
1294 host->current_speed = clock;
1296 /* Set the current slot bus width */
1297 mci_writel(host, CTYPE, (slot->ctype << slot->id));
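
/*
 * Divider arithmetic above, worked through (illustrative values):
 * CLKDIV holds div where the card clock is bus_hz / (2 * div), with
 * div = 0 meaning "bypass" (card clock = bus_hz). For bus_hz = 100 MHz
 * and a requested 400 kHz:
 *
 *	div = 100000000 / 400000 = 250, then DIV_ROUND_UP(250, 2) = 125
 *	actual_clock = (100000000 / 125) >> 1 = 400000 Hz
 */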
1300 static void __dw_mci_start_request(struct dw_mci *host,
1301 struct dw_mci_slot *slot,
1302 struct mmc_command *cmd)
1304 struct mmc_request *mrq;
1305 struct mmc_data *data;
1312 host->pending_events = 0;
1313 host->completed_events = 0;
1314 host->cmd_status = 0;
1315 host->data_status = 0;
1316 host->dir_status = 0;
1320 mci_writel(host, TMOUT, 0xFFFFFFFF);
1321 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1322 mci_writel(host, BLKSIZ, data->blksz);
1325 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1327 /* this is the first command, send the initialization clock */
1328 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1329 cmdflags |= SDMMC_CMD_INIT;
1332 dw_mci_submit_data(host, data);
1333 wmb(); /* drain writebuffer */
1336 dw_mci_start_command(host, cmd, cmdflags);
1338 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
1339 unsigned long irqflags;
1342 * Databook says to fail after 2ms w/ no response, but evidence
1343 * shows that sometimes the cmd11 interrupt takes over 130ms.
1344 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1345 * is just about to roll over.
1347 * We do this whole thing under spinlock and only if the
1348 * command hasn't already completed (indicating that the irq
1349 * already ran so we don't want the timeout).
1351 spin_lock_irqsave(&host->irq_lock, irqflags);
1352 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1353 mod_timer(&host->cmd11_timer,
1354 jiffies + msecs_to_jiffies(500) + 1);
1355 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1358 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
1361 static void dw_mci_start_request(struct dw_mci *host,
1362 struct dw_mci_slot *slot)
1364 struct mmc_request *mrq = slot->mrq;
1365 struct mmc_command *cmd;
1367 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1368 __dw_mci_start_request(host, slot, cmd);
1371 /* must be called with host->lock held */
1372 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1373 struct mmc_request *mrq)
1375 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1380 if (host->state == STATE_WAITING_CMD11_DONE) {
1381 dev_warn(&slot->mmc->class_dev,
1382 "Voltage change didn't complete\n");
1384 * this case isn't expected to happen, so we can
1385 * either crash here or just try to continue on
1386 * in the closest possible state
1388 host->state = STATE_IDLE;
1391 if (host->state == STATE_IDLE) {
1392 host->state = STATE_SENDING_CMD;
1393 dw_mci_start_request(host, slot);
1395 list_add_tail(&slot->queue_node, &host->queue);
1399 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1401 struct dw_mci_slot *slot = mmc_priv(mmc);
1402 struct dw_mci *host = slot->host;
1407 * The check for card presence and queueing of the request must be
1408 * atomic, otherwise the card could be removed in between and the
1409 * request wouldn't fail until another card was inserted.
1412 if (!dw_mci_get_cd(mmc)) {
1413 mrq->cmd->error = -ENOMEDIUM;
1414 mmc_request_done(mmc, mrq);
1418 spin_lock_bh(&host->lock);
1420 dw_mci_queue_request(host, slot, mrq);
1422 spin_unlock_bh(&host->lock);
1425 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1427 struct dw_mci_slot *slot = mmc_priv(mmc);
1428 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1432 switch (ios->bus_width) {
1433 case MMC_BUS_WIDTH_4:
1434 slot->ctype = SDMMC_CTYPE_4BIT;
1436 case MMC_BUS_WIDTH_8:
1437 slot->ctype = SDMMC_CTYPE_8BIT;
1440 /* set default 1 bit mode */
1441 slot->ctype = SDMMC_CTYPE_1BIT;
1444 regs = mci_readl(slot->host, UHS_REG);
1447 if (ios->timing == MMC_TIMING_MMC_DDR52 ||
1448 ios->timing == MMC_TIMING_UHS_DDR50 ||
1449 ios->timing == MMC_TIMING_MMC_HS400)
1450 regs |= ((0x1 << slot->id) << 16);
1452 regs &= ~((0x1 << slot->id) << 16);
1454 mci_writel(slot->host, UHS_REG, regs);
1455 slot->host->timing = ios->timing;
1458 * Use mirror of ios->clock to prevent race with mmc
1459 * core ios update when finding the minimum.
1461 slot->clock = ios->clock;
1463 if (drv_data && drv_data->set_ios)
1464 drv_data->set_ios(slot->host, ios);
1466 switch (ios->power_mode) {
1468 if (!IS_ERR(mmc->supply.vmmc)) {
1469 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1472 dev_err(slot->host->dev,
1473 "failed to enable vmmc regulator\n");
1474 /* return if vmmc failed to turn on */
1478 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1479 regs = mci_readl(slot->host, PWREN);
1480 regs |= (1 << slot->id);
1481 mci_writel(slot->host, PWREN, regs);
1484 if (!slot->host->vqmmc_enabled) {
1485 if (!IS_ERR(mmc->supply.vqmmc)) {
1486 ret = regulator_enable(mmc->supply.vqmmc);
1488 dev_err(slot->host->dev,
1489 "failed to enable vqmmc\n");
1491 slot->host->vqmmc_enabled = true;
1494 /* Keep track so we don't reset again */
1495 slot->host->vqmmc_enabled = true;
1498 /* Reset our state machine after powering on */
1499 dw_mci_ctrl_reset(slot->host,
1500 SDMMC_CTRL_ALL_RESET_FLAGS);
1503 /* Adjust clock / bus width after power is up */
1504 dw_mci_setup_bus(slot, false);
1508 /* Turn clock off before power goes down */
1509 dw_mci_setup_bus(slot, false);
1511 if (!IS_ERR(mmc->supply.vmmc))
1512 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1514 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
1515 regulator_disable(mmc->supply.vqmmc);
1516 slot->host->vqmmc_enabled = false;
1518 regs = mci_readl(slot->host, PWREN);
1519 regs &= ~(1 << slot->id);
1520 mci_writel(slot->host, PWREN, regs);
1526 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1527 slot->host->state = STATE_IDLE;
1530 static int dw_mci_card_busy(struct mmc_host *mmc)
1532 struct dw_mci_slot *slot = mmc_priv(mmc);
1536 * Check the busy bit which is low when DAT[3:0]
1537 * (the data lines) are 0000
1539 status = mci_readl(slot->host, STATUS);
1541 return !!(status & SDMMC_STATUS_BUSY);
1544 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1546 struct dw_mci_slot *slot = mmc_priv(mmc);
1547 struct dw_mci *host = slot->host;
1548 const struct dw_mci_drv_data *drv_data = host->drv_data;
1550 u32 v18 = SDMMC_UHS_18V << slot->id;
1553 if (drv_data && drv_data->switch_voltage)
1554 return drv_data->switch_voltage(mmc, ios);
1557 * Program the voltage. Note that some instances of dw_mmc may use
1558 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
1559 * does no harm but you need to set the regulator directly. Try both.
1561 uhs = mci_readl(host, UHS_REG);
1562 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1567 if (!IS_ERR(mmc->supply.vqmmc)) {
1568 ret = mmc_regulator_set_vqmmc(mmc, ios);
1571 dev_dbg(&mmc->class_dev,
1572 "Regulator set error %d - %s V\n",
1573 ret, uhs & v18 ? "1.8" : "3.3");
1577 mci_writel(host, UHS_REG, uhs);
1582 static int dw_mci_get_ro(struct mmc_host *mmc)
1585 struct dw_mci_slot *slot = mmc_priv(mmc);
1586 int gpio_ro = mmc_gpio_get_ro(mmc);
1588 /* Use platform get_ro function, else try on-board write protect */
1590 read_only = gpio_ro;
1593 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1595 dev_dbg(&mmc->class_dev, "card is %s\n",
1596 read_only ? "read-only" : "read-write");
1601 static void dw_mci_hw_reset(struct mmc_host *mmc)
1603 struct dw_mci_slot *slot = mmc_priv(mmc);
1604 struct dw_mci *host = slot->host;
1607 if (host->use_dma == TRANS_MODE_IDMAC)
1608 dw_mci_idmac_reset(host);
1610 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
1611 SDMMC_CTRL_FIFO_RESET))
1615 * According to eMMC spec, card reset procedure:
1616 * tRstW >= 1us: RST_n pulse width
1617 * tRSCA >= 200us: RST_n to Command time
1618 * tRSTH >= 1us: RST_n high period
1620 reset = mci_readl(host, RST_N);
1621 reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
1622 mci_writel(host, RST_N, reset);
1624 reset |= SDMMC_RST_HWACTIVE << slot->id;
1625 mci_writel(host, RST_N, reset);
1626 usleep_range(200, 300);
1629 static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
1631 struct dw_mci_slot *slot = mmc_priv(mmc);
1632 struct dw_mci *host = slot->host;
1635 * Low power mode will stop the card clock when idle. According to the
1636 * description of the CLKENA register we should disable low power mode
1637 * for SDIO cards if we need SDIO interrupts to work.
1639 if (mmc->caps & MMC_CAP_SDIO_IRQ) {
1640 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1644 clk_en_a_old = mci_readl(host, CLKENA);
1646 if (card->type == MMC_TYPE_SDIO ||
1647 card->type == MMC_TYPE_SD_COMBO) {
1648 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1649 clk_en_a = clk_en_a_old & ~clken_low_pwr;
1651 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1652 clk_en_a = clk_en_a_old | clken_low_pwr;
1655 if (clk_en_a != clk_en_a_old) {
1656 mci_writel(host, CLKENA, clk_en_a);
1657 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1658 SDMMC_CMD_PRV_DAT_WAIT, 0);
1663 static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
1665 struct dw_mci *host = slot->host;
1666 unsigned long irqflags;
1669 spin_lock_irqsave(&host->irq_lock, irqflags);
1671 /* Enable/disable Slot Specific SDIO interrupt */
1672 int_mask = mci_readl(host, INTMASK);
1674 int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1676 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1677 mci_writel(host, INTMASK, int_mask);
1679 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1682 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1684 struct dw_mci_slot *slot = mmc_priv(mmc);
1685 struct dw_mci *host = slot->host;
1687 __dw_mci_enable_sdio_irq(slot, enb);
1689 /* Avoid runtime suspending the device when SDIO IRQ is enabled */
1691 pm_runtime_get_noresume(host->dev);
1693 pm_runtime_put_noidle(host->dev);
1696 static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
1698 struct dw_mci_slot *slot = mmc_priv(mmc);
1700 __dw_mci_enable_sdio_irq(slot, 1);
1703 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1705 struct dw_mci_slot *slot = mmc_priv(mmc);
1706 struct dw_mci *host = slot->host;
1707 const struct dw_mci_drv_data *drv_data = host->drv_data;
1710 if (drv_data && drv_data->execute_tuning)
1711 err = drv_data->execute_tuning(slot, opcode);
1715 static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1716 struct mmc_ios *ios)
1718 struct dw_mci_slot *slot = mmc_priv(mmc);
1719 struct dw_mci *host = slot->host;
1720 const struct dw_mci_drv_data *drv_data = host->drv_data;
1722 if (drv_data && drv_data->prepare_hs400_tuning)
1723 return drv_data->prepare_hs400_tuning(host, ios);
1728 static bool dw_mci_reset(struct dw_mci *host)
1730 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1735 * Resetting generates a block interrupt, hence setting
1736 * the scatter-gather pointer to NULL.
1739 sg_miter_stop(&host->sg_miter);
1744 flags |= SDMMC_CTRL_DMA_RESET;
1746 if (dw_mci_ctrl_reset(host, flags)) {
1748 * In all cases we clear the RAWINTS
1749 * register to clear any interrupts.
1751 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1753 if (!host->use_dma) {
1758 /* Wait for dma_req to be cleared */
1759 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1761 !(status & SDMMC_STATUS_DMA_REQ),
1762 1, 500 * USEC_PER_MSEC)) {
1764 "%s: Timeout waiting for dma_req to be cleared\n",
1769 /* when using DMA next we reset the fifo again */
1770 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1773 /* if the controller reset bit did clear, then set clock regs */
1774 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1776 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1782 if (host->use_dma == TRANS_MODE_IDMAC)
1783 /* It is also required that we reinit idmac */
1784 dw_mci_idmac_init(host);
1789 /* After a CTRL reset we need to have CIU set clock registers */
1790 mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
1795 static const struct mmc_host_ops dw_mci_ops = {
1796 .request = dw_mci_request,
1797 .pre_req = dw_mci_pre_req,
1798 .post_req = dw_mci_post_req,
1799 .set_ios = dw_mci_set_ios,
1800 .get_ro = dw_mci_get_ro,
1801 .get_cd = dw_mci_get_cd,
1802 .hw_reset = dw_mci_hw_reset,
1803 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1804 .ack_sdio_irq = dw_mci_ack_sdio_irq,
1805 .execute_tuning = dw_mci_execute_tuning,
1806 .card_busy = dw_mci_card_busy,
1807 .start_signal_voltage_switch = dw_mci_switch_voltage,
1808 .init_card = dw_mci_init_card,
1809 .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
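
/*
 * How this ops table is expected to be wired up (assumed; the slot
 * init code is not shown in this excerpt): during probe each slot's
 * mmc_host gets
 *
 *	mmc->ops = &dw_mci_ops;
 *
 * after which the mmc core drives the controller exclusively through
 * these callbacks.
 */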
1812 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1813 __releases(&host->lock)
1814 __acquires(&host->lock)
1816 struct dw_mci_slot *slot;
1817 struct mmc_host *prev_mmc = host->slot->mmc;
1819 WARN_ON(host->cmd || host->data);
1821 host->slot->mrq = NULL;
1823 if (!list_empty(&host->queue)) {
1824 slot = list_entry(host->queue.next,
1825 struct dw_mci_slot, queue_node);
1826 list_del(&slot->queue_node);
1827 dev_vdbg(host->dev, "list not empty: %s is next\n",
1828 mmc_hostname(slot->mmc));
1829 host->state = STATE_SENDING_CMD;
1830 dw_mci_start_request(host, slot);
1832 dev_vdbg(host->dev, "list empty\n");
1834 if (host->state == STATE_SENDING_CMD11)
1835 host->state = STATE_WAITING_CMD11_DONE;
1837 host->state = STATE_IDLE;
1840 spin_unlock(&host->lock);
1841 mmc_request_done(prev_mmc, mrq);
1842 spin_lock(&host->lock);
1845 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1847 u32 status = host->cmd_status;
1849 host->cmd_status = 0;
1851 /* Read the response from the card (up to 16 bytes) */
1852 if (cmd->flags & MMC_RSP_PRESENT) {
1853 if (cmd->flags & MMC_RSP_136) {
1854 cmd->resp[3] = mci_readl(host, RESP0);
1855 cmd->resp[2] = mci_readl(host, RESP1);
1856 cmd->resp[1] = mci_readl(host, RESP2);
1857 cmd->resp[0] = mci_readl(host, RESP3);
1859 cmd->resp[0] = mci_readl(host, RESP0);
1866 if (status & SDMMC_INT_RTO)
1867 cmd->error = -ETIMEDOUT;
1868 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1869 cmd->error = -EILSEQ;
1870 else if (status & SDMMC_INT_RESP_ERR)
1878 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1880 u32 status = host->data_status;
1882 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1883 if (status & SDMMC_INT_DRTO) {
1884 data->error = -ETIMEDOUT;
1885 } else if (status & SDMMC_INT_DCRC) {
1886 data->error = -EILSEQ;
1887 } else if (status & SDMMC_INT_EBE) {
1888 if (host->dir_status ==
1889 DW_MCI_SEND_STATUS) {
1891 * No data CRC status was returned.
1892 * The number of bytes transferred
1893 * will be exaggerated in PIO mode.
1895 data->bytes_xfered = 0;
1896 data->error = -ETIMEDOUT;
1897 } else if (host->dir_status ==
1898 DW_MCI_RECV_STATUS) {
1899 data->error = -EILSEQ;
1902 /* SDMMC_INT_SBE is included */
1903 data->error = -EILSEQ;
1906 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1909 * After an error, there may be data lingering
1914 data->bytes_xfered = data->blocks * data->blksz;
1921 static void dw_mci_set_drto(struct dw_mci *host)
1923 unsigned int drto_clks;
1924 unsigned int drto_div;
1925 unsigned int drto_ms;
1926 unsigned long irqflags;
1928 drto_clks = mci_readl(host, TMOUT) >> 8;
1929 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
1933 drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
1936 /* add a bit of spare time */
1939 spin_lock_irqsave(&host->irq_lock, irqflags);
1940 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1941 mod_timer(&host->dto_timer,
1942 jiffies + msecs_to_jiffies(drto_ms));
1943 spin_unlock_irqrestore(&host->irq_lock, irqflags);
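
/*
 * TMOUT register layout as used above (per the shifts in this file):
 * bits [31:8] hold the data timeout in card clocks (hence the
 * "TMOUT >> 8" here), and bits [7:0] hold the response timeout used by
 * dw_mci_set_cto(). The millisecond conversion mirrors the CTO case:
 *
 *	drto_ms = DIV_ROUND_UP_ULL(1000ULL * drto_clks * drto_div, bus_hz)
 */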
1946 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
1948 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1952 * Really be certain that the timer has stopped. This is a bit of
1953 * paranoia and could only really happen if we had really bad
1954 * interrupt latency and the interrupt routine and timeout were
1955 * running concurrently so that the del_timer() in the interrupt
1956 * handler couldn't run.
1958 WARN_ON(del_timer_sync(&host->cto_timer));
1959 clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1964 static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
1966 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1969 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
1970 WARN_ON(del_timer_sync(&host->dto_timer));
1971 clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1976 static void dw_mci_tasklet_func(unsigned long priv)
1978 struct dw_mci *host = (struct dw_mci *)priv;
1979 struct mmc_data *data;
1980 struct mmc_command *cmd;
1981 struct mmc_request *mrq;
1982 enum dw_mci_state state;
1983 enum dw_mci_state prev_state;
1986 spin_lock(&host->lock);
1988 state = host->state;
1997 case STATE_WAITING_CMD11_DONE:
2000 case STATE_SENDING_CMD11:
2001 case STATE_SENDING_CMD:
2002 if (!dw_mci_clear_pending_cmd_complete(host))
2007 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2008 err = dw_mci_command_complete(host, cmd);
2009 if (cmd == mrq->sbc && !err) {
2010 __dw_mci_start_request(host, host->slot,
2015 if (cmd->data && err) {
2017 * During UHS tuning sequence, sending the stop
2018 * command after the response CRC error would
2019 * throw the system into a confused state
2020 * causing all future tuning phases to report
2023 * In such case controller will move into a data
2024 * transfer state after a response error or
2025 * response CRC error. Let's let that finish
2026 * before trying to send a stop, so we'll go to
2027 * STATE_SENDING_DATA.
2029 * Although letting the data transfer take place
2030 * will waste a bit of time (we already know
2031 * the command was bad), it can't cause any
2032 * errors since it's possible it would have
2033 * taken place anyway if this tasklet got
2034 * delayed. Allowing the transfer to take place
2035 * avoids races and keeps things simple.
2037 if ((err != -ETIMEDOUT) &&
2038 (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
2039 state = STATE_SENDING_DATA;
2043 dw_mci_stop_dma(host);
2044 send_stop_abort(host, data);
2045 state = STATE_SENDING_STOP;
2049 if (!cmd->data || err) {
2050 dw_mci_request_end(host, mrq);
2054 prev_state = state = STATE_SENDING_DATA;
2057 case STATE_SENDING_DATA:
2059 * We could get a data error and never a transfer
2060 * complete so we'd better check for it here.
2062 * Note that we don't really care if we also got a
2063 * transfer complete; stopping the DMA and sending an
2066 if (test_and_clear_bit(EVENT_DATA_ERROR,
2067 &host->pending_events)) {
2068 dw_mci_stop_dma(host);
2069 if (!(host->data_status & (SDMMC_INT_DRTO |
2071 send_stop_abort(host, data);
2072 state = STATE_DATA_ERROR;
2076 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2077 &host->pending_events)) {
2079 * If all the data-related interrupts haven't arrived
2080 * within the given time while in the data-read state, arm the DRTO timer.
2082 if (host->dir_status == DW_MCI_RECV_STATUS)
2083 dw_mci_set_drto(host);
2087 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2090 * Handle an EVENT_DATA_ERROR that might have shown up
2091 * before the transfer completed. This might not have
2092 * been caught by the check above because the interrupt
2093 * could have gone off between the previous check and
2094 * the check for transfer complete.
2096 * Technically this ought not be needed assuming we
2097 * get a DATA_COMPLETE eventually (we'll notice the
2098 * error and end the request), but it shouldn't hurt.
2100 * This has the advantage of sending the stop command.
2102 if (test_and_clear_bit(EVENT_DATA_ERROR,
2103 &host->pending_events)) {
2104 dw_mci_stop_dma(host);
2105 if (!(host->data_status & (SDMMC_INT_DRTO |
2107 send_stop_abort(host, data);
2108 state = STATE_DATA_ERROR;
2111 prev_state = state = STATE_DATA_BUSY;
2115 case STATE_DATA_BUSY:
2116 if (!dw_mci_clear_pending_data_complete(host)) {
2118 * The data error interrupt arrived, but the data-over
2119 * interrupt didn't come within the given time while in
2120 * the data-read state, so arm the DRTO timer.
2122 if (host->dir_status == DW_MCI_RECV_STATUS)
2123 dw_mci_set_drto(host);
2128 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2129 err = dw_mci_data_complete(host, data);
2132 if (!data->stop || mrq->sbc) {
2133 if (mrq->sbc && data->stop)
2134 data->stop->error = 0;
2135 dw_mci_request_end(host, mrq);
2139 /* stop command for open-ended transfer */
2141 send_stop_abort(host, data);
2144 * If we don't have a command complete now we'll
2145 * never get one since we just reset everything;
2146 * better end the request.
2148 * If we do have a command complete we'll fall
2149 * through to the SENDING_STOP command and
2150 * everything will be peachy keen.
2152 if (!test_bit(EVENT_CMD_COMPLETE,
2153 &host->pending_events)) {
2155 dw_mci_request_end(host, mrq);
2161 * If err is non-zero, the
2162 * stop-abort command has already been issued.
2164 prev_state = state = STATE_SENDING_STOP;
2168 case STATE_SENDING_STOP:
2169 if (!dw_mci_clear_pending_cmd_complete(host))
2172 /* CMD error in data command */
2173 if (mrq->cmd->error && mrq->data)
2179 if (!mrq->sbc && mrq->stop)
2180 dw_mci_command_complete(host, mrq->stop);
2182 host->cmd_status = 0;
2184 dw_mci_request_end(host, mrq);
2187 case STATE_DATA_ERROR:
2188 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2189 &host->pending_events))
2192 state = STATE_DATA_BUSY;
2195 } while (state != prev_state);
2197 host->state = state;
2199 spin_unlock(&host->lock);
2203 /* push final bytes to part_buf, only use during push */
2204 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2206 memcpy((void *)&host->part_buf, buf, cnt);
2207 host->part_buf_count = cnt;
2210 /* append bytes to part_buf, only use during push */
2211 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2213 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2214 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2215 host->part_buf_count += cnt;
2219 /* pull first bytes from part_buf, only use during pull */
2220 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2222 cnt = min_t(int, cnt, host->part_buf_count);
2224 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2226 host->part_buf_count -= cnt;
2227 host->part_buf_start += cnt;
2232 /* pull final bytes from the part_buf, assuming it's just been filled */
2233 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2235 memcpy(buf, &host->part_buf, cnt);
2236 host->part_buf_start = cnt;
2237 host->part_buf_count = (1 << host->data_shift) - cnt;
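
/*
 * part_buf mechanics, worked through for a 16-bit FIFO (illustrative):
 * pushing 5 bytes writes two full 16-bit words to the FIFO and parks
 * the odd final byte in part_buf (part_buf_count = 1); the next push,
 * or the end-of-data flush in dw_mci_push_data16(), completes the
 * word. On the pull side, whole words are read from the FIFO and
 * dw_mci_pull_final_bytes() parks any unconsumed remainder, which
 * dw_mci_pull_part_bytes() hands out first on the next call.
 */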
2240 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2242 struct mmc_data *data = host->data;
2245 /* try and push anything in the part_buf */
2246 if (unlikely(host->part_buf_count)) {
2247 int len = dw_mci_push_part_bytes(host, buf, cnt);
2251 if (host->part_buf_count == 2) {
2252 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2253 host->part_buf_count = 0;
2256 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2257 if (unlikely((unsigned long)buf & 0x1)) {
2259 u16 aligned_buf[64];
2260 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2261 int items = len >> 1;
2263 /* memcpy from input buffer into aligned buffer */
2264 memcpy(aligned_buf, buf, len);
2267 /* push data from aligned buffer into fifo */
2268 for (i = 0; i < items; ++i)
2269 mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2276 for (; cnt >= 2; cnt -= 2)
2277 mci_fifo_writew(host->fifo_reg, *pdata++);
2280 /* put anything remaining in the part_buf */
2282 dw_mci_set_part_bytes(host, buf, cnt);
2283 /* Push data if we have reached the expected data length */
2284 if ((data->bytes_xfered + init_cnt) ==
2285 (data->blksz * data->blocks))
2286 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2290 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2292 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2293 if (unlikely((unsigned long)buf & 0x1)) {
2295 /* pull data from fifo into aligned buffer */
2296 u16 aligned_buf[64];
2297 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2298 int items = len >> 1;
2301 for (i = 0; i < items; ++i)
2302 aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2303 /* memcpy from aligned buffer into output buffer */
2304 memcpy(buf, aligned_buf, len);
2313 for (; cnt >= 2; cnt -= 2)
2314 *pdata++ = mci_fifo_readw(host->fifo_reg);
2318 host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2319 dw_mci_pull_final_bytes(host, buf, cnt);
2323 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2325 struct mmc_data *data = host->data;
2328 /* try and push anything in the part_buf */
2329 if (unlikely(host->part_buf_count)) {
2330 int len = dw_mci_push_part_bytes(host, buf, cnt);
2334 if (host->part_buf_count == 4) {
2335 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2336 host->part_buf_count = 0;
2339 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2340 if (unlikely((unsigned long)buf & 0x3)) {
2342 u32 aligned_buf[32];
2343 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2344 int items = len >> 2;
2346 /* memcpy from input buffer into aligned buffer */
2347 memcpy(aligned_buf, buf, len);
2350 /* push data from aligned buffer into fifo */
2351 for (i = 0; i < items; ++i)
2352 mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
2359 for (; cnt >= 4; cnt -= 4)
2360 mci_fifo_writel(host->fifo_reg, *pdata++);
2363 /* put anything remaining in the part_buf */
2365 dw_mci_set_part_bytes(host, buf, cnt);
2366 /* Push data if we have reached the expected data length */
2367 if ((data->bytes_xfered + init_cnt) ==
2368 (data->blksz * data->blocks))
2369 mci_fifo_writel(host->fifo_reg, host->part_buf32);
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;
	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
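
/*
 * In the inner loop above, fcnt is the number of bytes that can be read
 * immediately: the FIFO word count from STATUS scaled to bytes by
 * data_shift, plus whatever already sits in part_buf.  For example, with a
 * 32-bit FIFO (data_shift == 2), SDMMC_GET_FCNT() == 8 and
 * part_buf_count == 3, up to 8 * 4 + 3 = 35 bytes can be pulled without
 * reading from an empty FIFO.
 */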
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
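
/*
 * The TX path mirrors the RX calculation: free FIFO space is
 * (fifo_depth - FCNT) words, scaled to bytes by data_shift, minus any bytes
 * already staged in part_buf.  E.g. with fifo_depth == 32, FCNT == 20 and
 * data_shift == 2, at most (32 - 20) * 4 = 48 bytes may be pushed before
 * the FIFO would overflow.
 */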
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	del_timer(&host->cto_timer);

	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static void dw_mci_handle_cd(struct dw_mci *host)
{
	struct dw_mci_slot *slot = host->slot;

	if (slot->mmc->ops->card_event)
		slot->mmc->ops->card_event(slot->mmc);
	mmc_detect_change(slot->mmc,
		msecs_to_jiffies(host->pdata->detect_delay_ms));
}
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	struct dw_mci_slot *slot = host->slot;
	unsigned long irqflags;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			del_timer(&host->cto_timer);
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
			mci_writel(host, RINTSTS,
				   SDMMC_INT_SDIO(slot->sdio_id));
			__dw_mci_enable_sdio_irq(slot, 0);
			sdio_signal_irq(slot->mmc);
		}
	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
					SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}
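
/*
 * Note the top-half/bottom-half split: the handler above only acknowledges
 * bits in RINTSTS, records the raw status, sets pending_events flags and
 * drains the FIFO for PIO transfers.  Request completion, error handling
 * and issuing the next command all happen in the tasklet
 * (dw_mci_tasklet_func, registered in dw_mci_probe() below), which runs
 * the host state machine outside hard-interrupt context.
 */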
static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct mmc_host *mmc = slot->mmc;
	int ctrl_id;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	/*
	 * Support MMC_CAP_ERASE by default;
	 * it is needed for the trim/discard/erase commands.
	 */
	mmc->caps |= MMC_CAP_ERASE;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}

	if (drv_data && drv_data->caps) {
		if (ctrl_id >= drv_data->num_caps) {
			dev_err(host->dev, "invalid controller id %d\n",
				ctrl_id);
			return -EINVAL;
		}
		mmc->caps |= drv_data->caps[ctrl_id];
	}

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	mmc->f_min = DW_MCI_FREQ_MIN;
	if (!mmc->f_max)
		mmc->f_max = DW_MCI_FREQ_MAX;

	/* Process SDIO IRQs through the sdio_irq_work. */
	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	return 0;
}
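
/*
 * The ctrl_id lookup above maps a controller instance to its entry in
 * drv_data->caps.  On DT platforms it comes from an "mshc" alias, so a
 * device tree containing, say, "aliases { mshc0 = &dwmmc0; };" resolves
 * that controller to ctrl_id 0 and applies drv_data->caps[0]; without an
 * alias (or without DT) the platform device id is used instead.
 */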
static int dw_mci_init_slot(struct dw_mci *host)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = 0;
	slot->sdio_id = host->sdio_id0 + slot->id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot = slot;

	mmc->ops = &dw_mci_ops;

	/* if there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	ret = dw_mci_init_slot_caps(slot);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
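
/*
 * Rough sizing example for the IDMAC limits above (illustrative numbers):
 * each chained descriptor moves at most DW_MCI_DESC_DATA_LENGTH (0x1000)
 * bytes, so a ring of 128 descriptors gives max_req_size =
 * 0x1000 * 128 = 512 KiB and max_blk_count = 512 KiB / 512 = 1024 blocks.
 * host->ring_size itself is derived from DESC_RING_BUF_SZ and the
 * descriptor size when the IDMAC is set up.
 */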
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot = NULL;
	mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((device_property_read_string_array(dev, "dma-names",
						       NULL, 0) < 0) ||
		    !device_property_present(dev, "dmas")) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}
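
/*
 * Concrete reading of the HCON[17:16] decode above: a controller
 * synthesized with the internal DMAC reports 2b'00 and ends up as
 * TRANS_MODE_IDMAC; one wired to an external DW-DMA or generic DMA engine
 * reports 2b'01 or 2b'10 and is driven through dw_mci_edmac_ops; 2b'11
 * advertises no DMA handshake at all, so only PIO remains.
 */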
static void dw_mci_cmd11_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cmd11_timer);

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static void dw_mci_cto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * If somehow we have very bad interrupt latency it's remotely possible
	 * that the timer could fire while the interrupt is still pending or
	 * while the interrupt is midway through running.  Let's be paranoid
	 * and detect those two cases.  Note that this paranoia is somewhat
	 * justified because in this function we don't actually cancel the
	 * pending command in the controller--we just assume it will never come.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected interrupt latency\n");
		goto exit;
	}

	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "CTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_CMD11:
	case STATE_SENDING_CMD:
	case STATE_SENDING_STOP:
		/*
		 * If CMD_DONE interrupt does NOT come in sending command
		 * state, we should notify the driver to terminate current
		 * transfer and report a command timeout to the core.
		 */
		host->cmd_status = SDMMC_INT_RTO;
		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static void dw_mci_dto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, dto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * The DTO timer is much longer than the CTO timer, so it's even less
	 * likely that we'll see these cases, but it pays to be paranoid.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & SDMMC_INT_DATA_OVER) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected data interrupt latency\n");
		goto exit;
	}

	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "DTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If DTO interrupt does NOT come in sending data state,
		 * we should notify the driver to terminate current transfer
		 * and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find the reset controller if one exists */
	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	device_property_read_u32(dev, "card-detect-delay",
				 &pdata->detect_delay_ms);

	device_property_read_u32(dev, "data-addr", &host->data_addr_override);

	if (device_property_present(dev, "fifo-watermark-aligned"))
		host->wm_aligned = true;

	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;

	/*
	 * No need for the CD interrupt if the slot polls for card insertion
	 * (e.g. because card detection is broken) or has a working CD GPIO.
	 */
	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
		return;

	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_CD;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}
}
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
			return -EPROBE_DEFER;
		} else if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (!IS_ERR(host->pdata->rstc)) {
		reset_control_assert(host->pdata->rstc);
		usleep_range(10, 50);
		reset_control_deassert(host->pdata->rstc);
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
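
	/*
	 * Worked example of the watermark encoding above, assuming the usual
	 * SDMMC_SET_FIFOTH layout (MSIZE in bits [30:28], RX_WMark in
	 * [27:16], TX_WMark in [11:0]): fifo_size == 32 programs
	 * MSIZE = 0x2 (burst of 8 transfers), RX_WMark = 15 and
	 * TX_WMark = 16, i.e. RXDR fires once the FIFO is more than half
	 * full and TXDR once it is at most half full.
	 */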

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * The data offset changed in the 2.40a spec; check the version-id
	 * and set the offset of the DATA register accordingly.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->data_addr_override)
		host->fifo_reg = host->regs + host->data_addr_override;
	else if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	ret = dw_mci_init_slot(host);
	if (ret) {
		dev_dbg(host->dev, "slot %d init failed\n", i);
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

err_clk_ciu:
	clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
void dw_mci_remove(struct dw_mci *host)
{
	dev_dbg(host->dev, "remove slot\n");
	if (host->slot)
		dw_mci_cleanup_slot(host->slot);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);
int dw_mci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register
	 * and invalidate prev_blksz with zero.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);

	/* Force setup bus to guarantee available clock output */
	dw_mci_setup_bus(host->slot, true);

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");