2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/dw_mmc.h>
34 #include <linux/bitops.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/workqueue.h>
38 #include <linux/of_gpio.h>
39 #include <linux/mmc/slot-gpio.h>
43 /* Common flag combinations */
44 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
45 SDMMC_INT_HTO | SDMMC_INT_SBE | \
47 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
49 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
51 #define DW_MCI_SEND_STATUS 1
52 #define DW_MCI_RECV_STATUS 2
53 #define DW_MCI_DMA_THRESHOLD 16
55 #define DW_MCI_FREQ_MAX 200000000 /* unit: Hz */
56 #define DW_MCI_FREQ_MIN 400000 /* unit: Hz */
58 #ifdef CONFIG_MMC_DW_IDMAC
59 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
65 u32 des0; /* Control Descriptor */
66 #define IDMAC_DES0_DIC BIT(1)
67 #define IDMAC_DES0_LD BIT(2)
68 #define IDMAC_DES0_FD BIT(3)
69 #define IDMAC_DES0_CH BIT(4)
70 #define IDMAC_DES0_ER BIT(5)
71 #define IDMAC_DES0_CES BIT(30)
72 #define IDMAC_DES0_OWN BIT(31)
74 u32 des1; /* Buffer sizes */
75 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
76 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
78 u32 des2; /* buffer 1 physical address */
80 u32 des3; /* buffer 2 physical address */
82 #endif /* CONFIG_MMC_DW_IDMAC */
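/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * how a single idmac_desc might be filled for one 512-byte buffer, along
 * the same lines as dw_mci_translate_sglist() further below.  A lone
 * descriptor is marked as both the first and last of the transfer, handed
 * to the IDMAC via the OWN bit, and points at the data through des2; des3
 * keeps whatever chain link dw_mci_idmac_init() already set up.
 */
#ifdef CONFIG_MMC_DW_IDMAC
static inline void example_fill_single_desc(struct idmac_desc *desc,
					    u32 buf_dma_addr)
{
	/* first + last descriptor of the transfer, owned by the IDMAC */
	desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_FD | IDMAC_DES0_LD;
	/* buffer 1 carries the whole 512-byte block */
	IDMAC_SET_BUFFER1_SIZE(desc, 512);
	/* DMA (physical) address of the data buffer */
	desc->des2 = buf_dma_addr;
}
#endif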
84 static const u8 tuning_blk_pattern_4bit[] = {
85 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
95 static const u8 tuning_blk_pattern_8bit[] = {
96 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
114 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
117 #if defined(CONFIG_DEBUG_FS)
118 static int dw_mci_req_show(struct seq_file *s, void *v)
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[3], cmd->error);
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[3], stop->error);
153 spin_unlock_bh(&slot->host->lock);
158 static int dw_mci_req_open(struct inode *inode, struct file *file)
160 return single_open(file, dw_mci_req_show, inode->i_private);
163 static const struct file_operations dw_mci_req_fops = {
164 .owner = THIS_MODULE,
165 .open = dw_mci_req_open,
168 .release = single_release,
171 static int dw_mci_regs_show(struct seq_file *s, void *v)
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
183 static int dw_mci_regs_open(struct inode *inode, struct file *file)
185 return single_open(file, dw_mci_regs_show, inode->i_private);
188 static const struct file_operations dw_mci_regs_fops = {
189 .owner = THIS_MODULE,
190 .open = dw_mci_regs_open,
193 .release = single_release,
196 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
203 root = mmc->debugfs_root;
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
236 #endif /* defined(CONFIG_DEBUG_FS) */
238 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
240 struct mmc_data *data;
241 struct dw_mci_slot *slot = mmc_priv(mmc);
242 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
244 cmd->error = -EINPROGRESS;
248 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
249 cmd->opcode == MMC_GO_IDLE_STATE ||
250 cmd->opcode == MMC_GO_INACTIVE_STATE ||
251 (cmd->opcode == SD_IO_RW_DIRECT &&
252 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
253 cmdr |= SDMMC_CMD_STOP;
254 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
255 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
257 if (cmd->flags & MMC_RSP_PRESENT) {
258 /* We expect a response, so set this bit */
259 cmdr |= SDMMC_CMD_RESP_EXP;
260 if (cmd->flags & MMC_RSP_136)
261 cmdr |= SDMMC_CMD_RESP_LONG;
264 if (cmd->flags & MMC_RSP_CRC)
265 cmdr |= SDMMC_CMD_RESP_CRC;
269 cmdr |= SDMMC_CMD_DAT_EXP;
270 if (data->flags & MMC_DATA_STREAM)
271 cmdr |= SDMMC_CMD_STRM_MODE;
272 if (data->flags & MMC_DATA_WRITE)
273 cmdr |= SDMMC_CMD_DAT_WR;
276 if (drv_data && drv_data->prepare_command)
277 drv_data->prepare_command(slot->host, &cmdr);
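/*
 * Worked example of the flag composition above (illustrative): a
 * single-block read such as CMD17 carries data and expects a short,
 * CRC-protected response, so on top of the raw opcode the register value
 * picks up SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 * SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP, plus whatever the platform's
 * prepare_command hook chooses to add.
 */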
282 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
284 struct mmc_command *stop;
290 stop = &host->stop_abort;
292 memset(stop, 0, sizeof(struct mmc_command));
294 if (cmdr == MMC_READ_SINGLE_BLOCK ||
295 cmdr == MMC_READ_MULTIPLE_BLOCK ||
296 cmdr == MMC_WRITE_BLOCK ||
297 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
298 stop->opcode = MMC_STOP_TRANSMISSION;
300 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
301 } else if (cmdr == SD_IO_RW_EXTENDED) {
302 stop->opcode = SD_IO_RW_DIRECT;
303 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
304 ((cmd->arg >> 28) & 0x7);
305 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
310 cmdr = stop->opcode | SDMMC_CMD_STOP |
311 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
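/*
 * Note on the CMD52 argument built above: per the SDIO I/O direct layout,
 * bit 31 selects a write, bits 30:28 address function 0 (the CCCR), bits
 * 25:9 carry the register address (SDIO_CCCR_ABORT), and the low data
 * bits hold the number of the function being aborted, copied from bits
 * 30:28 of the original CMD53 argument.
 */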
316 static void dw_mci_start_command(struct dw_mci *host,
317 struct mmc_command *cmd, u32 cmd_flags)
321 "start command: ARGR=0x%08x CMDR=0x%08x\n",
322 cmd->arg, cmd_flags);
324 mci_writel(host, CMDARG, cmd->arg);
327 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
330 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
332 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
333 dw_mci_start_command(host, stop, host->stop_cmdr);
336 /* DMA interface functions */
337 static void dw_mci_stop_dma(struct dw_mci *host)
339 if (host->using_dma) {
340 host->dma_ops->stop(host);
341 host->dma_ops->cleanup(host);
344 /* Data transfer was stopped by the interrupt handler */
345 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
348 static int dw_mci_get_dma_dir(struct mmc_data *data)
350 if (data->flags & MMC_DATA_WRITE)
351 return DMA_TO_DEVICE;
353 return DMA_FROM_DEVICE;
356 #ifdef CONFIG_MMC_DW_IDMAC
357 static void dw_mci_dma_cleanup(struct dw_mci *host)
359 struct mmc_data *data = host->data;
362 if (!data->host_cookie)
363 dma_unmap_sg(host->dev,
366 dw_mci_get_dma_dir(data));
369 static void dw_mci_idmac_reset(struct dw_mci *host)
371 u32 bmod = mci_readl(host, BMOD);
372 /* Software reset of DMA */
373 bmod |= SDMMC_IDMAC_SWRESET;
374 mci_writel(host, BMOD, bmod);
377 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
381 /* Disable and reset the IDMAC interface */
382 temp = mci_readl(host, CTRL);
383 temp &= ~SDMMC_CTRL_USE_IDMAC;
384 temp |= SDMMC_CTRL_DMA_RESET;
385 mci_writel(host, CTRL, temp);
387 /* Stop the IDMAC running */
388 temp = mci_readl(host, BMOD);
389 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
390 temp |= SDMMC_IDMAC_SWRESET;
391 mci_writel(host, BMOD, temp);
394 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
396 struct mmc_data *data = host->data;
398 dev_vdbg(host->dev, "DMA complete\n");
400 host->dma_ops->cleanup(host);
403 * If the card was removed, data will be NULL. No point in trying to
404 * send the stop command or waiting for NBUSY in this case.
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
408 tasklet_schedule(&host->tasklet);
412 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
416 struct idmac_desc *desc = host->sg_cpu;
418 for (i = 0; i < sg_len; i++, desc++) {
419 unsigned int length = sg_dma_len(&data->sg[i]);
420 u32 mem_addr = sg_dma_address(&data->sg[i]);
422 /* Set the OWN bit and disable interrupts for this descriptor */
423 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
426 IDMAC_SET_BUFFER1_SIZE(desc, length);
428 /* Physical address to DMA to/from */
429 desc->des2 = mem_addr;
432 /* Set first descriptor */
434 desc->des0 |= IDMAC_DES0_FD;
436 /* Set last descriptor */
437 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
438 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
439 desc->des0 |= IDMAC_DES0_LD;
444 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
448 dw_mci_translate_sglist(host, host->data, sg_len);
450 /* Select IDMAC interface */
451 temp = mci_readl(host, CTRL);
452 temp |= SDMMC_CTRL_USE_IDMAC;
453 mci_writel(host, CTRL, temp);
457 /* Enable the IDMAC */
458 temp = mci_readl(host, BMOD);
459 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
460 mci_writel(host, BMOD, temp);
462 /* Start it running */
463 mci_writel(host, PLDMND, 1);
466 static int dw_mci_idmac_init(struct dw_mci *host)
468 struct idmac_desc *p;
471 /* Number of descriptors in the ring buffer */
472 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
474 /* Forward link the descriptor list */
475 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
476 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
478 /* Set the last descriptor as the end-of-ring descriptor */
479 p->des3 = host->sg_dma;
480 p->des0 = IDMAC_DES0_ER;
482 dw_mci_idmac_reset(host);
484 /* Mask out interrupts - get Tx & Rx complete only */
485 mci_writel(host, IDSTS, IDMAC_INT_CLR);
486 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
489 /* Set the descriptor base address */
490 mci_writel(host, DBADDR, host->sg_dma);
494 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
495 .init = dw_mci_idmac_init,
496 .start = dw_mci_idmac_start_dma,
497 .stop = dw_mci_idmac_stop_dma,
498 .complete = dw_mci_idmac_complete_dma,
499 .cleanup = dw_mci_dma_cleanup,
501 #endif /* CONFIG_MMC_DW_IDMAC */
503 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
504 struct mmc_data *data,
507 struct scatterlist *sg;
508 unsigned int i, sg_len;
510 if (!next && data->host_cookie)
511 return data->host_cookie;
514 * We don't do DMA on "complex" transfers, i.e. with
515 * non-word-aligned buffers or lengths. Also, we don't bother
516 * with all the DMA setup overhead for short transfers.
518 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
524 for_each_sg(data->sg, sg, data->sg_len, i) {
525 if (sg->offset & 3 || sg->length & 3)
529 sg_len = dma_map_sg(host->dev,
532 dw_mci_get_dma_dir(data));
537 data->host_cookie = sg_len;
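/*
 * Example of the DMA eligibility rules above: a two-block,
 * 512-bytes-per-block request (1024 bytes) easily clears the 16-byte
 * DW_MCI_DMA_THRESHOLD, but if any scatterlist entry has an offset or
 * length that is not a multiple of four the function bails out before
 * mapping and the transfer is done in PIO mode instead.
 */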
542 static void dw_mci_pre_req(struct mmc_host *mmc,
543 struct mmc_request *mrq,
546 struct dw_mci_slot *slot = mmc_priv(mmc);
547 struct mmc_data *data = mrq->data;
549 if (!slot->host->use_dma || !data)
552 if (data->host_cookie) {
553 data->host_cookie = 0;
557 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
558 data->host_cookie = 0;
561 static void dw_mci_post_req(struct mmc_host *mmc,
562 struct mmc_request *mrq,
565 struct dw_mci_slot *slot = mmc_priv(mmc);
566 struct mmc_data *data = mrq->data;
568 if (!slot->host->use_dma || !data)
571 if (data->host_cookie)
572 dma_unmap_sg(slot->host->dev,
575 dw_mci_get_dma_dir(data));
576 data->host_cookie = 0;
579 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
581 #ifdef CONFIG_MMC_DW_IDMAC
582 unsigned int blksz = data->blksz;
583 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
584 u32 fifo_width = 1 << host->data_shift;
585 u32 blksz_depth = blksz / fifo_width, fifoth_val;
586 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
587 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
589 tx_wmark = (host->fifo_depth) / 2;
590 tx_wmark_invers = host->fifo_depth - tx_wmark;
594 * if blksz is not a multiple of the FIFO width
596 if (blksz % fifo_width) {
603 if (!((blksz_depth % mszs[idx]) ||
604 (tx_wmark_invers % mszs[idx]))) {
606 rx_wmark = mszs[idx] - 1;
611 * If idx drops to '0', that entry is never tried;
612 * thus the initial values are used.
615 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
616 mci_writel(host, FIFOTH, fifoth_val);
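/*
 * Worked example of the calculation above, assuming a 32-bit wide FIFO
 * that is 128 entries deep and a 512-byte block: blksz_depth = 512 / 4 =
 * 128, tx_wmark = 64 and tx_wmark_invers = 64.  Scanning mszs[] downwards,
 * the largest burst size that divides both 128 and 64 is 64, so FIFOTH is
 * programmed with that MSIZE encoding, rx_wmark = 63 and tx_wmark = 64.
 */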
620 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
622 unsigned int blksz = data->blksz;
623 u32 blksz_depth, fifo_depth;
626 WARN_ON(!(data->flags & MMC_DATA_READ));
628 if (host->timing != MMC_TIMING_MMC_HS200 &&
629 host->timing != MMC_TIMING_UHS_SDR104)
632 blksz_depth = blksz / (1 << host->data_shift);
633 fifo_depth = host->fifo_depth;
635 if (blksz_depth > fifo_depth)
639 * If blksz_depth >= (fifo_depth >> 1), thld_size should satisfy thld_size <= blksz.
640 * If blksz_depth < (fifo_depth >> 1), thld_size should be blksz.
641 * Currently just choose blksz.
644 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
648 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
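/*
 * For instance, an HS200 read of 512-byte blocks through a 32-bit,
 * 128-entry FIFO gives blksz_depth = 128, which still fits the FIFO, so
 * the card read threshold is enabled with the block size as the threshold
 * value; a block deeper than the FIFO disables the threshold instead.
 */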
651 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
658 /* If we don't have a channel, we can't do DMA */
662 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
664 host->dma_ops->stop(host);
671 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
672 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
676 * Decide the MSIZE and RX/TX watermarks.
677 * If the current block size is the same as the previous one,
678 * there is no need to update FIFOTH.
680 if (host->prev_blksz != data->blksz)
681 dw_mci_adjust_fifoth(host, data);
683 /* Enable the DMA interface */
684 temp = mci_readl(host, CTRL);
685 temp |= SDMMC_CTRL_DMA_ENABLE;
686 mci_writel(host, CTRL, temp);
688 /* Disable RX/TX IRQs, let DMA handle it */
689 temp = mci_readl(host, INTMASK);
690 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
691 mci_writel(host, INTMASK, temp);
693 host->dma_ops->start(host, sg_len);
698 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
702 data->error = -EINPROGRESS;
708 if (data->flags & MMC_DATA_READ) {
709 host->dir_status = DW_MCI_RECV_STATUS;
710 dw_mci_ctrl_rd_thld(host, data);
712 host->dir_status = DW_MCI_SEND_STATUS;
715 if (dw_mci_submit_data_dma(host, data)) {
716 int flags = SG_MITER_ATOMIC;
717 if (host->data->flags & MMC_DATA_READ)
718 flags |= SG_MITER_TO_SG;
720 flags |= SG_MITER_FROM_SG;
722 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
724 host->part_buf_start = 0;
725 host->part_buf_count = 0;
727 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
728 temp = mci_readl(host, INTMASK);
729 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
730 mci_writel(host, INTMASK, temp);
732 temp = mci_readl(host, CTRL);
733 temp &= ~SDMMC_CTRL_DMA_ENABLE;
734 mci_writel(host, CTRL, temp);
737 * Use the initial fifoth_val for PIO mode.
738 * If the next transfer may be done in DMA mode,
739 * prev_blksz should be invalidated.
741 mci_writel(host, FIFOTH, host->fifoth_val);
742 host->prev_blksz = 0;
745 * Keep the current block size.
746 * It will be used to decide whether to update the
747 * FIFOTH register next time.
749 host->prev_blksz = data->blksz;
753 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
755 struct dw_mci *host = slot->host;
756 unsigned long timeout = jiffies + msecs_to_jiffies(500);
757 unsigned int cmd_status = 0;
759 mci_writel(host, CMDARG, arg);
761 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
763 while (time_before(jiffies, timeout)) {
764 cmd_status = mci_readl(host, CMD);
765 if (!(cmd_status & SDMMC_CMD_START))
768 dev_err(&slot->mmc->class_dev,
769 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
770 cmd, arg, cmd_status);
773 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
775 struct dw_mci *host = slot->host;
776 unsigned int clock = slot->clock;
781 mci_writel(host, CLKENA, 0);
783 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
784 } else if (clock != host->current_speed || force_clkinit) {
785 div = host->bus_hz / clock;
786 if (host->bus_hz % clock && host->bus_hz > clock)
788 * round the divisor up (add 1 after the divide) to prevent
789 * over-clocking the card.
793 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
795 if ((clock << div) != slot->__clk_old || force_clkinit)
796 dev_info(&slot->mmc->class_dev,
797 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
798 slot->id, host->bus_hz, clock,
799 div ? ((host->bus_hz / div) >> 1) :
803 mci_writel(host, CLKENA, 0);
804 mci_writel(host, CLKSRC, 0);
808 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
810 /* set clock to desired speed */
811 mci_writel(host, CLKDIV, div);
815 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
817 /* enable clock; only low power if no SDIO */
818 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
819 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
820 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
821 mci_writel(host, CLKENA, clk_en_a);
825 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
827 /* keep the requested clock, reflecting the clock divisor */
828 slot->__clk_old = clock << div;
831 host->current_speed = clock;
833 /* Set the current slot bus width */
834 mci_writel(host, CTYPE, (slot->ctype << slot->id));
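/*
 * Worked example of the divider math above: with bus_hz = 100 MHz and a
 * requested card clock of 400 kHz, the raw divider is 250 with no
 * remainder, DIV_ROUND_UP(250, 2) gives a CLKDIV value of 125, and the
 * card is clocked at 100 MHz / (2 * 125) = 400 kHz.  Requesting the full
 * bus_hz instead programs CLKDIV to 0, i.e. the bus clock is passed
 * through undivided.
 */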
837 static void __dw_mci_start_request(struct dw_mci *host,
838 struct dw_mci_slot *slot,
839 struct mmc_command *cmd)
841 struct mmc_request *mrq;
842 struct mmc_data *data;
847 host->cur_slot = slot;
850 host->pending_events = 0;
851 host->completed_events = 0;
852 host->cmd_status = 0;
853 host->data_status = 0;
854 host->dir_status = 0;
858 mci_writel(host, TMOUT, 0xFFFFFFFF);
859 mci_writel(host, BYTCNT, data->blksz*data->blocks);
860 mci_writel(host, BLKSIZ, data->blksz);
863 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
865 /* this is the first command, send the initialization clock */
866 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
867 cmdflags |= SDMMC_CMD_INIT;
870 dw_mci_submit_data(host, data);
874 dw_mci_start_command(host, cmd, cmdflags);
877 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
879 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
882 static void dw_mci_start_request(struct dw_mci *host,
883 struct dw_mci_slot *slot)
885 struct mmc_request *mrq = slot->mrq;
886 struct mmc_command *cmd;
888 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
889 __dw_mci_start_request(host, slot, cmd);
892 /* must be called with host->lock held */
893 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
894 struct mmc_request *mrq)
896 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
901 if (host->state == STATE_IDLE) {
902 host->state = STATE_SENDING_CMD;
903 dw_mci_start_request(host, slot);
905 list_add_tail(&slot->queue_node, &host->queue);
909 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
911 struct dw_mci_slot *slot = mmc_priv(mmc);
912 struct dw_mci *host = slot->host;
917 * The check for card presence and queueing of the request must be
918 * atomic, otherwise the card could be removed in between and the
919 * request wouldn't fail until another card was inserted.
921 spin_lock_bh(&host->lock);
923 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
924 spin_unlock_bh(&host->lock);
925 mrq->cmd->error = -ENOMEDIUM;
926 mmc_request_done(mmc, mrq);
930 dw_mci_queue_request(host, slot, mrq);
932 spin_unlock_bh(&host->lock);
935 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
937 struct dw_mci_slot *slot = mmc_priv(mmc);
938 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
941 switch (ios->bus_width) {
942 case MMC_BUS_WIDTH_4:
943 slot->ctype = SDMMC_CTYPE_4BIT;
945 case MMC_BUS_WIDTH_8:
946 slot->ctype = SDMMC_CTYPE_8BIT;
949 /* set default 1 bit mode */
950 slot->ctype = SDMMC_CTYPE_1BIT;
953 regs = mci_readl(slot->host, UHS_REG);
956 if (ios->timing == MMC_TIMING_MMC_DDR52)
957 regs |= ((0x1 << slot->id) << 16);
959 regs &= ~((0x1 << slot->id) << 16);
961 mci_writel(slot->host, UHS_REG, regs);
962 slot->host->timing = ios->timing;
965 * Use mirror of ios->clock to prevent race with mmc
966 * core ios update when finding the minimum.
968 slot->clock = ios->clock;
970 if (drv_data && drv_data->set_ios)
971 drv_data->set_ios(slot->host, ios);
973 /* Slot specific timing and width adjustment */
974 dw_mci_setup_bus(slot, false);
976 switch (ios->power_mode) {
978 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
979 regs = mci_readl(slot->host, PWREN);
980 regs |= (1 << slot->id);
981 mci_writel(slot->host, PWREN, regs);
984 regs = mci_readl(slot->host, PWREN);
985 regs &= ~(1 << slot->id);
986 mci_writel(slot->host, PWREN, regs);
993 static int dw_mci_get_ro(struct mmc_host *mmc)
996 struct dw_mci_slot *slot = mmc_priv(mmc);
997 int gpio_ro = mmc_gpio_get_ro(mmc);
999 /* Use platform get_ro function, else try on-board write protect */
1000 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1002 else if (!IS_ERR_VALUE(gpio_ro))
1003 read_only = gpio_ro;
1006 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1008 dev_dbg(&mmc->class_dev, "card is %s\n",
1009 read_only ? "read-only" : "read-write");
1014 static int dw_mci_get_cd(struct mmc_host *mmc)
1017 struct dw_mci_slot *slot = mmc_priv(mmc);
1018 struct dw_mci_board *brd = slot->host->pdata;
1019 struct dw_mci *host = slot->host;
1020 int gpio_cd = mmc_gpio_get_cd(mmc);
1022 /* Use platform get_cd function, else try onboard card detect */
1023 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1025 else if (!IS_ERR_VALUE(gpio_cd))
1028 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1031 spin_lock_bh(&host->lock);
1033 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1034 dev_dbg(&mmc->class_dev, "card is present\n");
1036 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1037 dev_dbg(&mmc->class_dev, "card is not present\n");
1039 spin_unlock_bh(&host->lock);
1045 * Disable low power mode.
1047 * Low power mode will stop the card clock when idle. According to the
1048 * description of the CLKENA register we should disable low power mode
1049 * for SDIO cards if we need SDIO interrupts to work.
1051 * This function is fast if low power mode is already disabled.
1053 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1055 struct dw_mci *host = slot->host;
1057 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1059 clk_en_a = mci_readl(host, CLKENA);
1061 if (clk_en_a & clken_low_pwr) {
1062 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1063 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1064 SDMMC_CMD_PRV_DAT_WAIT, 0);
1068 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1070 struct dw_mci_slot *slot = mmc_priv(mmc);
1071 struct dw_mci *host = slot->host;
1074 /* Enable/disable Slot Specific SDIO interrupt */
1075 int_mask = mci_readl(host, INTMASK);
1078 * Turn off low power mode if it was enabled. This is a bit of
1079 * a heavy operation and we disable / enable IRQs a lot, so
1080 * we'll leave low power mode disabled and it will get
1081 * re-enabled again in dw_mci_setup_bus().
1083 dw_mci_disable_low_power(slot);
1085 mci_writel(host, INTMASK,
1086 (int_mask | SDMMC_INT_SDIO(slot->id)));
1088 mci_writel(host, INTMASK,
1089 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1093 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1095 struct dw_mci_slot *slot = mmc_priv(mmc);
1096 struct dw_mci *host = slot->host;
1097 const struct dw_mci_drv_data *drv_data = host->drv_data;
1098 struct dw_mci_tuning_data tuning_data;
1101 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1102 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1103 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1104 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1105 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1106 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1107 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1111 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1112 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1113 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1116 "Undefined command(%d) for tuning\n", opcode);
1120 if (drv_data && drv_data->execute_tuning)
1121 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1125 static const struct mmc_host_ops dw_mci_ops = {
1126 .request = dw_mci_request,
1127 .pre_req = dw_mci_pre_req,
1128 .post_req = dw_mci_post_req,
1129 .set_ios = dw_mci_set_ios,
1130 .get_ro = dw_mci_get_ro,
1131 .get_cd = dw_mci_get_cd,
1132 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1133 .execute_tuning = dw_mci_execute_tuning,
1136 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1137 __releases(&host->lock)
1138 __acquires(&host->lock)
1140 struct dw_mci_slot *slot;
1141 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1143 WARN_ON(host->cmd || host->data);
1145 host->cur_slot->mrq = NULL;
1147 if (!list_empty(&host->queue)) {
1148 slot = list_entry(host->queue.next,
1149 struct dw_mci_slot, queue_node);
1150 list_del(&slot->queue_node);
1151 dev_vdbg(host->dev, "list not empty: %s is next\n",
1152 mmc_hostname(slot->mmc));
1153 host->state = STATE_SENDING_CMD;
1154 dw_mci_start_request(host, slot);
1156 dev_vdbg(host->dev, "list empty\n");
1157 host->state = STATE_IDLE;
1160 spin_unlock(&host->lock);
1161 mmc_request_done(prev_mmc, mrq);
1162 spin_lock(&host->lock);
1165 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1167 u32 status = host->cmd_status;
1169 host->cmd_status = 0;
1171 /* Read the response from the card (up to 16 bytes) */
1172 if (cmd->flags & MMC_RSP_PRESENT) {
1173 if (cmd->flags & MMC_RSP_136) {
1174 cmd->resp[3] = mci_readl(host, RESP0);
1175 cmd->resp[2] = mci_readl(host, RESP1);
1176 cmd->resp[1] = mci_readl(host, RESP2);
1177 cmd->resp[0] = mci_readl(host, RESP3);
1179 cmd->resp[0] = mci_readl(host, RESP0);
1186 if (status & SDMMC_INT_RTO)
1187 cmd->error = -ETIMEDOUT;
1188 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1189 cmd->error = -EILSEQ;
1190 else if (status & SDMMC_INT_RESP_ERR)
1196 /* newer IP versions need a delay between retries */
1197 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1204 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1206 u32 status = host->data_status;
1208 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1209 if (status & SDMMC_INT_DRTO) {
1210 data->error = -ETIMEDOUT;
1211 } else if (status & SDMMC_INT_DCRC) {
1212 data->error = -EILSEQ;
1213 } else if (status & SDMMC_INT_EBE) {
1214 if (host->dir_status ==
1215 DW_MCI_SEND_STATUS) {
1217 * No data CRC status was returned.
1218 * The number of bytes transferred
1219 * will be exaggerated in PIO mode.
1221 data->bytes_xfered = 0;
1222 data->error = -ETIMEDOUT;
1223 } else if (host->dir_status ==
1224 DW_MCI_RECV_STATUS) {
1228 /* SDMMC_INT_SBE is included */
1232 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1235 * After an error, there may be data lingering in the FIFO.
1238 dw_mci_fifo_reset(host);
1240 data->bytes_xfered = data->blocks * data->blksz;
1247 static void dw_mci_tasklet_func(unsigned long priv)
1249 struct dw_mci *host = (struct dw_mci *)priv;
1250 struct mmc_data *data;
1251 struct mmc_command *cmd;
1252 struct mmc_request *mrq;
1253 enum dw_mci_state state;
1254 enum dw_mci_state prev_state;
1257 spin_lock(&host->lock);
1259 state = host->state;
1270 case STATE_SENDING_CMD:
1271 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1272 &host->pending_events))
1277 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1278 err = dw_mci_command_complete(host, cmd);
1279 if (cmd == mrq->sbc && !err) {
1280 prev_state = state = STATE_SENDING_CMD;
1281 __dw_mci_start_request(host, host->cur_slot,
1286 if (cmd->data && err) {
1287 dw_mci_stop_dma(host);
1288 send_stop_abort(host, data);
1289 state = STATE_SENDING_STOP;
1293 if (!cmd->data || err) {
1294 dw_mci_request_end(host, mrq);
1298 prev_state = state = STATE_SENDING_DATA;
1301 case STATE_SENDING_DATA:
1302 if (test_and_clear_bit(EVENT_DATA_ERROR,
1303 &host->pending_events)) {
1304 dw_mci_stop_dma(host);
1305 send_stop_abort(host, data);
1306 state = STATE_DATA_ERROR;
1310 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1311 &host->pending_events))
1314 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1315 prev_state = state = STATE_DATA_BUSY;
1318 case STATE_DATA_BUSY:
1319 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1320 &host->pending_events))
1324 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1325 err = dw_mci_data_complete(host, data);
1328 if (!data->stop || mrq->sbc) {
1329 if (mrq->sbc && data->stop)
1330 data->stop->error = 0;
1331 dw_mci_request_end(host, mrq);
1335 /* stop command for open-ended transfer */
1337 send_stop_abort(host, data);
1341 * If err is non-zero, the stop/abort
1342 * command has already been issued.
1344 prev_state = state = STATE_SENDING_STOP;
1348 case STATE_SENDING_STOP:
1349 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1350 &host->pending_events))
1353 /* CMD error in data command */
1354 if (mrq->cmd->error && mrq->data)
1355 dw_mci_fifo_reset(host);
1361 dw_mci_command_complete(host, mrq->stop);
1363 host->cmd_status = 0;
1365 dw_mci_request_end(host, mrq);
1368 case STATE_DATA_ERROR:
1369 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1370 &host->pending_events))
1373 state = STATE_DATA_BUSY;
1376 } while (state != prev_state);
1378 host->state = state;
1380 spin_unlock(&host->lock);
1384 /* push final bytes to part_buf, only use during push */
1385 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1387 memcpy((void *)&host->part_buf, buf, cnt);
1388 host->part_buf_count = cnt;
1391 /* append bytes to part_buf, only use during push */
1392 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1394 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1395 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1396 host->part_buf_count += cnt;
1400 /* pull first bytes from part_buf, only use during pull */
1401 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1403 cnt = min(cnt, (int)host->part_buf_count);
1405 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1407 host->part_buf_count -= cnt;
1408 host->part_buf_start += cnt;
1413 /* pull final bytes from the part_buf, assuming it's just been filled */
1414 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1416 memcpy(buf, &host->part_buf, cnt);
1417 host->part_buf_start = cnt;
1418 host->part_buf_count = (1 << host->data_shift) - cnt;
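/*
 * Example of how the partial buffer is used, assuming a 32-bit FIFO
 * (data_shift = 2): pushing a 7-byte tail writes one full 32-bit word to
 * the FIFO and parks the remaining 3 bytes in part_buf; they are only
 * flushed once the accumulated count reaches blksz * blocks, so the FIFO
 * never sees a short, misaligned access in the middle of a block.
 */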
1421 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1423 struct mmc_data *data = host->data;
1426 /* try and push anything in the part_buf */
1427 if (unlikely(host->part_buf_count)) {
1428 int len = dw_mci_push_part_bytes(host, buf, cnt);
1431 if (host->part_buf_count == 2) {
1432 mci_writew(host, DATA(host->data_offset),
1434 host->part_buf_count = 0;
1437 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1438 if (unlikely((unsigned long)buf & 0x1)) {
1440 u16 aligned_buf[64];
1441 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1442 int items = len >> 1;
1444 /* memcpy from input buffer into aligned buffer */
1445 memcpy(aligned_buf, buf, len);
1448 /* push data from aligned buffer into fifo */
1449 for (i = 0; i < items; ++i)
1450 mci_writew(host, DATA(host->data_offset),
1457 for (; cnt >= 2; cnt -= 2)
1458 mci_writew(host, DATA(host->data_offset), *pdata++);
1461 /* put anything remaining in the part_buf */
1463 dw_mci_set_part_bytes(host, buf, cnt);
1464 /* Push data if we have reached the expected data length */
1465 if ((data->bytes_xfered + init_cnt) ==
1466 (data->blksz * data->blocks))
1467 mci_writew(host, DATA(host->data_offset),
1472 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1474 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1475 if (unlikely((unsigned long)buf & 0x1)) {
1477 /* pull data from fifo into aligned buffer */
1478 u16 aligned_buf[64];
1479 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1480 int items = len >> 1;
1482 for (i = 0; i < items; ++i)
1483 aligned_buf[i] = mci_readw(host,
1484 DATA(host->data_offset));
1485 /* memcpy from aligned buffer into output buffer */
1486 memcpy(buf, aligned_buf, len);
1494 for (; cnt >= 2; cnt -= 2)
1495 *pdata++ = mci_readw(host, DATA(host->data_offset));
1499 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1500 dw_mci_pull_final_bytes(host, buf, cnt);
1504 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1506 struct mmc_data *data = host->data;
1509 /* try and push anything in the part_buf */
1510 if (unlikely(host->part_buf_count)) {
1511 int len = dw_mci_push_part_bytes(host, buf, cnt);
1514 if (host->part_buf_count == 4) {
1515 mci_writel(host, DATA(host->data_offset),
1517 host->part_buf_count = 0;
1520 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1521 if (unlikely((unsigned long)buf & 0x3)) {
1523 u32 aligned_buf[32];
1524 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1525 int items = len >> 2;
1527 /* memcpy from input buffer into aligned buffer */
1528 memcpy(aligned_buf, buf, len);
1531 /* push data from aligned buffer into fifo */
1532 for (i = 0; i < items; ++i)
1533 mci_writel(host, DATA(host->data_offset),
1540 for (; cnt >= 4; cnt -= 4)
1541 mci_writel(host, DATA(host->data_offset), *pdata++);
1544 /* put anything remaining in the part_buf */
1546 dw_mci_set_part_bytes(host, buf, cnt);
1547 /* Push data if we have reached the expected data length */
1548 if ((data->bytes_xfered + init_cnt) ==
1549 (data->blksz * data->blocks))
1550 mci_writel(host, DATA(host->data_offset),
1555 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1557 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1558 if (unlikely((unsigned long)buf & 0x3)) {
1560 /* pull data from fifo into aligned buffer */
1561 u32 aligned_buf[32];
1562 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1563 int items = len >> 2;
1565 for (i = 0; i < items; ++i)
1566 aligned_buf[i] = mci_readl(host,
1567 DATA(host->data_offset));
1568 /* memcpy from aligned buffer into output buffer */
1569 memcpy(buf, aligned_buf, len);
1577 for (; cnt >= 4; cnt -= 4)
1578 *pdata++ = mci_readl(host, DATA(host->data_offset));
1582 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1583 dw_mci_pull_final_bytes(host, buf, cnt);
1587 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1589 struct mmc_data *data = host->data;
1592 /* try and push anything in the part_buf */
1593 if (unlikely(host->part_buf_count)) {
1594 int len = dw_mci_push_part_bytes(host, buf, cnt);
1598 if (host->part_buf_count == 8) {
1599 mci_writeq(host, DATA(host->data_offset),
1601 host->part_buf_count = 0;
1604 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1605 if (unlikely((unsigned long)buf & 0x7)) {
1607 u64 aligned_buf[16];
1608 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1609 int items = len >> 3;
1611 /* memcpy from input buffer into aligned buffer */
1612 memcpy(aligned_buf, buf, len);
1615 /* push data from aligned buffer into fifo */
1616 for (i = 0; i < items; ++i)
1617 mci_writeq(host, DATA(host->data_offset),
1624 for (; cnt >= 8; cnt -= 8)
1625 mci_writeq(host, DATA(host->data_offset), *pdata++);
1628 /* put anything remaining in the part_buf */
1630 dw_mci_set_part_bytes(host, buf, cnt);
1631 /* Push data if we have reached the expected data length */
1632 if ((data->bytes_xfered + init_cnt) ==
1633 (data->blksz * data->blocks))
1634 mci_writeq(host, DATA(host->data_offset),
1639 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1641 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1642 if (unlikely((unsigned long)buf & 0x7)) {
1644 /* pull data from fifo into aligned buffer */
1645 u64 aligned_buf[16];
1646 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1647 int items = len >> 3;
1649 for (i = 0; i < items; ++i)
1650 aligned_buf[i] = mci_readq(host,
1651 DATA(host->data_offset));
1652 /* memcpy from aligned buffer into output buffer */
1653 memcpy(buf, aligned_buf, len);
1661 for (; cnt >= 8; cnt -= 8)
1662 *pdata++ = mci_readq(host, DATA(host->data_offset));
1666 host->part_buf = mci_readq(host, DATA(host->data_offset));
1667 dw_mci_pull_final_bytes(host, buf, cnt);
1671 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1675 /* get remaining partial bytes */
1676 len = dw_mci_pull_part_bytes(host, buf, cnt);
1677 if (unlikely(len == cnt))
1682 /* get the rest of the data */
1683 host->pull_data(host, buf, cnt);
1686 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1688 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1690 unsigned int offset;
1691 struct mmc_data *data = host->data;
1692 int shift = host->data_shift;
1695 unsigned int remain, fcnt;
1698 if (!sg_miter_next(sg_miter))
1701 host->sg = sg_miter->piter.sg;
1702 buf = sg_miter->addr;
1703 remain = sg_miter->length;
1707 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1708 << shift) + host->part_buf_count;
1709 len = min(remain, fcnt);
1712 dw_mci_pull_data(host, (void *)(buf + offset), len);
1713 data->bytes_xfered += len;
1718 sg_miter->consumed = offset;
1719 status = mci_readl(host, MINTSTS);
1720 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1721 /* if the RXDR is ready read again */
1722 } while ((status & SDMMC_INT_RXDR) ||
1723 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1726 if (!sg_miter_next(sg_miter))
1728 sg_miter->consumed = 0;
1730 sg_miter_stop(sg_miter);
1734 sg_miter_stop(sg_miter);
1737 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1740 static void dw_mci_write_data_pio(struct dw_mci *host)
1742 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1744 unsigned int offset;
1745 struct mmc_data *data = host->data;
1746 int shift = host->data_shift;
1749 unsigned int fifo_depth = host->fifo_depth;
1750 unsigned int remain, fcnt;
1753 if (!sg_miter_next(sg_miter))
1756 host->sg = sg_miter->piter.sg;
1757 buf = sg_miter->addr;
1758 remain = sg_miter->length;
1762 fcnt = ((fifo_depth -
1763 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1764 << shift) - host->part_buf_count;
1765 len = min(remain, fcnt);
1768 host->push_data(host, (void *)(buf + offset), len);
1769 data->bytes_xfered += len;
1774 sg_miter->consumed = offset;
1775 status = mci_readl(host, MINTSTS);
1776 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1777 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1780 if (!sg_miter_next(sg_miter))
1782 sg_miter->consumed = 0;
1784 sg_miter_stop(sg_miter);
1788 sg_miter_stop(sg_miter);
1791 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1794 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1796 if (!host->cmd_status)
1797 host->cmd_status = status;
1801 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1802 tasklet_schedule(&host->tasklet);
1805 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1807 struct dw_mci *host = dev_id;
1811 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1814 * DTO fix - version 2.10a and below, and only if internal DMA
1817 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1819 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1820 pending |= SDMMC_INT_DATA_OVER;
1824 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1825 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1826 host->cmd_status = pending;
1828 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1831 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1832 /* if there is an error report DATA_ERROR */
1833 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1834 host->data_status = pending;
1836 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1837 tasklet_schedule(&host->tasklet);
1840 if (pending & SDMMC_INT_DATA_OVER) {
1841 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1842 if (!host->data_status)
1843 host->data_status = pending;
1845 if (host->dir_status == DW_MCI_RECV_STATUS) {
1846 if (host->sg != NULL)
1847 dw_mci_read_data_pio(host, true);
1849 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1850 tasklet_schedule(&host->tasklet);
1853 if (pending & SDMMC_INT_RXDR) {
1854 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1855 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1856 dw_mci_read_data_pio(host, false);
1859 if (pending & SDMMC_INT_TXDR) {
1860 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1861 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1862 dw_mci_write_data_pio(host);
1865 if (pending & SDMMC_INT_CMD_DONE) {
1866 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1867 dw_mci_cmd_interrupt(host, pending);
1870 if (pending & SDMMC_INT_CD) {
1871 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1872 queue_work(host->card_workqueue, &host->card_work);
1875 /* Handle SDIO Interrupts */
1876 for (i = 0; i < host->num_slots; i++) {
1877 struct dw_mci_slot *slot = host->slot[i];
1878 if (pending & SDMMC_INT_SDIO(i)) {
1879 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1880 mmc_signal_sdio_irq(slot->mmc);
1886 #ifdef CONFIG_MMC_DW_IDMAC
1887 /* Handle DMA interrupts */
1888 pending = mci_readl(host, IDSTS);
1889 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1890 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1891 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1892 host->dma_ops->complete(host);
1899 static void dw_mci_work_routine_card(struct work_struct *work)
1901 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1904 for (i = 0; i < host->num_slots; i++) {
1905 struct dw_mci_slot *slot = host->slot[i];
1906 struct mmc_host *mmc = slot->mmc;
1907 struct mmc_request *mrq;
1910 present = dw_mci_get_cd(mmc);
1911 while (present != slot->last_detect_state) {
1912 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1913 present ? "inserted" : "removed");
1915 spin_lock_bh(&host->lock);
1917 /* Card change detected */
1918 slot->last_detect_state = present;
1920 /* Clean up queue if present */
1923 if (mrq == host->mrq) {
1927 switch (host->state) {
1930 case STATE_SENDING_CMD:
1931 mrq->cmd->error = -ENOMEDIUM;
1935 case STATE_SENDING_DATA:
1936 mrq->data->error = -ENOMEDIUM;
1937 dw_mci_stop_dma(host);
1939 case STATE_DATA_BUSY:
1940 case STATE_DATA_ERROR:
1941 if (mrq->data->error == -EINPROGRESS)
1942 mrq->data->error = -ENOMEDIUM;
1944 case STATE_SENDING_STOP:
1946 mrq->stop->error = -ENOMEDIUM;
1950 dw_mci_request_end(host, mrq);
1952 list_del(&slot->queue_node);
1953 mrq->cmd->error = -ENOMEDIUM;
1955 mrq->data->error = -ENOMEDIUM;
1957 mrq->stop->error = -ENOMEDIUM;
1959 spin_unlock(&host->lock);
1960 mmc_request_done(slot->mmc, mrq);
1961 spin_lock(&host->lock);
1965 /* Power down slot */
1967 /* Clear down the FIFO */
1968 dw_mci_fifo_reset(host);
1969 #ifdef CONFIG_MMC_DW_IDMAC
1970 dw_mci_idmac_reset(host);
1975 spin_unlock_bh(&host->lock);
1977 present = dw_mci_get_cd(mmc);
1980 mmc_detect_change(slot->mmc,
1981 msecs_to_jiffies(host->pdata->detect_delay_ms));
1986 /* given a slot id, find out the device node representing that slot */
1987 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1989 struct device_node *np;
1993 if (!dev || !dev->of_node)
1996 for_each_child_of_node(dev->of_node, np) {
1997 addr = of_get_property(np, "reg", &len);
1998 if (!addr || (len < sizeof(int)))
2000 if (be32_to_cpup(addr) == slot)
2006 static struct dw_mci_of_slot_quirks {
2009 } of_slot_quirks[] = {
2011 .quirk = "disable-wp",
2012 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2016 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2018 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2023 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2024 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2025 quirks |= of_slot_quirks[idx].id;
2029 #else /* CONFIG_OF */
2030 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2034 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2038 #endif /* CONFIG_OF */
2040 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2042 struct mmc_host *mmc;
2043 struct dw_mci_slot *slot;
2044 const struct dw_mci_drv_data *drv_data = host->drv_data;
2048 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2052 slot = mmc_priv(mmc);
2056 host->slot[id] = slot;
2058 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2060 mmc->ops = &dw_mci_ops;
2061 if (of_property_read_u32_array(host->dev->of_node,
2062 "clock-freq-min-max", freq, 2)) {
2063 mmc->f_min = DW_MCI_FREQ_MIN;
2064 mmc->f_max = DW_MCI_FREQ_MAX;
2066 mmc->f_min = freq[0];
2067 mmc->f_max = freq[1];
2070 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2072 if (host->pdata->caps)
2073 mmc->caps = host->pdata->caps;
2075 if (host->pdata->pm_caps)
2076 mmc->pm_caps = host->pdata->pm_caps;
2078 if (host->dev->of_node) {
2079 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2083 ctrl_id = to_platform_device(host->dev)->id;
2085 if (drv_data && drv_data->caps)
2086 mmc->caps |= drv_data->caps[ctrl_id];
2088 if (host->pdata->caps2)
2089 mmc->caps2 = host->pdata->caps2;
2093 if (host->pdata->blk_settings) {
2094 mmc->max_segs = host->pdata->blk_settings->max_segs;
2095 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2096 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2097 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2098 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2100 /* Useful defaults if platform data is unset. */
2101 #ifdef CONFIG_MMC_DW_IDMAC
2102 mmc->max_segs = host->ring_size;
2103 mmc->max_blk_size = 65536;
2104 mmc->max_blk_count = host->ring_size;
2105 mmc->max_seg_size = 0x1000;
2106 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2109 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2110 mmc->max_blk_count = 512;
2111 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2112 mmc->max_seg_size = mmc->max_req_size;
2113 #endif /* CONFIG_MMC_DW_IDMAC */
2116 if (dw_mci_get_cd(mmc))
2117 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2119 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2121 ret = mmc_add_host(mmc);
2125 #if defined(CONFIG_DEBUG_FS)
2126 dw_mci_init_debugfs(slot);
2129 /* Card initially undetected */
2130 slot->last_detect_state = 0;
2139 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2141 /* Debugfs stuff is cleaned up by mmc core */
2142 mmc_remove_host(slot->mmc);
2143 slot->host->slot[id] = NULL;
2144 mmc_free_host(slot->mmc);
2147 static void dw_mci_init_dma(struct dw_mci *host)
2149 /* Alloc memory for sg translation */
2150 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2151 &host->sg_dma, GFP_KERNEL);
2152 if (!host->sg_cpu) {
2153 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2158 /* Determine which DMA interface to use */
2159 #ifdef CONFIG_MMC_DW_IDMAC
2160 host->dma_ops = &dw_mci_idmac_ops;
2161 dev_info(host->dev, "Using internal DMA controller.\n");
2167 if (host->dma_ops->init && host->dma_ops->start &&
2168 host->dma_ops->stop && host->dma_ops->cleanup) {
2169 if (host->dma_ops->init(host)) {
2170 dev_err(host->dev, "%s: Unable to initialize "
2171 "DMA Controller.\n", __func__);
2175 dev_err(host->dev, "DMA initialization not found.\n");
2183 dev_info(host->dev, "Using PIO mode.\n");
2188 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2190 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2193 ctrl = mci_readl(host, CTRL);
2195 mci_writel(host, CTRL, ctrl);
2197 /* wait till resets clear */
2199 ctrl = mci_readl(host, CTRL);
2200 if (!(ctrl & reset))
2202 } while (time_before(jiffies, timeout));
2205 "Timeout resetting block (ctrl reset %#x)\n",
2211 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2214 * Resetting generates a block interrupt, hence setting
2215 * the scatter-gather pointer to NULL.
2218 sg_miter_stop(&host->sg_miter);
2222 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2225 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2227 return dw_mci_ctrl_reset(host,
2228 SDMMC_CTRL_FIFO_RESET |
2230 SDMMC_CTRL_DMA_RESET);
2234 static struct dw_mci_of_quirks {
2239 .quirk = "broken-cd",
2240 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2244 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2246 struct dw_mci_board *pdata;
2247 struct device *dev = host->dev;
2248 struct device_node *np = dev->of_node;
2249 const struct dw_mci_drv_data *drv_data = host->drv_data;
2251 u32 clock_frequency;
2253 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2255 dev_err(dev, "could not allocate memory for pdata\n");
2256 return ERR_PTR(-ENOMEM);
2259 /* find out number of slots supported */
2260 if (of_property_read_u32(dev->of_node, "num-slots",
2261 &pdata->num_slots)) {
2262 dev_info(dev, "num-slots property not found, "
2263 "assuming 1 slot is available\n");
2264 pdata->num_slots = 1;
2268 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2269 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2270 pdata->quirks |= of_quirks[idx].id;
2272 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2273 dev_info(dev, "fifo-depth property not found, using "
2274 "value of FIFOTH register as default\n");
2276 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2278 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2279 pdata->bus_hz = clock_frequency;
2281 if (drv_data && drv_data->parse_dt) {
2282 ret = drv_data->parse_dt(host);
2284 return ERR_PTR(ret);
2287 if (of_find_property(np, "supports-highspeed", NULL))
2288 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2293 #else /* CONFIG_OF */
2294 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2296 return ERR_PTR(-EINVAL);
2298 #endif /* CONFIG_OF */
2300 int dw_mci_probe(struct dw_mci *host)
2302 const struct dw_mci_drv_data *drv_data = host->drv_data;
2303 int width, i, ret = 0;
2308 host->pdata = dw_mci_parse_dt(host);
2309 if (IS_ERR(host->pdata)) {
2310 dev_err(host->dev, "platform data not available\n");
2315 if (host->pdata->num_slots > 1) {
2317 "Platform data must supply num_slots.\n");
2321 host->biu_clk = devm_clk_get(host->dev, "biu");
2322 if (IS_ERR(host->biu_clk)) {
2323 dev_dbg(host->dev, "biu clock not available\n");
2325 ret = clk_prepare_enable(host->biu_clk);
2327 dev_err(host->dev, "failed to enable biu clock\n");
2332 host->ciu_clk = devm_clk_get(host->dev, "ciu");
2333 if (IS_ERR(host->ciu_clk)) {
2334 dev_dbg(host->dev, "ciu clock not available\n");
2335 host->bus_hz = host->pdata->bus_hz;
2337 ret = clk_prepare_enable(host->ciu_clk);
2339 dev_err(host->dev, "failed to enable ciu clock\n");
2343 if (host->pdata->bus_hz) {
2344 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2347 "Unable to set bus rate to %uHz\n",
2348 host->pdata->bus_hz);
2350 host->bus_hz = clk_get_rate(host->ciu_clk);
2353 if (!host->bus_hz) {
2355 "Platform data must supply bus speed\n");
2360 if (drv_data && drv_data->init) {
2361 ret = drv_data->init(host);
2364 "implementation specific init failed\n");
2369 if (drv_data && drv_data->setup_clock) {
2370 ret = drv_data->setup_clock(host);
2373 "implementation specific clock setup failed\n");
2378 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2379 if (IS_ERR(host->vmmc)) {
2380 ret = PTR_ERR(host->vmmc);
2381 if (ret == -EPROBE_DEFER)
2384 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2387 ret = regulator_enable(host->vmmc);
2389 if (ret != -EPROBE_DEFER)
2391 "regulator_enable fail: %d\n", ret);
2396 host->quirks = host->pdata->quirks;
2398 spin_lock_init(&host->lock);
2399 INIT_LIST_HEAD(&host->queue);
2402 * Get the host data width - this assumes that HCON has been set with
2403 * the correct values.
2405 i = (mci_readl(host, HCON) >> 7) & 0x7;
2407 host->push_data = dw_mci_push_data16;
2408 host->pull_data = dw_mci_pull_data16;
2410 host->data_shift = 1;
2411 } else if (i == 2) {
2412 host->push_data = dw_mci_push_data64;
2413 host->pull_data = dw_mci_pull_data64;
2415 host->data_shift = 3;
2417 /* Check for a reserved value, and warn if one is found */
2419 "HCON reports a reserved host data width!\n"
2420 "Defaulting to 32-bit access.\n");
2421 host->push_data = dw_mci_push_data32;
2422 host->pull_data = dw_mci_pull_data32;
2424 host->data_shift = 2;
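/*
 * HCON bits [9:7] report the data bus width the IP was configured with:
 * 0 selects the 16-bit accessors above, 2 the 64-bit ones, and anything
 * else (including the expected value 1) falls back to 32-bit access with
 * data_shift = 2, i.e. four bytes per FIFO access.
 */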
2427 /* Reset all blocks */
2428 if (!dw_mci_ctrl_all_reset(host))
2431 host->dma_ops = host->pdata->dma_ops;
2432 dw_mci_init_dma(host);
2434 /* Clear the interrupts for the host controller */
2435 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2436 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2438 /* Put in max timeout */
2439 mci_writel(host, TMOUT, 0xFFFFFFFF);
2442 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
2443 * TxMark = fifo_size / 2, DMA Size = 8
2445 if (!host->pdata->fifo_depth) {
2447 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2448 * have been overwritten by the bootloader, just like we're
2449 * about to do, so if you know the value for your hardware, you
2450 * should put it in the platform data.
2452 fifo_size = mci_readl(host, FIFOTH);
2453 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2455 fifo_size = host->pdata->fifo_depth;
2457 host->fifo_depth = fifo_size;
2459 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2460 mci_writel(host, FIFOTH, host->fifoth_val);
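/*
 * For example, a FIFO reported (or configured) as 128 entries deep ends
 * up with FIFOTH = SDMMC_SET_FIFOTH(0x2, 63, 64): an 8-transfer DMA
 * burst, an RX watermark of 63 and a TX watermark of 64, matching the
 * rule described above.
 */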
2462 /* disable clock to CIU */
2463 mci_writel(host, CLKENA, 0);
2464 mci_writel(host, CLKSRC, 0);
2467 * In the 2.40a spec the data offset changed, so check the
2468 * version ID and set the data offset for the DATA register.
2470 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2471 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2473 if (host->verid < DW_MMC_240A)
2474 host->data_offset = DATA_OFFSET;
2476 host->data_offset = DATA_240A_OFFSET;
2478 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2479 host->card_workqueue = alloc_workqueue("dw-mci-card",
2481 if (!host->card_workqueue) {
2485 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2486 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2487 host->irq_flags, "dw-mci", host);
2491 if (host->pdata->num_slots)
2492 host->num_slots = host->pdata->num_slots;
2494 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2497 * Enable interrupts for command done, data over, data empty, card detect,
2498 * receive ready, and errors such as transmit/receive timeout and CRC error.
2500 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2501 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2502 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2503 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2504 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2506 dev_info(host->dev, "DW MMC controller at irq %d, "
2507 "%d bit host data width, "
2509 host->irq, width, fifo_size);
2511 /* We need at least one slot to succeed */
2512 for (i = 0; i < host->num_slots; i++) {
2513 ret = dw_mci_init_slot(host, i);
2515 dev_dbg(host->dev, "slot %d init failed\n", i);
2521 dev_info(host->dev, "%d slots initialized\n", init_slots);
2523 dev_dbg(host->dev, "attempted to initialize %d slots, "
2524 "but failed on all\n", host->num_slots);
2528 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2529 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2534 destroy_workqueue(host->card_workqueue);
2537 if (host->use_dma && host->dma_ops->exit)
2538 host->dma_ops->exit(host);
2540 regulator_disable(host->vmmc);
2543 if (!IS_ERR(host->ciu_clk))
2544 clk_disable_unprepare(host->ciu_clk);
2547 if (!IS_ERR(host->biu_clk))
2548 clk_disable_unprepare(host->biu_clk);
2552 EXPORT_SYMBOL(dw_mci_probe);
2554 void dw_mci_remove(struct dw_mci *host)
2558 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2559 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2561 for (i = 0; i < host->num_slots; i++) {
2562 dev_dbg(host->dev, "remove slot %d\n", i);
2564 dw_mci_cleanup_slot(host->slot[i], i);
2567 /* disable clock to CIU */
2568 mci_writel(host, CLKENA, 0);
2569 mci_writel(host, CLKSRC, 0);
2571 destroy_workqueue(host->card_workqueue);
2573 if (host->use_dma && host->dma_ops->exit)
2574 host->dma_ops->exit(host);
2577 regulator_disable(host->vmmc);
2579 if (!IS_ERR(host->ciu_clk))
2580 clk_disable_unprepare(host->ciu_clk);
2582 if (!IS_ERR(host->biu_clk))
2583 clk_disable_unprepare(host->biu_clk);
2585 EXPORT_SYMBOL(dw_mci_remove);
2589 #ifdef CONFIG_PM_SLEEP
2591 * TODO: we should probably disable the clock to the card in the suspend path.
2593 int dw_mci_suspend(struct dw_mci *host)
2596 regulator_disable(host->vmmc);
2600 EXPORT_SYMBOL(dw_mci_suspend);
2602 int dw_mci_resume(struct dw_mci *host)
2607 ret = regulator_enable(host->vmmc);
2610 "failed to enable regulator: %d\n", ret);
2615 if (!dw_mci_ctrl_all_reset(host)) {
2620 if (host->use_dma && host->dma_ops->init)
2621 host->dma_ops->init(host);
2624 * Restore the initial value of the FIFOTH register
2625 * and invalidate prev_blksz with zero.
2627 mci_writel(host, FIFOTH, host->fifoth_val);
2628 host->prev_blksz = 0;
2630 /* Put in max timeout */
2631 mci_writel(host, TMOUT, 0xFFFFFFFF);
2633 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2634 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2635 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2636 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2637 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2639 for (i = 0; i < host->num_slots; i++) {
2640 struct dw_mci_slot *slot = host->slot[i];
2643 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2644 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2645 dw_mci_setup_bus(slot, true);
2650 EXPORT_SYMBOL(dw_mci_resume);
2651 #endif /* CONFIG_PM_SLEEP */
2653 static int __init dw_mci_init(void)
2655 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
2659 static void __exit dw_mci_exit(void)
2663 module_init(dw_mci_init);
2664 module_exit(dw_mci_exit);
2666 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2667 MODULE_AUTHOR("NXP Semiconductor VietNam");
2668 MODULE_AUTHOR("Imagination Technologies Ltd");
2669 MODULE_LICENSE("GPL v2");