/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"
#define DRIVER_NAME "mmci-pl18x"
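
/*
 * The default maximum operating frequency of the card clock; it can be
 * overridden at load time through the "fmax" module parameter declared
 * at the bottom of this file.
 */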
static unsigned int fmax = 515633;
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
};
static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};
static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};
static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
};
static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
};
static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
};
/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't reach the
			 * maximum frequency.
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
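
		/*
		 * Worked example (values for illustration only): with
		 * mclk = 100 MHz and a desired rate of 400 kHz, the
		 * PL180 equation gives clkdiv = 100e6 / (2 * 4e5) - 1
		 * = 124, so cclk = 100e6 / (2 * 125) = 400 kHz exactly.
		 */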
		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	pm_runtime_put(mmc_dev(host->mmc));
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
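
/*
 * On variants wired up with a single IRQ line, the MASK1 (PIO FIFO)
 * interrupt sources are mirrored into MASK0 so that everything arrives
 * on IRQ 0; mmci_irq() then demultiplexes by calling mmci_pio_irq()
 * directly when a MASK1 source fires.
 */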
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}
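
/*
 * Note that failing to obtain a channel above is not fatal: every DMA
 * entry point below checks for a valid channel, and the data path
 * falls back to PIO transfers when none is available.
 */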
/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}
static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}
static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

 unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
	return -ENOMEM;
}
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}
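
/*
 * The MMC core can call pre_req() to let us prepare the next request's
 * DMA descriptor while the current transfer is still in flight. A
 * non-zero data->host_cookie marks data prepared that way, and
 * mmci_get_next_data() below then picks up the saved channel and
 * descriptor instead of preparing them on the critical path.
 */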
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if config for dma */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if config for dma */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}
#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;
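
	/*
	 * Worked example (illustration only): a timeout_ns of 100 ms
	 * at cclk = 26 MHz contributes 0.1 * 26e6 = 2,600,000 card
	 * clock cycles on top of timeout_clks.
	 */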
	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
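
/*
 * If the CPSM is still flagged enabled from a previous command, it is
 * first disabled and given a short settling delay (the udelay() below)
 * before the new command and argument are written.
 */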
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host))
				mmci_dma_data_error(host);
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
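
/*
 * MMCIFIFOCNT counts the words still to be transferred on the card
 * side, so the difference between the bytes this transfer still owes
 * the CPU (host_remain below) and FIFOCNT << 2 is the amount of data
 * currently sitting in the FIFO.
 */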
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * The ST Micro variant for SDIO transfer sizes
		 * less than 8 bytes should have clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio &&
		    mmc_card_sdio(host->mmc->card)) {
			if (count < 8)
				writel(readl(host->base + MMCICLOCK) &
					~variant->clkreg_enable,
					host->base + MMCICLOCK);
			else
				writel(readl(host->base + MMCICLOCK) |
					variant->clkreg_enable,
					host->base + MMCICLOCK);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32bit write, 7 bytes will be two
		 * 32bit writes etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);
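
	/*
	 * For reads, the data path is armed before the command is sent
	 * so the DPSM is ready when the card starts returning data;
	 * writes start their data phase from mmci_cmd_irq() once the
	 * command has completed.
	 */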
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for a card that
	 * is not present, non-zero for card inserted.
	 */
	return status;
}
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
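
/*
 * A minimal sketch of the platform data the probe below requires; the
 * field names follow their use in this file (the struct itself lives
 * in <linux/amba/mmci.h>) but the values here are illustrative only:
 *
 *	static struct mmci_platform_data mmci_card_data = {
 *		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
 *		.f_max		= 25000000,
 *		.gpio_cd	= -1,
 *		.gpio_wp	= -1,
 *		.capabilities	= MMC_CAP_4_BIT_DATA,
 *	};
 */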
static int __devinit mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare(host->clk);
	if (ret)
		goto clk_free;

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_unprep;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
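	/*
	 * For example (illustration only): with mclk at 100 MHz, the
	 * maximum divider of 255 gives f_min = 100e6 / 257 = 389106 Hz
	 * on the ST variants and 100e6 / 512 = 195313 Hz on the ARM
	 * variants, per the divider equations in mmci_set_clkreg().
	 */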
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
				mmci_cd_irq,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_unprep:
	clk_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_unprepare(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");