/*
 * linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
 *
 * This is a driver for the SDHC controller found in Freescale MX2/MX3
 * SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
 * Unlike the hardware found on MX1, this hardware just works and does
 * not need all the quirks found in imxmmc.c, hence the separate driver.
 *
 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
 *
 * derived from pxamci.c by Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>

#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <mach/mmc.h>

#include <mach/dma.h>
#include <mach/hardware.h>

#define DRIVER_NAME "mxc-mmc"

#define MMC_REG_STR_STP_CLK		0x00
#define MMC_REG_STATUS			0x04
#define MMC_REG_CLK_RATE		0x08
#define MMC_REG_CMD_DAT_CONT		0x0C
#define MMC_REG_RES_TO			0x10
#define MMC_REG_READ_TO			0x14
#define MMC_REG_BLK_LEN			0x18
#define MMC_REG_NOB			0x1C
#define MMC_REG_REV_NO			0x20
#define MMC_REG_INT_CNTR		0x24
#define MMC_REG_CMD			0x28
#define MMC_REG_ARG			0x2C
#define MMC_REG_RES_FIFO		0x34
#define MMC_REG_BUFFER_ACCESS		0x38

#define STR_STP_CLK_RESET		(1 << 3)
#define STR_STP_CLK_START_CLK		(1 << 1)
#define STR_STP_CLK_STOP_CLK		(1 << 0)

#define STATUS_CARD_INSERTION		(1 << 31)
#define STATUS_CARD_REMOVAL		(1 << 30)
#define STATUS_YBUF_EMPTY		(1 << 29)
#define STATUS_XBUF_EMPTY		(1 << 28)
#define STATUS_YBUF_FULL		(1 << 27)
#define STATUS_XBUF_FULL		(1 << 26)
#define STATUS_BUF_UND_RUN		(1 << 25)
#define STATUS_BUF_OVFL			(1 << 24)
#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
#define STATUS_END_CMD_RESP		(1 << 13)
#define STATUS_WRITE_OP_DONE		(1 << 12)
#define STATUS_DATA_TRANS_DONE		(1 << 11)
#define STATUS_READ_OP_DONE		(1 << 11)
#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
#define STATUS_BUF_READ_RDY		(1 << 7)
#define STATUS_BUF_WRITE_RDY		(1 << 6)
#define STATUS_RESP_CRC_ERR		(1 << 5)
#define STATUS_CRC_READ_ERR		(1 << 3)
#define STATUS_CRC_WRITE_ERR		(1 << 2)
#define STATUS_TIME_OUT_RESP		(1 << 1)
#define STATUS_TIME_OUT_READ		(1 << 0)
#define STATUS_ERR_MASK			0x2f

#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
#define CMD_DAT_CONT_INIT		(1 << 7)
#define CMD_DAT_CONT_WRITE		(1 << 4)
#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)

#define INT_SDIO_INT_WKP_EN		(1 << 18)
#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
#define INT_CARD_INSERTION_EN		(1 << 15)
#define INT_CARD_REMOVAL_EN		(1 << 14)
#define INT_SDIO_IRQ_EN			(1 << 13)
#define INT_DAT0_EN			(1 << 12)
#define INT_BUF_READ_EN			(1 << 4)
#define INT_BUF_WRITE_EN		(1 << 3)
#define INT_END_CMD_RES_EN		(1 << 2)
#define INT_WRITE_OP_DONE_EN		(1 << 1)
#define INT_READ_OP_EN			(1 << 0)

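/* Per-controller driver state, kept in the mmc_host private area. */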
struct mxcmci_host {
	struct mmc_host		*mmc;
	struct resource		*res;
	void __iomem		*base;
	int			irq;
	int			detect_irq;
	struct dma_chan		*dma;
	struct dma_async_tx_descriptor *desc;
	int			do_dma;
	int			default_irq_mask;
	int			use_sdio;
	unsigned int		power_mode;
	struct imxmmc_platform_data *pdata;

	struct mmc_request	*req;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	unsigned int		datasize;
	unsigned int		dma_dir;

	u16			rev_no;
	unsigned int		cmdat;

	struct clk		*clk;

	int			clock;

	struct work_struct	datawork;
	spinlock_t		lock;

	struct regulator	*vcc;

	int			burstlen;
	int			dmareq;
	struct dma_slave_config	dma_slave_config;
	struct imx_dma_data	dma_data;
};

static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);

static inline void mxcmci_init_ocr(struct mxcmci_host *host)
{
	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");

	if (IS_ERR(host->vcc)) {
		host->vcc = NULL;
	} else {
		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
		if (host->pdata && host->pdata->ocr_avail)
			dev_warn(mmc_dev(host->mmc),
				"pdata->ocr_avail will not be used\n");
	}

	if (host->vcc == NULL) {
		/* fall-back to platform data */
		if (host->pdata && host->pdata->ocr_avail)
			host->mmc->ocr_avail = host->pdata->ocr_avail;
		else
			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	}
}

static inline void mxcmci_set_power(struct mxcmci_host *host,
				    unsigned char power_mode,
				    unsigned int vdd)
{
	if (host->vcc) {
		if (power_mode == MMC_POWER_UP)
			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
		else if (power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
	}

	if (host->pdata && host->pdata->setpower)
		host->pdata->setpower(mmc_dev(host->mmc), vdd);
}

static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
	return host->do_dma;
}

static void mxcmci_softreset(struct mxcmci_host *host)
{
	int i;

	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");

	/* reset sequence */
	writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
	writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
			host->base + MMC_REG_STR_STP_CLK);

	for (i = 0; i < 8; i++)
		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);

	writew(0xff, host->base + MMC_REG_RES_TO);
}

static int mxcmci_setup_dma(struct mmc_host *mmc);

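/*
 * Set up the data phase: program block count and block length and, if a
 * DMA channel is available, map the scatterlist and queue a slave-DMA
 * descriptor. Scatterlist entries that are not 32-bit aligned force a
 * fallback to PIO.
 */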
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasize = nob * blksz;
	struct scatterlist *sg;
	int i, nents;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	writew(nob, host->base + MMC_REG_NOB);
	writew(blksz, host->base + MMC_REG_BLK_LEN);
	host->datasize = datasize;

	if (!mxcmci_use_dma(host))
		return 0;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3) {
			host->do_dma = 0;
			return 0;
		}
	}

	if (data->flags & MMC_DATA_READ)
		host->dma_dir = DMA_FROM_DEVICE;
	else
		host->dma_dir = DMA_TO_DEVICE;

	nents = dma_map_sg(host->dma->device->dev, data->sg,
			   data->sg_len, host->dma_dir);
	if (nents != data->sg_len)
		return -EINVAL;

	host->desc = host->dma->device->device_prep_slave_sg(host->dma,
		data->sg, data->sg_len, host->dma_dir,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!host->desc) {
		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
				host->dma_dir);
		host->do_dma = 0;
		return 0; /* Fall back to PIO */
	}
	wmb();

	dmaengine_submit(host->desc);

	return 0;
}

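/*
 * Translate the response type requested by the MMC core into
 * CMD_DAT_CONT bits, unmask the interrupts needed for this transfer
 * and write opcode, argument and control word to start the command.
 */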
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
		unsigned int cmdat)
{
	u32 int_cntr = host->default_irq_mask;
	unsigned long flags;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
		break;
	case MMC_RSP_NONE:
		break;
	default:
		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
				mmc_resp_type(cmd));
		cmd->error = -EINVAL;
		return -EINVAL;
	}

	int_cntr = INT_END_CMD_RES_EN;

	if (mxcmci_use_dma(host))
		int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;

	spin_lock_irqsave(&host->lock, flags);
	if (host->use_sdio)
		int_cntr |= INT_SDIO_IRQ_EN;
	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);

	writew(cmd->opcode, host->base + MMC_REG_CMD);
	writel(cmd->arg, host->base + MMC_REG_ARG);
	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);

	return 0;
}

static void mxcmci_finish_request(struct mxcmci_host *host,
		struct mmc_request *req)
{
	u32 int_cntr = host->default_irq_mask;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->use_sdio)
		int_cntr |= INT_SDIO_IRQ_EN;
	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);

	host->req = NULL;
	host->cmd = NULL;
	host->data = NULL;

	mmc_request_done(host->mmc, req);
}

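/*
 * Finish a data transfer: stop and unmap DMA if it was used and
 * translate the controller status bits into an errno value.
 */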
static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (mxcmci_use_dma(host)) {
		dmaengine_terminate_all(host->dma);
		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
				host->dma_dir);
	}

	if (stat & STATUS_ERR_MASK) {
		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
				stat);
		if (stat & STATUS_CRC_READ_ERR) {
			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
			data->error = -EILSEQ;
		} else if (stat & STATUS_CRC_WRITE_ERR) {
			u32 err_code = (stat >> 9) & 0x3;
			if (err_code == 2) { /* No CRC response */
				dev_err(mmc_dev(host->mmc),
					"%s: No CRC -ETIMEDOUT\n", __func__);
				data->error = -ETIMEDOUT;
			} else {
				dev_err(mmc_dev(host->mmc),
					"%s: -EILSEQ\n", __func__);
				data->error = -EILSEQ;
			}
		} else if (stat & STATUS_TIME_OUT_READ) {
			dev_err(mmc_dev(host->mmc),
				"%s: read -ETIMEDOUT\n", __func__);
			data->error = -ETIMEDOUT;
		} else {
			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
			data->error = -EIO;
		}
	} else {
		data->bytes_xfered = host->datasize;
	}

	data_error = data->error;

	host->data = NULL;

	return data_error;
}

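/*
 * Fetch the command response from the 16-bit wide response FIFO:
 * eight reads for a 136-bit (R2) response, three for a 48-bit one.
 */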
static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 a, b, c;

	if (!cmd)
		return;

	if (stat & STATUS_TIME_OUT_RESP) {
		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
		cmd->error = -ETIMEDOUT;
	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
		cmd->error = -EILSEQ;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			for (i = 0; i < 4; i++) {
				a = readw(host->base + MMC_REG_RES_FIFO);
				b = readw(host->base + MMC_REG_RES_FIFO);
				cmd->resp[i] = a << 16 | b;
			}
		} else {
			a = readw(host->base + MMC_REG_RES_FIFO);
			b = readw(host->base + MMC_REG_RES_FIFO);
			c = readw(host->base + MMC_REG_RES_FIFO);
			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
		}
	}
}

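/*
 * Busy-wait until the controller reports one of the status bits in
 * 'mask' or an error. Gives up after about one second, resets the
 * controller and returns a read timeout.
 */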
static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
{
	u32 stat;
	unsigned long timeout = jiffies + HZ;

	do {
		stat = readl(host->base + MMC_REG_STATUS);
		if (stat & STATUS_ERR_MASK)
			return stat;
		if (time_after(jiffies, timeout)) {
			mxcmci_softreset(host);
			mxcmci_set_clk_rate(host, host->clock);
			return STATUS_TIME_OUT_READ;
		}
		if (stat & mask)
			return 0;
		cpu_relax();
	} while (1);
}

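/*
 * PIO helpers: move data between memory and the 32-bit BUFFER_ACCESS
 * register one word at a time, polling the buffer-ready status bits
 * before each access. A trailing partial word is staged through a
 * temporary so only whole words hit the register.
 */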
static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
{
	unsigned int stat;
	u32 *buf = _buf;

	while (bytes > 3) {
		stat = mxcmci_poll_status(host,
				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
		if (stat)
			return stat;
		*buf++ = readl(host->base + MMC_REG_BUFFER_ACCESS);
		bytes -= 4;
	}

	if (bytes) {
		u8 *b = (u8 *)buf;
		u32 tmp;

		stat = mxcmci_poll_status(host,
				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
		if (stat)
			return stat;
		tmp = readl(host->base + MMC_REG_BUFFER_ACCESS);
		memcpy(b, &tmp, bytes);
	}

	return 0;
}

static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
{
	unsigned int stat;
	u32 *buf = _buf;

	while (bytes > 3) {
		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
		if (stat)
			return stat;
		writel(*buf++, host->base + MMC_REG_BUFFER_ACCESS);
		bytes -= 4;
	}

	if (bytes) {
		u8 *b = (u8 *)buf;
		u32 tmp;

		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
		if (stat)
			return stat;

		memcpy(&tmp, b, bytes);
		writel(tmp, host->base + MMC_REG_BUFFER_ACCESS);
	}

	stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
	if (stat)
		return stat;

	return 0;
}

static int mxcmci_transfer_data(struct mxcmci_host *host)
{
	struct mmc_data *data = host->req->data;
	struct scatterlist *sg;
	int stat, i;

	host->data = data;
	host->datasize = 0;

	if (data->flags & MMC_DATA_READ) {
		for_each_sg(data->sg, sg, data->sg_len, i) {
			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
			if (stat)
				return stat;
			host->datasize += sg->length;
		}
	} else {
		for_each_sg(data->sg, sg, data->sg_len, i) {
			stat = mxcmci_push(host, sg_virt(sg), sg->length);
			if (stat)
				return stat;
			host->datasize += sg->length;
		}
		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
		if (stat)
			return stat;
	}
	return 0;
}

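/*
 * PIO transfers must not run in interrupt context, so they are handed
 * off to this workqueue handler, which does the transfer and then
 * completes the request (issuing the stop command first if present).
 */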
static void mxcmci_datawork(struct work_struct *work)
{
	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
						  datawork);
	int datastat = mxcmci_transfer_data(host);

	writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
		host->base + MMC_REG_STATUS);
	mxcmci_finish_data(host, datastat);

	if (host->req->stop) {
		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
			mxcmci_finish_request(host, host->req);
			return;
		}
	} else {
		mxcmci_finish_request(host, host->req);
	}
}

static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (!data)
		return;

	data_error = mxcmci_finish_data(host, stat);

	mxcmci_read_response(host, stat);
	host->cmd = NULL;

	if (host->req->stop) {
		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
			mxcmci_finish_request(host, host->req);
			return;
		}
	} else {
		mxcmci_finish_request(host, host->req);
	}
}

static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
{
	mxcmci_read_response(host, stat);
	host->cmd = NULL;

	if (!host->data && host->req) {
		mxcmci_finish_request(host, host->req);
		return;
	}

	/* For the DMA case the DMA engine handles the data transfer
	 * automatically. For non DMA we have to do it ourselves.
	 * Don't do it in interrupt context though.
	 */
	if (!mxcmci_use_dma(host) && host->data)
		schedule_work(&host->datawork);

}

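/*
 * Interrupt handler: acknowledge status bits, signal a pending SDIO
 * interrupt, complete the command and (DMA) data phases and report
 * card insertion/removal when DAT3 card detection is in use.
 */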
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
	struct mxcmci_host *host = devid;
	unsigned long flags;
	bool sdio_irq;
	u32 stat;

	stat = readl(host->base + MMC_REG_STATUS);
	writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
			STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);

	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);

	spin_lock_irqsave(&host->lock, flags);
	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mxcmci_use_dma(host) &&
	    (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
		writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
			host->base + MMC_REG_STATUS);

	if (sdio_irq) {
		writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
		mmc_signal_sdio_irq(host->mmc);
	}

	if (stat & STATUS_END_CMD_RESP)
		mxcmci_cmd_done(host, stat);

	if (mxcmci_use_dma(host) &&
	    (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
		mxcmci_data_done(host, stat);

	if (host->default_irq_mask &&
	    (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}

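/*
 * Handle a request from the MMC core: use DMA whenever a channel is
 * available, set up the data phase and start the command.
 */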
static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	unsigned int cmdat = host->cmdat;
	int error;

	WARN_ON(host->req != NULL);

	host->req = req;
	host->cmdat &= ~CMD_DAT_CONT_INIT;

	if (host->dma)
		host->do_dma = 1;

	if (req->data) {
		error = mxcmci_setup_data(host, req->data);
		if (error) {
			req->cmd->error = error;
			goto out;
		}

		cmdat |= CMD_DAT_CONT_DATA_ENABLE;

		if (req->data->flags & MMC_DATA_WRITE)
			cmdat |= CMD_DAT_CONT_WRITE;
	}

	error = mxcmci_start_cmd(host, req->cmd, cmdat);

out:
	if (error)
		mxcmci_finish_request(host, req);
}

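/*
 * Pick a prescaler/divider pair that brings the controller clock to at
 * most the rate requested by the MMC core and program it into CLK_RATE.
 */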
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
{
	unsigned int divider;
	int prescaler = 0;
	unsigned int clk_in = clk_get_rate(host->clk);

	while (prescaler <= 0x800) {
		for (divider = 1; divider <= 0xF; divider++) {
			int x;

			x = (clk_in / (divider + 1));

			if (prescaler)
				x /= (prescaler * 2);

			if (x <= clk_ios)
				break;
		}
		if (divider < 0x10)
			break;

		if (prescaler == 0)
			prescaler = 1;
		else
			prescaler <<= 1;
	}

	writew((prescaler << 4) | divider, host->base + MMC_REG_CLK_RATE);

	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
			prescaler, divider, clk_in, clk_ios);
}

static int mxcmci_setup_dma(struct mmc_host *mmc)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	struct dma_slave_config *config = &host->dma_slave_config;

	config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
	config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
	config->dst_addr_width = 4;
	config->src_addr_width = 4;
	config->dst_maxburst = host->burstlen;
	config->src_maxburst = host->burstlen;

	return dmaengine_slave_config(host->dma, config);
}

static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	int burstlen, ret;

	/*
	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
	 */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		burstlen = 16;
	else
		burstlen = 4;

	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
		host->burstlen = burstlen;
		ret = mxcmci_setup_dma(mmc);
		if (ret) {
			dev_err(mmc_dev(host->mmc),
				"failed to config DMA channel. Falling back to PIO\n");
			dma_release_channel(host->dma);
			host->do_dma = 0;
			host->dma = NULL;
		}
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
	else
		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;

	if (host->power_mode != ios->power_mode) {
		mxcmci_set_power(host, ios->power_mode, ios->vdd);
		host->power_mode = ios->power_mode;

		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMD_DAT_CONT_INIT;
	}

	if (ios->clock) {
		mxcmci_set_clk_rate(host, ios->clock);
		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
	} else {
		writew(STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
	}

	host->clock = ios->clock;
}

static irqreturn_t mxcmci_detect_irq(int irq, void *data)
{
	struct mmc_host *mmc = data;

	dev_dbg(mmc_dev(mmc), "%s\n", __func__);

	mmc_detect_change(mmc, msecs_to_jiffies(250));
	return IRQ_HANDLED;
}

static int mxcmci_get_ro(struct mmc_host *mmc)
{
	struct mxcmci_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 int_cntr;

	spin_lock_irqsave(&host->lock, flags);
	host->use_sdio = enable;
	int_cntr = readl(host->base + MMC_REG_INT_CNTR);

	if (enable)
		int_cntr |= INT_SDIO_IRQ_EN;
	else
		int_cntr &= ~INT_SDIO_IRQ_EN;

	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
{
	/*
	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
	 * multi-block transfers when connected SDIO peripheral doesn't
	 * drive the BUSY line as required by the specs.
	 * One way to prevent this is to only allow 1-bit transfers.
	 */

	if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
		host->caps &= ~MMC_CAP_4_BIT_DATA;
	else
		host->caps |= MMC_CAP_4_BIT_DATA;
}

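/*
 * dmaengine channel filter: accept only general purpose i.MX DMA
 * channels and attach the SDHC channel parameters to the channel.
 */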
static bool filter(struct dma_chan *chan, void *param)
{
	struct mxcmci_host *host = param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	chan->private = &host->dma_data;

	return true;
}

static const struct mmc_host_ops mxcmci_ops = {
	.request		= mxcmci_request,
	.set_ios		= mxcmci_set_ios,
	.get_ro			= mxcmci_get_ro,
	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
	.init_card		= mxcmci_init_card,
};

static int mxcmci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct mxcmci_host *host = NULL;
	struct resource *iores, *r;
	int ret = 0, irq;
	dma_cap_mask_t mask;

	pr_info("i.MX SDHC driver\n");

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!iores || irq < 0)
		return -EINVAL;

	r = request_mem_region(iores->start, resource_size(iores), pdev->name);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out_release_mem;
	}

	mmc->ops = &mxcmci_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;

	/* MMC core transfer sizes tunable parameters */
	mmc->max_segs = 64;
	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->base = ioremap(r->start, resource_size(r));
	if (!host->base) {
		ret = -ENOMEM;
		goto out_free;
	}

	host->mmc = mmc;
	host->pdata = pdev->dev.platform_data;
	spin_lock_init(&host->lock);

	mxcmci_init_ocr(host);

	if (host->pdata && host->pdata->dat3_card_detect)
		host->default_irq_mask =
			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
	else
		host->default_irq_mask = 0;

	host->res = r;
	host->irq = irq;

	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto out_iounmap;
	}
	clk_enable(host->clk);

	mxcmci_softreset(host);

	host->rev_no = readw(host->base + MMC_REG_REV_NO);
	if (host->rev_no != 0x400) {
		ret = -ENODEV;
		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
			host->rev_no);
		goto out_clk_put;
	}

	mmc->f_min = clk_get_rate(host->clk) >> 16;
	mmc->f_max = clk_get_rate(host->clk) >> 1;

	/* recommended in data sheet */
	writew(0x2db4, host->base + MMC_REG_READ_TO);

	writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r) {
		host->dmareq = r->start;
		host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
		host->dma_data.priority = DMA_PRIO_LOW;
		host->dma_data.dma_request = host->dmareq;
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		host->dma = dma_request_channel(mask, filter, host);
		if (host->dma)
			mmc->max_seg_size = dma_get_max_seg_size(
					host->dma->device->dev);
	}

	if (!host->dma)
		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");

	INIT_WORK(&host->datawork, mxcmci_datawork);

	ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out_free_dma;

	platform_set_drvdata(pdev, mmc);

	if (host->pdata && host->pdata->init) {
		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
				host->mmc);
		if (ret)
			goto out_free_irq;
	}

	mmc_add_host(mmc);

	return 0;

out_free_irq:
	free_irq(host->irq, host);
out_free_dma:
	if (host->dma)
		dma_release_channel(host->dma);
out_clk_put:
	clk_disable(host->clk);
	clk_put(host->clk);
out_iounmap:
	iounmap(host->base);
out_free:
	mmc_free_host(mmc);
out_release_mem:
	release_mem_region(iores->start, resource_size(iores));
	return ret;
}

static int mxcmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxcmci_host *host = mmc_priv(mmc);

	platform_set_drvdata(pdev, NULL);

	mmc_remove_host(mmc);

	if (host->vcc)
		regulator_put(host->vcc);

	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&pdev->dev, mmc);

	free_irq(host->irq, host);
	iounmap(host->base);

	if (host->dma)
		dma_release_channel(host->dma);

	clk_disable(host->clk);
	clk_put(host->clk);

	release_mem_region(host->res->start, resource_size(host->res));

	mmc_free_host(mmc);

	return 0;
}

#ifdef CONFIG_PM
static int mxcmci_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);
	clk_disable(host->clk);

	return ret;
}

static int mxcmci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);
	int ret = 0;

	clk_enable(host->clk);
	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}

static const struct dev_pm_ops mxcmci_pm_ops = {
	.suspend	= mxcmci_suspend,
	.resume		= mxcmci_resume,
};
#endif

static struct platform_driver mxcmci_driver = {
	.probe		= mxcmci_probe,
	.remove		= mxcmci_remove,
	.driver		= {
		.name		= DRIVER_NAME,
		.owner		= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &mxcmci_pm_ops,
#endif
	}
};

static int __init mxcmci_init(void)
{
	return platform_driver_register(&mxcmci_driver);
}

static void __exit mxcmci_exit(void)
{
	platform_driver_unregister(&mxcmci_driver);
}

module_init(mxcmci_init);
module_exit(mxcmci_exit);

MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-mmc");