/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as required by
 * the Linux MMC API.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing is
 * finished. In case processing has to continue, i.e. if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine into the
 * bottom half waiting state.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum mmcif_state state;
	enum mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool card_present;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct completion dma_complete;
	bool dma_active;
};

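/* Read-modify-write helpers for the 32-bit MMCIF registers */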
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;

	dev_dbg(&host->pd->dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
			 struct sh_mmcif_plat_data *pdata,
			 enum dma_transfer_direction direction)
{
	struct dma_slave_config cfg = { 0, };
	struct dma_chan *chan;
	void *slave_data = NULL;
	struct resource *res;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (pdata)
		slave_data = direction == DMA_MEM_TO_DEV ?
			(void *)pdata->slave_id_tx :
			(void *)pdata->slave_id_rx;

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				slave_data, &host->pd->dev,
				direction == DMA_MEM_TO_DEV ? "tx" : "rx");

	dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
		direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan);

	if (!chan)
		return NULL;

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	host->dma_active = false;

	if (pdata) {
		if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
			return;
	} else if (!host->pd->dev.of_node) {
		return;
	}

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV);
	if (!host->chan_tx)
		return;

	host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM);
	if (!host->chan_rx) {
		dma_release_channel(host->chan_tx);
		host->chan_tx = NULL;
	}
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
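	/*
	 * Divider selection: for the target ratio r = host->clk / clk
	 * (rounded up), fls(r - 1) - 1 is the smallest x with 2^(x + 1) >= r.
	 * The divider field of CE_CLK_CTRL (the bits covered by CLK_CLEAR)
	 * presumably encodes division of the source clock by 2^(x + 1).
	 */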
	if (sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
				((fls(DIV_ROUND_UP(host->clk,
						   clk) - 1) - 1) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

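/*
 * Read back the controller error status, force-terminate any command
 * sequence that is still running, and map the hardware state to an errno.
 */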
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(&host->pd->dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

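/*
 * Advance the PIO position by one block within the current scatterlist
 * element, moving on to the next element once the current one is exhausted.
 * Returns true while blocks remain in the request.
 */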
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
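	/*
	 * The + 3 below makes the later blocksize / 4 PIO loops round the
	 * byte count up to a whole number of 32-bit FIFO words.
	 */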
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
				  struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
				  struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
				       struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

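/*
 * Translate an mmc_request into the CE_CMD_SET register layout: the opcode
 * goes into bits [29:24] (see CMD_MASK), with response type, busy, data
 * direction, bus width and CRC/index check bits filled in per command.
 */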
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 mask;
	unsigned long flags;

	switch (opc) {
	/* response busy check */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		mask = MASK_START_CMD | MASK_MRBSYE;
		break;
	default:
		mask = MASK_START_CMD | MASK_MCRSPE;
		break;
	}

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

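	/*
	 * Acknowledge stale events before starting the command: CE_INT bits
	 * appear to be cleared by writing 0 to them, so the 0xD80430C0
	 * pattern leaves only the unused bit positions high; the CCS events
	 * are additionally cleared when CCS is not in use.
	 */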
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO command */
	case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
			break;
	case MMC_APP_CMD:
	case SD_IO_RW_DIRECT:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	default:
		break;
	}

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
{
	int ret = clk_prepare_enable(host->hclk);

	if (!ret) {
		host->clk = clk_get_rate(host->hclk);
		host->mmc->f_max = host->clk / 2;
		host->mmc->f_min = host->clk / 512;
	}

	return ret;
}

static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vmmc))
		/* Errors ignored... */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
		sh_mmcif_set_power(host, ios);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			pm_runtime_put_sync(&host->pd->dev);
			clk_disable_unprepare(host->hclk);
			host->power = false;
			if (ios->power_mode == MMC_POWER_OFF)
				sh_mmcif_set_power(host, ios);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			sh_mmcif_clk_update(host);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	int ret = mmc_gpio_get_cd(mmc);

	if (ret >= 0)
		return ret;

	if (!p || !p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

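/*
 * Command-stage bottom half, running in the IRQ thread: pick up the
 * response, then either start the data phase (DMA or PIO) or finish the
 * command. Returns true if a data stage still has to be waited for.
 */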
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(&host->pd->dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from the DMA callback and from an
	 * error, so it has to be reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
	}

	return false;
}

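/*
 * Threaded IRQ handler: dispatches to the per-stage bottom halves and
 * completes the request once no further stage is pending.
 */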
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	bool wait = false;
	unsigned long flags;
	int wait_work;

	spin_lock_irqsave(&host->lock, flags);
	wait_work = host->wait_for;
	spin_unlock_irqrestore(&host->lock, flags);

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed, successfully or not
	 */
	switch (wait_work) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}

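/*
 * "Hard" IRQ handler: acknowledge and mask the sources that fired, flag
 * errors, and wake the IRQ thread for the actual request processing.
 */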
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			mmcif_dma_complete(host);
	} else {
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}

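/*
 * Delayed-work timeout handler: if a request is still in flight when this
 * fires, mark the state machine as timed out and complete the request with
 * the error reported by sh_mmcif_error_manage().
 */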
static void mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = container_of(work, struct delayed_work, work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = msecs_to_jiffies(1000);
	host->ccs_enable = !pd || !pd->ccs_unsupported;
	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	host->hclk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
		goto err_pm;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto err_pm;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

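	/*
	 * The second IRQ is optional: when it is absent, normal and error
	 * interrupts are presumably muxed onto the single combined line.
	 */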
	name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(&pdev->dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
		if (ret < 0)
			goto err_clk;
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->hclk) / 1000000UL);

	clk_disable_unprepare(host->hclk);
	return ret;

err_clk:
	clk_disable_unprepare(host->hclk);
err_pm:
	pm_runtime_disable(&pdev->dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->hclk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct of_device_id mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, mmcif_of_match);

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
		.of_match_table = mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");