1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Driver for Realtek PCI-Express card reader
4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
7 * Wei WANG <wei_wang@realsil.com.cn>
10 #include <linux/pci.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/highmem.h>
15 #include <linux/interrupt.h>
16 #include <linux/delay.h>
17 #include <linux/idr.h>
18 #include <linux/platform_device.h>
19 #include <linux/mfd/core.h>
20 #include <linux/rtsx_pci.h>
21 #include <linux/mmc/card.h>
22 #include <asm/unaligned.h>
24 #include <linux/pm_runtime.h>
31 static bool msi_en = true;
32 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
33 MODULE_PARM_DESC(msi_en, "Enable MSI");
35 static DEFINE_IDR(rtsx_pci_idr);
36 static DEFINE_SPINLOCK(rtsx_pci_lock);
38 static struct mfd_cell rtsx_pcr_cells[] = {
40 .name = DRV_NAME_RTSX_PCI_SDMMC,
44 static const struct pci_device_id rtsx_pci_ids[] = {
45 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
55 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 { PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
62 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
/*
 * Program a software-controlled LTR (Latency Tolerance Reporting) value:
 * the 32-bit latency is split byte-wise into the four MSGTXDATA registers,
 * then LTR transmission is enabled in software-latency mode via LTR_CTL.
 * NOTE(review): this listing is missing lines (no braces/return visible);
 * comments describe only the statements shown.
 */
64 static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
66 rtsx_pci_write_register(pcr, MSGTXDATA0,
67 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
68 rtsx_pci_write_register(pcr, MSGTXDATA1,
69 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
70 rtsx_pci_write_register(pcr, MSGTXDATA2,
71 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
72 rtsx_pci_write_register(pcr, MSGTXDATA3,
73 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
74 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
75 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
/* Public wrapper: forwards directly to the common LTR-latency helper. */
80 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
82 return rtsx_comm_set_ltr_latency(pcr, latency);
/*
 * Enable/disable ASPM (Active State Power Management). Early-outs when the
 * cached state already matches. Two mechanisms are supported: ASPM_MODE_CFG
 * toggles the PCIe link-control capability word; ASPM_MODE_REG forces the
 * state through the vendor ASPM_FORCE_CTL register. The 0x02 bit of
 * pcr->aspm_en presumably selects the L1-capable path — TODO confirm.
 */
85 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
87 if (pcr->aspm_enabled == enable)
90 if (pcr->aspm_mode == ASPM_MODE_CFG) {
91 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
93 enable ? pcr->aspm_en : 0);
94 } else if (pcr->aspm_mode == ASPM_MODE_REG) {
95 if (pcr->aspm_en & 0x02)
96 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
97 FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
99 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
100 FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
103 if (!enable && (pcr->aspm_en & 0x02))
/* Cache the new state so repeated calls are cheap no-ops. */
106 pcr->aspm_enabled = enable;
/*
 * Disable ASPM, preferring a chip-specific hook when the ops table
 * provides one; otherwise fall back to the common helper above.
 */
109 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
111 if (pcr->ops->set_aspm)
112 pcr->ops->set_aspm(pcr, false);
114 rtsx_comm_set_aspm(pcr, false);
/* Write the L1 substate configuration byte (L1SUB_CONFIG3) wholesale. */
117 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
119 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
/* Apply the chip-specific D0 L1-off substate config, if the chip has one. */
124 static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
126 if (pcr->ops->set_l1off_cfg_sub_d0)
127 pcr->ops->set_l1off_cfg_sub_d0(pcr, active)
/*
 * Bring the device to full-power running state: drop ASPM, restore the
 * active LTR latency if LTR is enabled, and re-apply the L1-off substate
 * D0 configuration when power gating is in use.
 */
130 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
132 struct rtsx_cr_option *option = &pcr->option;
134 rtsx_disable_aspm(pcr);
136 /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
139 if (option->ltr_enabled)
140 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
142 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
143 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
/* Thin wrapper kept for symmetry with the chip-specific PM entry points. */
146 static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
148 rtsx_comm_pm_full_on(pcr);
/*
 * Mark the device as running before a transfer: transition the state to
 * PDEV_STAT_RUN, re-enable the activity LED auto-blink if supported, and
 * restore full power (ASPM off, LTR active latency).
 */
151 void rtsx_pci_start_run(struct rtsx_pcr *pcr)
153 /* If pci device removed, don't queue idle work any more */
157 if (pcr->state != PDEV_STAT_RUN) {
158 pcr->state = PDEV_STAT_RUN;
159 if (pcr->ops->enable_auto_blink)
160 pcr->ops->enable_auto_blink(pcr);
161 rtsx_pm_full_on(pcr);
164 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
/*
 * Write one internal 8-bit register through the HAIMR (Host Access Internal
 * Memory Register) mailbox: pack address (14 bits), mask and data into one
 * 32-bit command, post it, then poll up to MAX_RW_REG_CNT times for the
 * HAIMR_TRANS_END bit to clear, indicating completion.
 */
166 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
169 u32 val = HAIMR_WRITE_START;
171 val |= (u32)(addr & 0x3FFF) << 16;
172 val |= (u32)mask << 8;
175 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
177 for (i = 0; i < MAX_RW_REG_CNT; i++) {
178 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
179 if ((val & HAIMR_TRANS_END) == 0) {
188 EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
/*
 * Read one internal 8-bit register via the same HAIMR mailbox: post a
 * read command, poll for completion, then extract the low data byte.
 * Exhausting the poll budget (i >= MAX_RW_REG_CNT) is the timeout path.
 */
190 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
192 u32 val = HAIMR_READ_START;
195 val |= (u32)(addr & 0x3FFF) << 16;
196 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
198 for (i = 0; i < MAX_RW_REG_CNT; i++) {
199 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
200 if ((val & HAIMR_TRANS_END) == 0)
204 if (i >= MAX_RW_REG_CNT)
208 *data = (u8)(val & 0xFF);
212 EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
/*
 * Default (non-chip-specific) 16-bit PHY register write: load data low/high
 * bytes and the address, kick the transaction via PHYRWCTL (0x81 = write +
 * start, presumably — TODO confirm against datasheet), then poll PHYRWCTL
 * up to 100000 times for completion.
 */
214 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
216 int err, i, finished = 0;
219 rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
220 rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
221 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
222 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
224 for (i = 0; i < 100000; i++) {
225 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
/* Public PHY write: defer to the chip's hook when present. */
241 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
243 if (pcr->ops->write_phy)
244 return pcr->ops->write_phy(pcr, addr, val);
246 return __rtsx_pci_write_phy_register(pcr, addr, val);
248 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
/*
 * Default 16-bit PHY register read: set the address, start a read (0x80),
 * poll for completion, then assemble the result from PHYDATA0/1
 * (little-endian: DATA0 is the low byte).
 */
250 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
252 int err, i, finished = 0;
256 rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
257 rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
259 for (i = 0; i < 100000; i++) {
260 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
273 rtsx_pci_read_register(pcr, PHYDATA0, &val1);
274 rtsx_pci_read_register(pcr, PHYDATA1, &val2);
275 data = val1 | (val2 << 8);
/* Public PHY read: defer to the chip's hook when present. */
283 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
285 if (pcr->ops->read_phy)
286 return pcr->ops->read_phy(pcr, addr, val);
288 return __rtsx_pci_read_phy_register(pcr, addr, val);
290 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
/*
 * Abort any in-flight command/DMA: use the chip-specific stop hook if one
 * exists, otherwise issue STOP_CMD/STOP_DMA to the host command and data
 * block control registers and reset the DMA controller and ring buffer
 * (bit 0x80 of DMACTL/RBCTL appears to be the reset bit — TODO confirm).
 */
292 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
294 if (pcr->ops->stop_cmd)
295 return pcr->ops->stop_cmd(pcr);
297 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
298 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
300 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
301 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
303 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
/*
 * Append one 32-bit command word (type | addr | mask | data) to the host
 * command buffer. The buffer index pcr->ci is advanced under pcr->lock and
 * writes are silently dropped once the buffer is full.
 */
305 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
306 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
310 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
312 val |= (u32)(cmd_type & 0x03) << 30;
313 val |= (u32)(reg_addr & 0x3FFF) << 16;
314 val |= (u32)mask << 8;
317 spin_lock_irqsave(&pcr->lock, flags);
319 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
320 put_unaligned_le32(val, ptr);
324 spin_unlock_irqrestore(&pcr->lock, flags);
326 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
/*
 * Fire the queued command buffer without waiting for completion: program
 * the buffer DMA address and its byte length (ci entries * 4 bytes),
 * then trigger via the control register.
 */
328 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
332 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
334 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
335 /* Hardware Auto Response */
337 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
339 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
/*
 * Send the queued command buffer and block until the ISR signals the
 * result or @timeout (ms) elapses. A stack completion is registered as
 * pcr->done under the lock, the transfer is kicked exactly as in
 * rtsx_pci_send_cmd_no_wait(), and the outcome is decoded from
 * pcr->trans_result. On failure (other than device removal) the engine is
 * stopped; pcr->finish_me is completed so an outstanding
 * rtsx_pci_complete_unfinished_transfer() waiter can proceed.
 */
341 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
343 struct completion trans_done;
349 spin_lock_irqsave(&pcr->lock, flags);
351 /* set up data structures for the wakeup system */
352 pcr->done = &trans_done;
353 pcr->trans_result = TRANS_NOT_READY;
354 init_completion(&trans_done);
356 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
358 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
359 /* Hardware Auto Response */
361 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
363 spin_unlock_irqrestore(&pcr->lock, flags);
365 /* Wait for TRANS_OK_INT */
366 timeleft = wait_for_completion_interruptible_timeout(
367 &trans_done, msecs_to_jiffies(timeout));
369 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
371 goto finish_send_cmd;
/* Map the ISR-reported result to an error code under the lock. */
374 spin_lock_irqsave(&pcr->lock, flags);
375 if (pcr->trans_result == TRANS_RESULT_FAIL)
377 else if (pcr->trans_result == TRANS_RESULT_OK)
379 else if (pcr->trans_result == TRANS_NO_DEVICE)
381 spin_unlock_irqrestore(&pcr->lock, flags);
384 spin_lock_irqsave(&pcr->lock, flags);
386 spin_unlock_irqrestore(&pcr->lock, flags);
388 if ((err < 0) && (err != -ENODEV))
389 rtsx_pci_stop_cmd(pcr);
392 complete(pcr->finish_me);
396 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
/*
 * Append one entry to the hardware scatter-gather table at index pcr->sgi.
 * Each 64-bit entry packs the DMA address (upper 32 bits), the length and
 * option flags; the last entry is tagged RTSX_SG_END. RTS5261/RTS5228 use
 * a wider split length encoding (16 low bits at <<16, remainder at <<6).
 */
398 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
399 dma_addr_t addr, unsigned int len, int end)
401 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
403 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
405 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
408 option |= RTSX_SG_END;
410 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
412 val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
413 | (((u64)len >> 16) << 6) | option;
415 val = ((u64)addr << 32) | ((u64)len << 16) | option;
417 val = ((u64)addr << 32) | ((u64)len << 12) | option;
419 put_unaligned_le64(val, ptr);
/*
 * Convenience wrapper: map the scatterlist for DMA, run the transfer,
 * and unmap again. @read selects device-to-host direction.
 */
423 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
424 int num_sg, bool read, int timeout)
428 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
429 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
432 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
434 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
436 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
440 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
/* Map a scatterlist for DMA; rejects NULL/empty lists. */
442 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
443 int num_sg, bool read)
445 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
450 if ((sglist == NULL) || (num_sg <= 0))
453 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
455 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
/* Reverse of rtsx_pci_dma_map_sg(); must use the same direction. */
457 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
458 int num_sg, bool read)
460 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
462 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
464 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
/*
 * Run an ADMA transfer over an already-mapped scatterlist: build the SG
 * table, program base address and control word (direction bit 29 + TRIG),
 * then wait for the ISR result as in rtsx_pci_send_cmd(). A hardware
 * TRANS_RESULT_FAIL bumps dma_error_count (capped at
 * RTS_MAX_TIMES_FREQ_REDUCTION) so the clock code can back off.
 */
466 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
467 int count, bool read, int timeout)
469 struct completion trans_done;
470 struct scatterlist *sg;
477 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
482 if ((sglist == NULL) || (count < 1))
485 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
487 for_each_sg(sglist, sg, count, i) {
488 addr = sg_dma_address(sg);
489 len = sg_dma_len(sg);
490 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
493 spin_lock_irqsave(&pcr->lock, flags);
495 pcr->done = &trans_done;
496 pcr->trans_result = TRANS_NOT_READY;
497 init_completion(&trans_done);
498 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
499 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
501 spin_unlock_irqrestore(&pcr->lock, flags);
503 timeleft = wait_for_completion_interruptible_timeout(
504 &trans_done, msecs_to_jiffies(timeout));
506 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
511 spin_lock_irqsave(&pcr->lock, flags);
512 if (pcr->trans_result == TRANS_RESULT_FAIL) {
514 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
515 pcr->dma_error_count++;
518 else if (pcr->trans_result == TRANS_NO_DEVICE)
520 spin_unlock_irqrestore(&pcr->lock, flags);
523 spin_lock_irqsave(&pcr->lock, flags);
525 spin_unlock_irqrestore(&pcr->lock, flags);
527 if ((err < 0) && (err != -ENODEV))
528 rtsx_pci_stop_cmd(pcr);
531 complete(pcr->finish_me);
535 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
/*
 * Read @buf_len bytes from the chip's ping-pong buffer into @buf, in
 * 256-byte chunks: each chunk queues 256 READ_REG_CMDs over consecutive
 * ppbuf registers, sends them (250 ms timeout) and copies the result out
 * of the command data area. A final partial chunk handles buf_len % 256.
 */
537 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
549 for (i = 0; i < buf_len / 256; i++) {
550 rtsx_pci_init_cmd(pcr);
552 for (j = 0; j < 256; j++)
553 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
555 err = rtsx_pci_send_cmd(pcr, 250);
559 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
/* Remainder chunk (buf_len not a multiple of 256). */
564 rtsx_pci_init_cmd(pcr);
566 for (j = 0; j < buf_len % 256; j++)
567 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
569 err = rtsx_pci_send_cmd(pcr, 250);
574 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
578 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
/*
 * Mirror of rtsx_pci_read_ppbuf(): write @buf into the ping-pong buffer
 * using batched WRITE_REG_CMDs, full 256-byte chunks then the remainder.
 */
580 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
592 for (i = 0; i < buf_len / 256; i++) {
593 rtsx_pci_init_cmd(pcr);
595 for (j = 0; j < 256; j++) {
596 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
601 err = rtsx_pci_send_cmd(pcr, 250);
607 rtsx_pci_init_cmd(pcr);
609 for (j = 0; j < buf_len % 256; j++) {
610 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
615 err = rtsx_pci_send_cmd(pcr, 250);
622 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
/*
 * Apply a pull-control table: each u32 entry packs a register address in
 * the upper 16 bits and the value in the low byte; a zero upper half
 * terminates the table. All writes are batched into one command run.
 */
624 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
626 rtsx_pci_init_cmd(pcr);
628 while (*tbl & 0xFFFF0000) {
629 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
630 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
634 return rtsx_pci_send_cmd(pcr, 100);
/* Enable card pull-ups/downs using the per-chip SD or MS enable table. */
637 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
641 if (card == RTSX_SD_CARD)
642 tbl = pcr->sd_pull_ctl_enable_tbl;
643 else if (card == RTSX_MS_CARD)
644 tbl = pcr->ms_pull_ctl_enable_tbl;
648 return rtsx_pci_set_pull_ctl(pcr, tbl);
650 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
/* Disable card pull-ups/downs using the per-chip SD or MS disable table. */
652 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
656 if (card == RTSX_SD_CARD)
657 tbl = pcr->sd_pull_ctl_disable_tbl;
658 else if (card == RTSX_MS_CARD)
659 tbl = pcr->ms_pull_ctl_disable_tbl;
663 return rtsx_pci_set_pull_ctl(pcr, tbl);
665 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
/*
 * Compute and program the bus interrupt enable mask (BIER): transfer
 * OK/fail, SD card events, any extra per-chip bits from hw_param, and MS
 * card events when the reader exposes more than one slot.
 */
667 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
669 struct rtsx_hw_param *hw_param = &pcr->hw_param;
671 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
672 | hw_param->interrupt_en;
674 if (pcr->num_slots > 1)
675 pcr->bier |= MS_INT_EN;
677 /* Enable Bus Interrupt */
678 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
680 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
/* Halve the SSC depth code by one step (depth codes grow with depth). */
683 static inline u8 double_ssc_depth(u8 depth)
685 return ((depth > 1) ? (depth - 1) : depth);
/*
 * Adjust the SSC depth for the clock divider: shallower by (div - 1)
 * steps, clamped at the minimum depth SSC_DEPTH_4M.
 */
688 static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
690 if (div > CLK_DIV_1) {
691 if (ssc_depth > (div - 1))
692 ssc_depth -= (div - 1);
694 ssc_depth = SSC_DEPTH_4M;
/*
 * Switch the card clock to @card_clock Hz. RTS5261/5228/5264 delegate to
 * their dedicated implementations. Otherwise: in initial mode the SD clock
 * divider is set to /128 (~250 kHz effective); the SSC clock divider N and
 * the MCU count are derived from the target MHz, N is kept within
 * [MIN_DIV_N_PCR, MAX_DIV_N_PCR] by doubling through CLK_DIV steps, the
 * SSC depth is scaled accordingly, and the whole switch is applied as one
 * batched command run followed by a stability delay. On RTS5227 the
 * clock is stepped down 20 MHz per recorded DMA error as a workaround.
 */
700 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
701 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
704 u8 n, clk_divider, mcu_cnt, div;
705 static const u8 depth[] = {
706 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
707 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
708 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
709 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
710 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
713 if (PCI_PID(pcr) == PID_5261)
714 return rts5261_pci_switch_clock(pcr, card_clock,
715 ssc_depth, initial_mode, double_clk, vpclk);
716 if (PCI_PID(pcr) == PID_5228)
717 return rts5228_pci_switch_clock(pcr, card_clock,
718 ssc_depth, initial_mode, double_clk, vpclk);
719 if (PCI_PID(pcr) == PID_5264)
720 return rts5264_pci_switch_clock(pcr, card_clock,
721 ssc_depth, initial_mode, double_clk, vpclk);
724 /* We use 250k(around) here, in initial stage */
725 clk_divider = SD_CLK_DIVIDE_128;
726 card_clock = 30000000;
728 clk_divider = SD_CLK_DIVIDE_0;
730 err = rtsx_pci_write_register(pcr, SD_CFG1,
731 SD_CLK_DIVIDE_MASK, clk_divider);
735 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
736 if (card_clock == UHS_SDR104_MAX_DTR &&
737 pcr->dma_error_count &&
738 PCI_PID(pcr) == RTS5227_DEVICE_ID)
739 card_clock = UHS_SDR104_MAX_DTR -
740 (pcr->dma_error_count * 20000000);
742 card_clock /= 1000000;
743 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
746 if (!initial_mode && double_clk)
747 clk = card_clock * 2;
748 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
749 clk, pcr->cur_clock);
/* Fast path: nothing to do when the requested clock is already set. */
751 if (clk == pcr->cur_clock)
754 if (pcr->ops->conv_clk_and_div_n)
755 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
758 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
761 mcu_cnt = (u8)(125/clk + 3);
765 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
767 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
768 if (pcr->ops->conv_clk_and_div_n) {
769 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
771 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
778 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
780 ssc_depth = depth[ssc_depth];
782 ssc_depth = double_ssc_depth(ssc_depth);
784 ssc_depth = revise_ssc_depth(ssc_depth, div);
785 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
/* Batched register sequence: low-freq mode, new divider/depth/N, SSC reset. */
787 rtsx_pci_init_cmd(pcr);
788 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
789 CLK_LOW_FREQ, CLK_LOW_FREQ);
790 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
791 0xFF, (div << 4) | mcu_cnt);
792 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
793 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
794 SSC_DEPTH_MASK, ssc_depth);
795 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
796 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
798 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
800 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
801 PHASE_NOT_RESET, PHASE_NOT_RESET);
804 err = rtsx_pci_send_cmd(pcr, 2000);
808 /* Wait SSC clock stable */
809 udelay(SSC_CLOCK_STABLE_WAIT);
810 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
/* Remember the active clock for the fast path above. */
814 pcr->cur_clock = clk;
817 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
/* Power on the selected card slot via the chip-specific hook, if any. */
819 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
821 if (pcr->ops->card_power_on)
822 return pcr->ops->card_power_on(pcr, card);
826 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
/* Power off the selected card slot via the chip-specific hook, if any. */
828 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
830 if (pcr->ops->card_power_off)
831 return pcr->ops->card_power_off(pcr, card);
835 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
/*
 * On single-PMOS readers only one card may be powered at a time: reject
 * access to @card if a different card type is currently present.
 */
837 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
839 static const unsigned int cd_mask[] = {
840 [RTSX_SD_CARD] = SD_EXIST,
841 [RTSX_MS_CARD] = MS_EXIST
844 if (!(pcr->flags & PCR_MS_PMOS)) {
845 /* When using single PMOS, accessing card is not permitted
846 * if the existing card is not the designated one.
848 if (pcr->card_exist & (~cd_mask[card]))
854 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
/* Switch the card I/O voltage (3.3V/1.8V) via the chip-specific hook. */
856 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
858 if (pcr->ops->switch_output_voltage)
859 return pcr->ops->switch_output_voltage(pcr, voltage);
863 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
/*
 * Sample card presence from the interrupt pending register, optionally
 * filtered through the chip's card-detect deglitch hook.
 */
865 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
869 val = rtsx_pci_readl(pcr, RTSX_BIPR);
870 if (pcr->ops->cd_deglitch)
871 val = pcr->ops->cd_deglitch(pcr);
875 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
/*
 * Abort any unfinished transfer and wait briefly (2 ms) for the transfer
 * path to acknowledge via pcr->finish_me (completed in send_cmd /
 * dma_transfer). Skips the hardware stop when the PCI device is gone.
 */
877 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
879 struct completion finish;
881 pcr->finish_me = &finish;
882 init_completion(&finish);
887 if (!pcr->remove_pci)
888 rtsx_pci_stop_cmd(pcr);
890 wait_for_completion_interruptible_timeout(&finish,
891 msecs_to_jiffies(2));
892 pcr->finish_me = NULL;
894 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
/*
 * Delayed-work handler for card insertion/removal (scheduled from the
 * ISR). Under pcr_mutex + lock it snapshots and clears the pending
 * inserted/removed masks, re-reads BIPR to confirm presence, updates
 * pcr->card_exist, and finally notifies the per-slot card_event callbacks
 * outside the mutex.
 */
896 static void rtsx_pci_card_detect(struct work_struct *work)
898 struct delayed_work *dwork;
899 struct rtsx_pcr *pcr;
901 unsigned int card_detect = 0, card_inserted, card_removed;
904 dwork = to_delayed_work(work);
905 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
907 pcr_dbg(pcr, "--> %s\n", __func__);
909 mutex_lock(&pcr->pcr_mutex);
910 spin_lock_irqsave(&pcr->lock, flags);
912 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
913 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
915 irq_status &= CARD_EXIST;
/* Only count an insertion if the card is still physically present. */
916 card_inserted = pcr->card_inserted & irq_status;
917 card_removed = pcr->card_removed;
918 pcr->card_inserted = 0;
919 pcr->card_removed = 0;
921 spin_unlock_irqrestore(&pcr->lock, flags);
923 if (card_inserted || card_removed) {
924 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
925 card_inserted, card_removed);
927 if (pcr->ops->cd_deglitch)
928 card_inserted = pcr->ops->cd_deglitch(pcr);
930 card_detect = card_inserted | card_removed;
932 pcr->card_exist |= card_inserted;
933 pcr->card_exist &= ~card_removed;
936 mutex_unlock(&pcr->pcr_mutex);
938 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
939 pcr->slots[RTSX_SD_CARD].card_event(
940 pcr->slots[RTSX_SD_CARD].p_dev);
941 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
942 pcr->slots[RTSX_MS_CARD].card_event(
943 pcr->slots[RTSX_MS_CARD].p_dev);
/*
 * Handle an over-current event: chip hook if present, otherwise read the
 * OCP status and on SD over-current (now or latched) power the SD card
 * down, gate its output, and clear the latched status.
 */
946 static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
948 if (pcr->ops->process_ocp) {
949 pcr->ops->process_ocp(pcr);
951 if (!pcr->option.ocp_en)
953 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
954 if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
955 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
956 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
957 rtsx_pci_clear_ocpstat(pcr);
/* ISR-side OCP entry point: only act when OCP is enabled in the options. */
963 static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
965 if (pcr->option.ocp_en)
966 rtsx_pci_process_ocp(pcr);
/*
 * Interrupt handler. Reads and write-clears BIPR, bails out if no enabled
 * bit is set or the read returned all-ones (device gone). Then: handles
 * over-current, records SD/MS insert/remove events (actual notification is
 * deferred to the card-detect delayed work), and completes pcr->done with
 * the transfer result for waiters in send_cmd/dma_transfer.
 */
971 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
973 struct rtsx_pcr *pcr = dev_id;
979 spin_lock(&pcr->lock);
981 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
982 /* Clear interrupt flag */
983 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
984 if ((int_reg & pcr->bier) == 0) {
985 spin_unlock(&pcr->lock);
/* 0xFFFFFFFF means the device is no longer responding on the bus. */
988 if (int_reg == 0xFFFFFFFF) {
989 spin_unlock(&pcr->lock);
993 int_reg &= (pcr->bier | 0x7FFFFF);
995 if ((int_reg & SD_OC_INT) ||
996 ((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
997 rtsx_pci_process_ocp_interrupt(pcr);
999 if (int_reg & SD_INT) {
1000 if (int_reg & SD_EXIST) {
1001 pcr->card_inserted |= SD_EXIST;
1003 pcr->card_removed |= SD_EXIST;
1004 pcr->card_inserted &= ~SD_EXIST;
/* RTS5261/5264: clear the express-link-fail flag and advertise SD Express. */
1005 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
1006 rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1007 RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1008 pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
/* Fresh card: reset the DMA-error-based clock back-off counter. */
1011 pcr->dma_error_count = 0;
1014 if (int_reg & MS_INT) {
1015 if (int_reg & MS_EXIST) {
1016 pcr->card_inserted |= MS_EXIST;
1018 pcr->card_removed |= MS_EXIST;
1019 pcr->card_inserted &= ~MS_EXIST;
1023 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1024 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1025 pcr->trans_result = TRANS_RESULT_FAIL;
1027 complete(pcr->done);
1028 } else if (int_reg & TRANS_OK_INT) {
1029 pcr->trans_result = TRANS_RESULT_OK;
1031 complete(pcr->done);
/* Debounce card events by ~200 ms through the delayed work. */
1035 if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1036 schedule_delayed_work(&pcr->carddet_work,
1037 msecs_to_jiffies(200));
1039 spin_unlock(&pcr->lock);
/*
 * Request the device IRQ: exclusive when MSI is in use, shared for legacy
 * INTx. On success records the IRQ number and enables/disables INTx to
 * match the MSI setting.
 */
1043 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1045 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1046 __func__, pcr->msi_en, pcr->pci->irq);
1048 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1049 pcr->msi_en ? 0 : IRQF_SHARED,
1050 DRV_NAME_RTSX_PCI, pcr)) {
1051 dev_err(&(pcr->pci->dev),
1052 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1057 pcr->irq = pcr->pci->irq;
1058 pci_intx(pcr->pci, !pcr->msi_en);
/*
 * Default force-power-down sequence: zero the relink time, enable D3
 * delink mode, then drop all internal power domains via FPDCTL.
 */
1063 static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
1065 /* Set relink_time to 0 */
1066 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1067 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1068 rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1069 RELINK_TIME_MASK, 0);
1071 rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1072 D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1074 rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
/*
 * Common suspend/power-off path: turn off the LED, mask all bus
 * interrupts, set the host sleep state, then run the chip-specific
 * force_power_down hook or the base sequence above. @runtime
 * distinguishes runtime PM from system suspend for the hook.
 */
1077 static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
1079 if (pcr->ops->turn_off_led)
1080 pcr->ops->turn_off_led(pcr);
1082 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1085 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1086 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1088 if (pcr->ops->force_power_down)
1089 pcr->ops->force_power_down(pcr, pm_state, runtime);
1091 rtsx_base_force_power_down(pcr);
/*
 * Enable over-current protection: chip hook if present, otherwise power
 * the OC block (clear OC_POWER_DOWN) and enable OCP interrupt + detection.
 */
1094 void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1096 u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1098 if (pcr->ops->enable_ocp) {
1099 pcr->ops->enable_ocp(pcr);
1101 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1102 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
/*
 * Disable over-current protection: chip hook if present, otherwise mask
 * OCP interrupt/detection and power the OC block back down.
 */
1107 void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1109 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1111 if (pcr->ops->disable_ocp) {
1112 pcr->ops->disable_ocp(pcr);
1114 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1115 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
/*
 * One-time OCP setup: chip hook if present; otherwise, when OCP is
 * enabled in the options, program the detection time (800 ms code),
 * the 800 mA current threshold and the glitch filter, then enable OCP.
 */
1120 void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1122 if (pcr->ops->init_ocp) {
1123 pcr->ops->init_ocp(pcr);
1125 struct rtsx_cr_option *option = &(pcr->option);
1127 if (option->ocp_en) {
1128 u8 val = option->sd_800mA_ocp_thd;
1130 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1131 rtsx_pci_write_register(pcr, REG_OCPPARA1,
1132 SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1133 rtsx_pci_write_register(pcr, REG_OCPPARA2,
1134 SD_OCP_THD_MASK, val);
1135 rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1136 SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1137 rtsx_pci_enable_ocp(pcr);
/* Read the OCP status byte (chip hook or the common register). */
1142 int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1144 if (pcr->ops->get_ocpstat)
1145 return pcr->ops->get_ocpstat(pcr, val);
1147 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
/*
 * Clear latched OCP status: pulse the interrupt-clear and OC-clear bits
 * in REG_OCPCTL (set, then reset).
 */
1150 void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1152 if (pcr->ops->clear_ocpstat) {
1153 pcr->ops->clear_ocpstat(pcr);
1155 u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1156 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1158 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1160 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
/*
 * Enable OOBS (out-of-band signaling) polling. Older chips (not 525A/
 * 5260/5264) additionally need a PHY register 0x01 tweak; all paths set
 * the off/on/VCM timers and the polling control register.
 */
1164 void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1168 if ((PCI_PID(pcr) != PID_525A) &&
1169 (PCI_PID(pcr) != PID_5260) &&
1170 (PCI_PID(pcr) != PID_5264)) {
1171 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1173 rtsx_pci_write_phy_register(pcr, 0x01, val);
1175 rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1176 rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1177 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1178 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
/* Reverse of rtsx_pci_enable_oobs_polling(): restore non-polling values. */
1182 void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1186 if ((PCI_PID(pcr) != PID_525A) &&
1187 (PCI_PID(pcr) != PID_5260) &&
1188 (PCI_PID(pcr) != PID_5264)) {
1189 rtsx_pci_read_phy_register(pcr, 0x01, &val);
1191 rtsx_pci_write_phy_register(pcr, 0x01, val);
1193 rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1194 rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
/*
 * Power off an SD card at 3.3V: gate the card clocks and output enable,
 * cut slot power, then release the pull controls.
 */
1198 int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1200 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1201 MS_CLK_EN | SD40_CLK_EN, 0);
1202 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1203 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1207 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
/*
 * Power off a Memory Stick card at 3.3V: gate clocks, release pull
 * controls, gate the MS output, then cut slot power.
 */
1212 int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1214 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1215 MS_CLK_EN | SD40_CLK_EN, 0);
1217 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1219 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1220 rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
/*
 * One-time hardware bring-up after probe/resume: program the command
 * buffer address, enable bus interrupts, power up SSC, run the optional
 * PHY optimization hook, then apply a long batched register sequence
 * (clock dividers, link state, drive strength, SSC depth per chip
 * generation, interrupt semantics). Afterwards: per-PID quirks, OCP init,
 * CLKREQ# clock power management, the chip's extra_init_hw hook, and an
 * initial pcr->card_exist snapshot (no CD interrupt fires for a card
 * already inserted at probe time).
 */
1225 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1227 struct pci_dev *pdev = pcr->pci;
1230 if (PCI_PID(pcr) == PID_5228)
1231 rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1232 RTS5228_LDO1_SR_0_5);
1234 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1236 rtsx_pci_enable_bus_int(pcr);
1239 if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
1240 /* Gating real mcu clock */
1241 err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1242 RTS5261_MCU_CLOCK_GATING, 0);
1243 err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1246 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1251 /* Wait SSC power stable */
1254 rtsx_disable_aspm(pcr);
1255 if (pcr->ops->optimize_phy) {
1256 err = pcr->ops->optimize_phy(pcr);
1261 rtsx_pci_init_cmd(pcr);
1263 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1264 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1266 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1267 /* Disable card clock */
1268 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1269 /* Reset delink mode */
1270 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1271 /* Card driving select */
1272 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1273 0xFF, pcr->card_drive_sel);
1274 /* Enable SSC Clock */
1275 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1276 0xFF, SSC_8X_EN | SSC_SEL_4M);
/* SSC depth differs per chip generation; 0x12 is the legacy default. */
1277 if (PCI_PID(pcr) == PID_5261)
1278 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1279 RTS5261_SSC_DEPTH_2M);
1280 else if (PCI_PID(pcr) == PID_5228)
1281 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1282 RTS5228_SSC_DEPTH_2M);
1283 else if (is_version(pcr, 0x5264, IC_VER_A))
1284 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
1285 else if (PCI_PID(pcr) == PID_5264)
1286 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1287 RTS5264_SSC_DEPTH_2M);
1289 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1291 /* Disable cd_pwr_save */
1292 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1293 /* Clear Link Ready Interrupt */
1294 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1295 LINK_RDY_INT, LINK_RDY_INT);
1296 /* Enlarge the estimation window of PERST# glitch
1297 * to reduce the chance of invalid card interrupt
1299 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1300 /* Update RC oscillator to 400k
1301 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1304 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1305 /* Set interrupt write clear
1306 * bit 1: U_elbi_if_rd_clr_en
1307 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1308 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1310 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1312 err = rtsx_pci_send_cmd(pcr, 100);
/* Per-PID quirks applied after the batched sequence. */
1316 switch (PCI_PID(pcr)) {
1324 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1331 rtsx_pci_init_ocp(pcr);
1333 /* Enable clk_request_n to enable clock power management */
1334 pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1335 0, PCI_EXP_LNKCTL_CLKREQ_EN);
1336 /* Enter L1 when host tx idle */
1337 pci_write_config_byte(pdev, 0x70F, 0x5B);
1339 if (pcr->ops->extra_init_hw) {
1340 err = pcr->ops->extra_init_hw(pcr);
1345 if (pcr->aspm_mode == ASPM_MODE_REG)
1346 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1348 /* No CD interrupt if probing driver with card inserted.
1349 * So we need to initialize pcr->card_exist here.
1351 if (pcr->ops->cd_deglitch)
1352 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1354 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
/*
 * rtsx_pci_init_chip - per-chip software initialization, done once at probe.
 *
 * Installs the chip-family specific parameter set based on the PCI product
 * ID, allocates the slot table, snapshots the ASPM / L1SS / LTR state that
 * firmware left in PCI config space into pcr/option fields, applies vendor
 * settings, and finally runs the register-level init (rtsx_pci_init_hw()).
 */
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
	struct rtsx_cr_option *option = &(pcr->option);
	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);
	/* Select chip-specific parameters/ops by PCI product ID */
	switch (PCI_PID(pcr)) {
		rts5209_init_params(pcr);
		rts5229_init_params(pcr);
		rtl8411_init_params(pcr);
		rts5227_init_params(pcr);
		rts522a_init_params(pcr);
		rts5249_init_params(pcr);
		rts524a_init_params(pcr);
		rts525a_init_params(pcr);
		rtl8411b_init_params(pcr);
		rtl8402_init_params(pcr);
		rts5260_init_params(pcr);
		rts5261_init_params(pcr);
		rts5228_init_params(pcr);
		rts5264_init_params(pcr);
	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
			PCI_PID(pcr), pcr->ic_version);
	/* One rtsx_slot per slot; num_slots was set by the *_init_params hook */
	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
	/* Record whether ASPM L1 is currently enabled, either from PCIe
	 * config space (CFG mode) or from the chip's force register (REG mode).
	 */
	if (pcr->aspm_mode == ASPM_MODE_CFG) {
		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
			pcr->aspm_enabled = true;
			pcr->aspm_enabled = false;
	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
		/* Both FORCE_ASPM_CTL bits set => ASPM treated as disabled */
		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
			pcr->aspm_enabled = false;
			pcr->aspm_enabled = true;
	/* Mirror the L1 substate enables from the L1SS extended capability
	 * into device flags so later power-saving code can test them cheaply.
	 */
	l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
		pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
		if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
			rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
			rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
		if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
			rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
			rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
		if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
			rtsx_set_dev_flag(pcr, PM_L1_1_EN);
			rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
		if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
			rtsx_set_dev_flag(pcr, PM_L1_2_EN);
			rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
		/* LTR is only usable when the host already enabled it */
		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
		if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
			option->ltr_enabled = true;
			option->ltr_active = true;
			option->ltr_enabled = false;
		/* Only force CLKREQ# low when no L1 substate is usable */
		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
					| PM_L1_1_EN | PM_L1_2_EN))
			option->force_clkreq_0 = false;
			option->force_clkreq_0 = true;
		option->ltr_enabled = false;
		option->force_clkreq_0 = true;
	/* Let chip-specific code override defaults from vendor settings */
	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);
	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
			pcr->sd30_drive_sel_1v8);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
			pcr->sd30_drive_sel_3v3);
	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
			pcr->card_drive_sel);
	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
/*
 * rtsx_pci_probe - PCI probe: bring up one card-reader function.
 *
 * Sets up 32-bit DMA, maps the register BAR, allocates the shared coherent
 * buffer used for the host command ring and scatter-gather table, wires up
 * the interrupt (MSI preferred, INTx fallback), initializes the chip, then
 * registers the MFD child cells (the SD/MMC host) and enables runtime PM.
 */
static int rtsx_pci_probe(struct pci_dev *pcidev,
			  const struct pci_device_id *id)
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	int ret, i, bar = 0;
	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);
	/* Controller only does 32-bit DMA addressing */
	ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
	ret = pci_enable_device(pcidev);
	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	/* Allocate a unique instance id, used when naming the MFD children */
	idr_preload(GFP_KERNEL);
	spin_lock(&rtsx_pci_lock);
	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
	spin_unlock(&rtsx_pci_lock);
	dev_set_drvdata(&pcidev->dev, handle);
	/* 525A/5264: registers presumably live in a different BAR — the
	 * conditional's body selects it (NOTE(review): verify against full
	 * source).
	 */
	if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
	len = pci_resource_len(pcidev, bar);
	base = pci_resource_start(pcidev, bar);
	pcr->remap_addr = ioremap(base, len);
	if (!pcr->remap_addr) {
	/* One coherent buffer, split below into command ring + SG table */
	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
	if (pcr->rtsx_resv_buf == NULL) {
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
	/* Try MSI first (module parameter); fall back to legacy INTx */
	pcr->msi_en = msi_en;
	ret = pci_enable_msi(pcidev);
	pcr->msi_en = false;
	ret = rtsx_pci_acquire_irq(pcr);
	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);
	ret = rtsx_pci_init_chip(pcr);
	/* Hand the pcr handle to every MFD child cell (SD/MMC host) */
	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	pm_runtime_allow(&pcidev->dev);
	pm_runtime_put(&pcidev->dev);
	/* Error unwinding below: release in reverse order of acquisition */
	free_irq(pcr->irq, (void *)pcr);
	pci_disable_msi(pcr->pci);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	iounmap(pcr->remap_addr);
	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
/*
 * rtsx_pci_remove - PCI remove: tear down probe's work in reverse order.
 *
 * Blocks runtime PM, masks all controller interrupts, flushes the
 * card-detect worker, removes the MFD children, and then releases the DMA
 * buffer, IRQ, MSI, MMIO mapping, PCI resources and the idr slot.
 */
static void rtsx_pci_remove(struct pci_dev *pcidev)
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;
	/* Tell in-flight paths the device is going away */
	pcr->remove_pci = true;
	/* Keep the device powered-up/awake while we undo probe */
	pm_runtime_get_sync(&pcidev->dev);
	pm_runtime_forbid(&pcidev->dev);
	/* Disable interrupts at the pcr level */
	spin_lock_irq(&pcr->lock);
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	spin_unlock_irq(&pcr->lock);
	cancel_delayed_work_sync(&pcr->carddet_work);
	mfd_remove_devices(&pcidev->dev);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);
	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
/*
 * rtsx_pci_suspend - system sleep callback: power the reader down for S3.
 */
static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
	struct pci_dev *pcidev = to_pci_dev(dev_d);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;
	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
	/* No card-detect work may run while we power off */
	cancel_delayed_work_sync(&pcr->carddet_work);
	mutex_lock(&pcr->pcr_mutex);
	rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
	mutex_unlock(&pcr->pcr_mutex);
/*
 * rtsx_pci_resume - system wake callback: clear the host sleep state and
 * rerun the full hardware init sequence.
 */
static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
	struct pci_dev *pcidev = to_pci_dev(dev_d);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;
	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
	mutex_lock(&pcr->pcr_mutex);
	/* Leave the host sleep state before reprogramming the chip */
	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	ret = rtsx_pci_init_hw(pcr);
	mutex_unlock(&pcr->pcr_mutex);
/* Turn ASPM on, preferring the chip-specific hook when one is provided. */
static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, true);
	rtsx_comm_set_aspm(pcr, true);
/*
 * rtsx_comm_pm_power_saving - common idle power saving.
 *
 * Programs the L1-off LTR latency (after an optional snooze-test delay),
 * gates L1SS power when that device flag is set, then enables ASPM.
 */
static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
	struct rtsx_cr_option *option = &pcr->option;
	if (option->ltr_enabled) {
		u32 latency = option->ltr_l1off_latency;
		/* Busy-wait delay used only in the snooze-test configuration */
		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
			mdelay(option->l1_snooze_delay);
		rtsx_set_ltr_latency(pcr, latency);
	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
	rtsx_enable_aspm(pcr);
/* Single entry point for idle power saving; delegates to the common path. */
static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
	rtsx_comm_pm_power_saving(pcr);
/*
 * rtsx_pci_shutdown - power the reader off and quiesce its interrupt
 * sources at system shutdown/reboot.
 */
static void rtsx_pci_shutdown(struct pci_dev *pcidev)
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;
	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
	rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
	pci_disable_device(pcidev);
	free_irq(pcr->irq, (void *)pcr);
	pci_disable_msi(pcr->pci);
/*
 * rtsx_pci_runtime_idle - runtime-PM idle callback.
 *
 * Marks the device idle, turns off LED activity, applies power saving,
 * and schedules the real runtime suspend for 10 seconds later.
 */
static int rtsx_pci_runtime_idle(struct device *device)
	struct pci_dev *pcidev = to_pci_dev(device);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;
	dev_dbg(device, "--> %s\n", __func__);
	mutex_lock(&pcr->pcr_mutex);
	pcr->state = PDEV_STAT_IDLE;
	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);
	rtsx_pm_power_saving(pcr);
	mutex_unlock(&pcr->pcr_mutex);
	/* Defer the actual suspend so quick back-to-back I/O stays fast */
	pm_schedule_suspend(device, 10000);
/* rtsx_pci_runtime_suspend - runtime-PM suspend: full power-off. */
static int rtsx_pci_runtime_suspend(struct device *device)
	struct pci_dev *pcidev = to_pci_dev(device);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;
	dev_dbg(device, "--> %s\n", __func__);
	/* Card-detect must be quiescent before cutting power */
	cancel_delayed_work_sync(&pcr->carddet_work);
	mutex_lock(&pcr->pcr_mutex);
	rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
	mutex_unlock(&pcr->pcr_mutex);
/*
 * rtsx_pci_runtime_resume - runtime-PM resume.
 *
 * Clears the host sleep state, reruns hardware init, and notifies the SD
 * slot so a card change that happened while powered down is noticed.
 */
static int rtsx_pci_runtime_resume(struct device *device)
	struct pci_dev *pcidev = to_pci_dev(device);
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;
	dev_dbg(device, "--> %s\n", __func__);
	mutex_lock(&pcr->pcr_mutex);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	rtsx_pci_init_hw(pcr);
	/* Kick the SD slot's card-event handler, if a slot device exists */
	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	mutex_unlock(&pcr->pcr_mutex);
1855 #else /* CONFIG_PM */
1857 #define rtsx_pci_shutdown NULL
1858 #define rtsx_pci_runtime_suspend NULL
1859 #define rtsx_pic_runtime_resume NULL
1861 #endif /* CONFIG_PM */
/* System-sleep and runtime PM callbacks; the SET_*_PM_OPS macros expand
 * to nothing when the corresponding PM support is not configured.
 */
static const struct dev_pm_ops rtsx_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
/* PCI driver glue: device table, probe/remove, PM ops and shutdown hook. */
static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = rtsx_pci_remove,
	.driver.pm = &rtsx_pci_pm_ops,
	.shutdown = rtsx_pci_shutdown,
/* Generates the module init/exit boilerplate that (un)registers the driver */
module_pci_driver(rtsx_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");