1 // SPDX-License-Identifier: GPL-2.0-only
3 * HiSilicon Hixxxx UFS Driver
5 * Copyright (c) 2016-2017 Linaro Ltd.
6 * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
9 #include <linux/time.h>
11 #include <linux/of_address.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/platform_device.h>
14 #include <linux/reset.h>
17 #include "ufshcd-pltfrm.h"
21 #include "ufs_quirks.h"
/*
 * ufs_hisi_check_hibern8 - poll the M-PHY TX FSM state of both lanes until
 * each reports TX_FSM_HIBERN8, bounded by HBRN8_POLL_TOUT_MS.
 *
 * Returns 0 when both lanes reached HIBERN8; non-zero when the DME reads
 * failed or a lane never reached HIBERN8 within the timeout.
 * NOTE(review): this extraction is missing some original lines (declarations,
 * braces, return paths) — comments added only, code untouched.
 */
23 static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
28 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
/* Read TX FSM state for lane 0 and lane 1 via DME attribute access */
31 err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
33 err |= ufshcd_dme_get(hba,
34 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
/* Stop polling on a read error, or once both lanes are in HIBERN8 */
35 if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
36 tx_fsm_val_1 == TX_FSM_HIBERN8))
39 /* sleep for max. 200us */
40 usleep_range(100, 200);
41 } while (time_before(jiffies, timeout));
44 * we might have scheduled out for long during polling so
45 * check the state again.
/* Timed out while polling: take one final reading of both lanes */
47 if (time_after(jiffies, timeout)) {
48 err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
50 err |= ufshcd_dme_get(hba,
51 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
/* Report a read failure distinctly from a wrong FSM state */
55 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
57 } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
58 tx_fsm_val_1 != TX_FSM_HIBERN8) {
60 dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
61 __func__, tx_fsm_val_0, tx_fsm_val_1);
/*
 * ufs_hisi_clk_init - set up the UFS reference clock path in the SoC
 * system-control block: gate the ref clock, select the ref clock source,
 * drop the ref clock isolation, then re-enable the M-PHY ref clock.
 */
67 static void ufs_hisi_clk_init(struct ufs_hba *hba)
69 struct ufs_hisi_host *host = ufshcd_get_variant(hba);
/* Gate the PHY reference clock before reconfiguring it */
71 ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
/* Read back to confirm the clock-enable bit actually cleared */
72 if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
/* Clear ref clock source select and ref clock isolation */
75 ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
76 ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
77 /* open mphy ref clk */
78 ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
/*
 * ufs_hisi_soc_init - bring the UFS host controller and M-PHY out of reset.
 *
 * Sequence (order is hardware-mandated): assert controller reset, power up
 * the MTCMOS power switch, signal PWR ready, place the UFS device in reset,
 * program cfg/ref clock frequencies, bypass the UFS clock gate, enable the
 * power-switch clock, drop all isolation, release lp_reset_n, then release
 * the UFS device reset and finally deassert the controller reset.
 * NOTE(review): some original lines (braces, a udelay, register args) are
 * missing from this extraction; only comments were added here.
 */
81 static void ufs_hisi_soc_init(struct ufs_hba *hba)
83 struct ufs_hisi_host *host = ufshcd_get_variant(hba);
/* Hold the controller in reset while reprogramming the SoC glue */
86 if (!IS_ERR(host->rst))
87 reset_control_assert(host->rst);
/* Power up the UFS power-switch MTCMOS */
90 ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
92 /* notify PWR ready */
93 ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
/* Assert UFS device reset (mask bit set, value bit 0) */
94 ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
95 UFS_DEVICE_RESET_CTRL);
97 reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
98 reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
99 /* set cfg clk freq */
100 ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
101 /* set ref clk freq */
102 ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
103 /* bypass ufs clk gate */
104 ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
106 ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
/* Enable the power-switch clock */
109 ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
110 /* disable ufshc iso */
111 ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
112 /* disable phy iso */
113 ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
114 /* notice iso disable */
115 ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
117 /* disable lp_reset_n */
118 ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
/* Release the UFS device reset line */
121 ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
122 UFS_DEVICE_RESET_CTRL);
127 * enable the fix of linereset recovery,
128 * and enable rx_reset/tx_rest beat
129 * enable ref_clk_en override(bit5) &
130 * override value = 1(bit4), with mask
132 ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
/* Finally release the host controller reset */
134 if (!IS_ERR(host->rst))
135 reset_control_deassert(host->rst);
/*
 * ufs_hisi_link_startup_pre_change - UniPro/M-PHY attribute programming
 * performed before DME_LINKSTARTUP is issued.
 *
 * Disables the M-PHY, programs vendor CB override and RX override attributes
 * (with extra tuning for the 10nm PHY on HI3670, gated by
 * UFS_HISI_CAP_PHY10nm), latches each group with VS_MphyCfgUpdt, waits for
 * HIBERN8, disables auto-hibernate, turns off host TX LCC and closes
 * VS_Mk2ExtnSupport.
 * NOTE(review): braces/returns from the original are missing in this
 * extraction; the attribute writes and their order are untouched.
 */
138 static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
140 struct ufs_hisi_host *host = ufshcd_get_variant(hba);
145 /* Unipro VS_mphy_disable */
146 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
148 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
150 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
151 /* MPHY CBOVRCTRL2 */
152 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
153 /* MPHY CBOVRCTRL3 */
154 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
/* Extra CB overrides that only apply to the 10nm PHY variant */
156 if (host->caps & UFS_HISI_CAP_PHY10nm) {
157 /* MPHY CBOVRCTRL4 */
158 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
159 /* MPHY CBOVRCTRL5 */
160 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
163 /* Unipro VS_MphyCfgUpdt */
164 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
165 /* MPHY RXOVRCTRL4 rx0 */
166 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
167 /* MPHY RXOVRCTRL4 rx1 */
168 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
169 /* MPHY RXOVRCTRL5 rx0 */
170 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
171 /* MPHY RXOVRCTRL5 rx1 */
172 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
173 /* MPHY RXSQCONTROL rx0 */
174 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
175 /* MPHY RXSQCONTROL rx1 */
176 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
177 /* Unipro VS_MphyCfgUpdt */
178 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
180 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
181 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
/* Hibern8/activate-time tuning differs between the 10nm PHY and others */
183 if (host->caps & UFS_HISI_CAP_PHY10nm) {
184 /* RX_Hibern8Time_Capability*/
185 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
186 /* RX_Hibern8Time_Capability*/
187 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
188 /* RX_Min_ActivateTime */
189 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
190 /* RX_Min_ActivateTime*/
191 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
/* Non-10nm PHY: RX_Min_ActivateTime = 0x7 on both lanes */
194 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
196 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
199 /* Gear3 Synclength */
200 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
201 /* Gear3 Synclength */
202 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
203 /* Gear2 Synclength */
204 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
205 /* Gear2 Synclength */
206 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
207 /* Gear1 Synclength */
208 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
209 /* Gear1 Synclength */
210 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
/* TX/RX attribute 0x000F = 0x5 on both directions */
212 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
214 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
/* Latch the configuration via VS_MphyCfgUpdt */
216 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
217 /* Unipro VS_mphy_disable */
218 ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
221 "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);
223 /* Unipro VS_mphy_disable */
224 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
/* Wait until both TX lanes report HIBERN8 before continuing */
225 err = ufs_hisi_check_hibern8(hba);
227 dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
/* HCLKDIV programming only applies to the non-10nm PHY */
229 if (!(host->caps & UFS_HISI_CAP_PHY10nm))
230 ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
232 /* disable auto H8 */
233 reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
234 reg = reg & (~UFS_AHIT_AH8ITV_MASK);
235 ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
237 /* Unipro PA_Local_TX_LCC_Enable */
238 ufshcd_disable_host_tx_lcc(hba);
239 /* close Unipro VS_Mk2ExtnSupport */
240 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
241 ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
243 /* Ensure close success */
244 dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
/*
 * ufs_hisi_link_startup_post_change - tuning applied after the link is up:
 * zero the DL credit/ack thresholds, restore the UFS clock gate (undoing the
 * bypass set in ufs_hisi_soc_init) and arm a debug symbol counter.
 */
250 static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
252 struct ufs_hisi_host *host = ufshcd_get_variant(hba);
254 /* Unipro DL_AFC0CreditThreshold */
255 ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
256 /* Unipro DL_TC0OutAckThreshold */
257 ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
258 /* Unipro DL_TC0TXFCThreshold */
259 ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
261 /* not bypass ufs clk gate */
262 ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
264 ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
267 /* select received symbol cnt */
268 ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
269 /* reset counter0 and enable */
270 ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
/*
 * ufs_hisi_link_startup_notify - vops link_startup_notify hook; dispatches
 * to the pre-/post-change helpers above depending on @status.
 * NOTE(review): the PRE_CHANGE/POST_CHANGE case labels are among the lines
 * missing from this extraction.
 */
275 static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
276 enum ufs_notify_change_status status)
282 err = ufs_hisi_link_startup_pre_change(hba);
285 err = ufs_hisi_link_startup_post_change(hba);
/* Fill @hisi_param with the core's default power-mode capabilities. */
294 static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
296 ufshcd_init_pwr_dev_param(hisi_param);
/*
 * ufs_hisi_pwr_change_pre_change - UniPro attribute tuning applied before a
 * power-mode change: 10nm-PHY-specific sync-length/SaveConfigTime values,
 * a device-quirk workaround for VS_DebugSaveConfigTime, and the
 * PA_PWRModeUserData / DME protection-timeout attribute set.
 */
299 static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
301 struct ufs_hisi_host *host = ufshcd_get_variant(hba);
303 if (host->caps & UFS_HISI_CAP_PHY10nm) {
305 * Boston platform need to set SaveConfigTime to 0x13,
306 * and change sync length to maximum value
308 /* VS_DebugSaveConfigTime */
309 ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
/* Sync lengths raised to 0x4f for the 10nm PHY */
311 ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
313 ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
315 ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
317 ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
319 ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
/* Latch via VS_MphyCfgUpdt */
320 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
/* Device-side quirk: some UFS parts need VS_DebugSaveConfigTime = 0x10 */
323 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
324 pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
325 /* VS_DebugSaveConfigTime */
326 ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
328 ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
332 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
334 ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
335 /*PA_PWRModeUserData0 = 8191, default is 0*/
336 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
337 /*PA_PWRModeUserData1 = 65535, default is 0*/
338 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
339 /*PA_PWRModeUserData2 = 32767, default is 0*/
340 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
341 /*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
342 ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
343 /*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
344 ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
345 /*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
346 ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
347 /*PA_PWRModeUserData3 = 8191, default is 0*/
348 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
349 /*PA_PWRModeUserData4 = 65535, default is 0*/
350 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
351 /*PA_PWRModeUserData5 = 32767, default is 0*/
352 ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
353 /*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
354 ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
355 /*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
356 ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
357 /*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
358 ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
/*
 * ufs_hisi_pwr_change_notify - vops pwr_change_notify hook. On PRE_CHANGE it
 * negotiates @dev_req_params from host capabilities vs @dev_max_params and
 * applies the pre-change attribute tuning.
 * NOTE(review): the PRE_CHANGE/POST_CHANGE switch and return statements are
 * among the lines missing from this extraction.
 */
361 static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
362 enum ufs_notify_change_status status,
363 struct ufs_pa_layer_attr *dev_max_params,
364 struct ufs_pa_layer_attr *dev_req_params)
366 struct ufs_dev_params ufs_hisi_cap;
/* Guard against a NULL output parameter from the core */
369 if (!dev_req_params) {
371 "%s: incoming dev_req_params is NULL\n", __func__);
/* Compute the agreed power mode from host caps and device max */
378 ufs_hisi_set_dev_cap(&ufs_hisi_cap);
379 ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
380 dev_max_params, dev_req_params);
383 "%s: failed to determine capabilities\n", __func__);
387 ufs_hisi_pwr_change_pre_change(hba);
/*
 * ufs_hisi_suspend_prepare - PM .prepare hook; `false` tells the core not to
 * treat runtime-PM and system-PM levels as equivalent here.
 */
399 static int ufs_hisi_suspend_prepare(struct device *dev)
401 /* RPM and SPM are different. Refer ufs_hisi_suspend() */
402 return __ufshcd_suspend_prepare(dev, false);
/*
 * ufs_hisi_suspend - vops suspend hook. For a system (non-runtime) suspend
 * at POST_CHANGE, gate the PHY ref clock and clear the PCS ref_dig_clk
 * override, then mark the host suspended so a repeat call is a no-op.
 */
405 static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
406 enum ufs_notify_change_status status)
408 struct ufs_hisi_host *host = ufshcd_get_variant(hba);
/* Nothing to do before the core performs the suspend */
410 if (status == PRE_CHANGE)
/* Runtime PM keeps the clocks; only system suspend gates them */
413 if (pm_op == UFS_RUNTIME_PM)
/* Already gated by a previous suspend — skip */
416 if (host->in_suspend) {
421 ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
423 /* set ref_dig_clk override of PHY PCS to 0 */
424 ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
426 host->in_suspend = true;
/*
 * ufs_hisi_resume - vops resume hook; mirror of ufs_hisi_suspend. Restores
 * the PCS ref_dig_clk override and re-enables the PHY ref clock, but only
 * if a prior suspend actually gated them (in_suspend flag).
 */
431 static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
433 struct ufs_hisi_host *host = ufshcd_get_variant(hba);
435 if (!host->in_suspend)
438 /* set ref_dig_clk override of PHY PCS to 1 */
439 ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
/* Ungate the PHY reference clock */
441 ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
443 host->in_suspend = false;
/*
 * ufs_hisi_get_resource - map the second MMIO region (index 1) of the
 * platform device, which holds the UFS system-control registers.
 * Returns 0 on success or the devm ioremap error code.
 */
447 static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
449 struct device *dev = host->hba->dev;
450 struct platform_device *pdev = to_platform_device(dev);
452 /* get resource of ufs sys ctrl */
453 host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
454 return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
/* Choose default PM levels: shallow for runtime PM, deeper for system PM. */
457 static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
459 hba->rpm_lvl = UFS_PM_LVL_1;
460 hba->spm_lvl = UFS_PM_LVL_3;
464 * ufs_hisi_init_common
465 * @hba: host controller instance
/*
 * Shared init for both SoC variants: allocate the variant host structure,
 * fetch the "rst" reset control, set PM levels and map the sys-ctrl MMIO.
 * On the error path the variant pointer is cleared again.
 */
467 static int ufs_hisi_init_common(struct ufs_hba *hba)
470 struct device *dev = hba->dev;
471 struct ufs_hisi_host *host;
473 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
/* Attach the variant data to the hba before further setup */
478 ufshcd_set_variant(hba, host);
480 host->rst = devm_reset_control_get(dev, "rst");
481 if (IS_ERR(host->rst)) {
482 dev_err(dev, "%s: failed to get reset control\n", __func__);
483 err = PTR_ERR(host->rst);
487 ufs_hisi_set_pm_lvl(hba);
489 err = ufs_hisi_get_resource(host);
/* Error path: detach the variant data again */
496 ufshcd_set_variant(hba, NULL);
/* HI3660 vops .init: common init, then clock and SoC bring-up. */
500 static int ufs_hi3660_init(struct ufs_hba *hba)
503 struct device *dev = hba->dev;
505 ret = ufs_hisi_init_common(hba);
507 dev_err(dev, "%s: ufs common init fail\n", __func__);
511 ufs_hisi_clk_init(hba);
513 ufs_hisi_soc_init(hba);
/*
 * HI3670 vops .init: same as HI3660 plus the UFS_HISI_CAP_PHY10nm flag,
 * which gates the 10nm-PHY-specific tuning elsewhere in this driver.
 */
518 static int ufs_hi3670_init(struct ufs_hba *hba)
521 struct device *dev = hba->dev;
522 struct ufs_hisi_host *host;
524 ret = ufs_hisi_init_common(hba);
526 dev_err(dev, "%s: ufs common init fail\n", __func__);
530 ufs_hisi_clk_init(hba);
532 ufs_hisi_soc_init(hba);
534 /* Add cap for 10nm PHY variant on HI3670 SoC */
535 host = ufshcd_get_variant(hba);
536 host->caps |= UFS_HISI_CAP_PHY10nm;
/* Variant-ops table for the HI3660 SoC. */
541 static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
543 .init = ufs_hi3660_init,
544 .link_startup_notify = ufs_hisi_link_startup_notify,
545 .pwr_change_notify = ufs_hisi_pwr_change_notify,
546 .suspend = ufs_hisi_suspend,
547 .resume = ufs_hisi_resume,
/* Variant-ops table for the HI3670 SoC; differs only in .init. */
550 static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
552 .init = ufs_hi3670_init,
553 .link_startup_notify = ufs_hisi_link_startup_notify,
554 .pwr_change_notify = ufs_hisi_pwr_change_notify,
555 .suspend = ufs_hisi_suspend,
556 .resume = ufs_hisi_resume,
/* Device-tree match table mapping each compatible to its vops. */
559 static const struct of_device_id ufs_hisi_of_match[] = {
560 { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
561 { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
565 MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
/*
 * Probe: look up the matching vops from the DT compatible and hand off to
 * the common UFS platform init.
 */
567 static int ufs_hisi_probe(struct platform_device *pdev)
569 const struct of_device_id *of_id;
571 of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
573 return ufshcd_pltfrm_init(pdev, of_id->data);
/*
 * Remove: tear down the hba registered at probe time.
 * NOTE(review): the body's teardown call(s) are among the lines missing
 * from this extraction.
 */
576 static int ufs_hisi_remove(struct platform_device *pdev)
578 struct ufs_hba *hba = platform_get_drvdata(pdev);
/* PM ops: core ufshcd suspend/resume plus the variant prepare/complete. */
584 static const struct dev_pm_ops ufs_hisi_pm_ops = {
585 SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
586 SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
587 .prepare = ufs_hisi_suspend_prepare,
588 .complete = ufshcd_resume_complete,
/* Platform driver registration for the "ufshcd-hisi" device. */
591 static struct platform_driver ufs_hisi_pltform = {
592 .probe = ufs_hisi_probe,
593 .remove = ufs_hisi_remove,
594 .shutdown = ufshcd_pltfrm_shutdown,
596 .name = "ufshcd-hisi",
597 .pm = &ufs_hisi_pm_ops,
598 .of_match_table = of_match_ptr(ufs_hisi_of_match),
/* Module metadata. */
603 MODULE_LICENSE("GPL");
604 MODULE_ALIAS("platform:ufshcd-hisi");
605 MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");