1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
4 */
6 #include <linux/acpi.h>
7 #include <linux/time.h>
9 #include <linux/delay.h>
10 #include <linux/interconnect.h>
11 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/phy/phy.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/reset-controller.h>
17 #include <linux/devfreq.h>
19 #include <soc/qcom/ice.h>
21 #include <ufs/ufshcd.h>
22 #include "ufshcd-pltfrm.h"
23 #include <ufs/unipro.h>
25 #include <ufs/ufshci.h>
26 #include <ufs/ufs_quirks.h>
28 #define MCQ_QCFGPTR_MASK GENMASK(7, 0)
29 #define MCQ_QCFGPTR_UNIT 0x200
30 #define MCQ_SQATTR_OFFSET(c) \
31 ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
32 #define MCQ_QCFG_SIZE 0x40
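/*
 * Editor's note (worked example, not in the original source): for an MCQ
 * capabilities word c = 0x00050000, MCQ_SQATTR_OFFSET(c) =
 * ((0x00050000 >> 16) & 0xFF) * 0x200 = 5 * 0x200 = 0xA00, i.e. the
 * queue config space starts 0xA00 bytes into the register region.
 */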
50 #define QCOM_UFS_MAX_GEAR 4
51 #define QCOM_UFS_MAX_LANE 2
61 static const struct __ufs_qcom_bw_table {
64 } ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = {
65 [MODE_MIN][0][0] = { 0, 0 }, /* Bandwidth values in KB/s */
66 [MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922, 1000 },
67 [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
68 [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
69 [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
70 [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
71 [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
72 [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
73 [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
74 [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
75 [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
76 [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
77 [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
78 [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
79 [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
80 [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
81 [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
82 [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
83 [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
84 [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
85 [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
86 [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
87 [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
88 [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
89 [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
90 [MODE_MAX][0][0] = { 7643136, 307200 },
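/*
 * Editor's note: observe the pattern above - within a mode, each higher
 * gear roughly doubles mem_bw, and a second lane doubles it again
 * (e.g. PWM: 922 -> 1844 -> 3688 -> 7376 KB/s on one lane).
 */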
93 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
95 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
96 static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
98 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
100 return container_of(rcd, struct ufs_qcom_host, rcdev);
103 #ifdef CONFIG_SCSI_UFS_CRYPTO
105 static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
107 if (host->hba->caps & UFSHCD_CAP_CRYPTO)
108 qcom_ice_enable(host->ice);
111 static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
113 struct ufs_hba *hba = host->hba;
114 struct device *dev = hba->dev;
115 struct qcom_ice *ice;
117 ice = of_qcom_ice_get(dev);
118 if (ice == ERR_PTR(-EOPNOTSUPP)) {
119 dev_warn(dev, "Disabling inline encryption support\n");
123 if (IS_ERR_OR_NULL(ice))
124 return PTR_ERR_OR_ZERO(ice);
127 hba->caps |= UFSHCD_CAP_CRYPTO;
132 static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
134 if (host->hba->caps & UFSHCD_CAP_CRYPTO)
135 return qcom_ice_resume(host->ice);
140 static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
142 if (host->hba->caps & UFSHCD_CAP_CRYPTO)
143 return qcom_ice_suspend(host->ice);
148 static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
149 const union ufs_crypto_cfg_entry *cfg,
152 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
153 union ufs_crypto_cap_entry cap;
154 bool config_enable =
155 cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE;
157 /* Only AES-256-XTS has been tested so far. */
158 cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
159 if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
160 cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
164 return qcom_ice_program_key(host->ice,
165 QCOM_ICE_CRYPTO_ALG_AES_XTS,
166 QCOM_ICE_CRYPTO_KEY_SIZE_256,
168 cfg->data_unit_size, slot);
170 return qcom_ice_evict_key(host->ice, slot);
175 #define ufs_qcom_ice_program_key NULL
177 static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
181 static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
186 static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
191 static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
197 static int ufs_qcom_host_clk_get(struct device *dev,
198 const char *name, struct clk **clk_out, bool optional)
203 clk = devm_clk_get(dev, name);
211 if (optional && err == -ENOENT) {
216 if (err != -EPROBE_DEFER)
217 dev_err(dev, "failed to get %s err %d\n", name, err);
222 static int ufs_qcom_host_clk_enable(struct device *dev,
223 const char *name, struct clk *clk)
227 err = clk_prepare_enable(clk);
229 dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
234 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
236 if (!host->is_lane_clks_enabled)
239 clk_disable_unprepare(host->tx_l1_sync_clk);
240 clk_disable_unprepare(host->tx_l0_sync_clk);
241 clk_disable_unprepare(host->rx_l1_sync_clk);
242 clk_disable_unprepare(host->rx_l0_sync_clk);
244 host->is_lane_clks_enabled = false;
247 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
250 struct device *dev = host->hba->dev;
252 if (host->is_lane_clks_enabled)
255 err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
256 host->rx_l0_sync_clk);
260 err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
261 host->tx_l0_sync_clk);
265 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
266 host->rx_l1_sync_clk);
270 err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
271 host->tx_l1_sync_clk);
275 host->is_lane_clks_enabled = true;
280 clk_disable_unprepare(host->rx_l1_sync_clk);
282 clk_disable_unprepare(host->tx_l0_sync_clk);
284 clk_disable_unprepare(host->rx_l0_sync_clk);
289 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
292 struct device *dev = host->hba->dev;
294 if (has_acpi_companion(dev))
297 err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
298 &host->rx_l0_sync_clk, false);
302 err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
303 &host->tx_l0_sync_clk, false);
307 /* In case of single lane per direction, don't read lane1 clocks */
308 if (host->hba->lanes_per_direction > 1) {
309 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
310 &host->rx_l1_sync_clk, false);
314 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
315 &host->tx_l1_sync_clk, true);
321 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
325 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
328 err = ufshcd_dme_get(hba,
329 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
330 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
332 if (err || tx_fsm_val == TX_FSM_HIBERN8)
335 /* sleep for max. 200us */
336 usleep_range(100, 200);
337 } while (time_before(jiffies, timeout));
340 * We might have been scheduled out for a long time during polling,
341 * so check the state again.
343 if (time_after(jiffies, timeout))
344 err = ufshcd_dme_get(hba,
345 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
346 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
350 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
352 } else if (tx_fsm_val != TX_FSM_HIBERN8) {
354 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
361 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
363 ufshcd_rmwl(host->hba, QUNIPRO_SEL,
364 ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
367 if (host->hw_ver.major >= 0x05)
368 ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
370 /* make sure above configuration is applied before we return */
375 * ufs_qcom_host_reset - reset host controller and PHY
377 static int ufs_qcom_host_reset(struct ufs_hba *hba)
380 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
381 bool reenable_intr = false;
383 if (!host->core_reset) {
384 dev_warn(hba->dev, "%s: reset control not set\n", __func__);
388 reenable_intr = hba->is_irq_enabled;
389 disable_irq(hba->irq);
390 hba->is_irq_enabled = false;
392 ret = reset_control_assert(host->core_reset);
394 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
400 * The hardware requirement for delay between assert/deassert
401 * is at least 3-4 sleep clock (32.7 kHz) cycles, which comes to
402 * ~125us (4/32768). To be on the safe side, add a 200us delay.
404 usleep_range(200, 210);
406 ret = reset_control_deassert(host->core_reset);
408 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
411 usleep_range(1000, 1100);
414 enable_irq(hba->irq);
415 hba->is_irq_enabled = true;
421 static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
423 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
425 if (host->hw_ver.major == 0x1) {
427 * HS-G3 operations may not work reliably on legacy QCOM
428 * UFS host controller hardware, even though the capability
429 * exchange during the link startup phase may end up
430 * negotiating G3 as the maximum supported gear.
431 * Hence downgrade the maximum supported gear to HS-G2.
434 } else if (host->hw_ver.major >= 0x4) {
435 return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
438 /* Default is HS-G3 */
442 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
444 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
445 struct phy *phy = host->generic_phy;
448 /* Reset UFS Host Controller and PHY */
449 ret = ufs_qcom_host_reset(hba);
451 dev_warn(hba->dev, "%s: host reset returned %d\n",
454 /* phy initialization - calibrate the phy */
457 dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
462 phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->phy_gear);
464 /* power on phy - start serdes and phy's power and clocks */
465 ret = phy_power_on(phy);
467 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
469 goto out_disable_phy;
472 ufs_qcom_select_unipro_mode(host);
483 * The UTP controller has a number of internal clock gating cells (CGCs).
484 * Internal hardware sub-modules within the UTP controller control the CGCs.
485 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
486 * in a specific operation. UTP controller CGCs are disabled by default, and
487 * this function enables them (after every UFS link startup) to save some power.
490 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
493 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
496 /* Ensure that HW clock gating is enabled before next operations */
500 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
501 enum ufs_notify_change_status status)
503 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
508 ufs_qcom_power_up_sequence(hba);
510 * The PHY PLL output is the source of tx/rx lane symbol
511 * clocks, hence, enable the lane clocks only after PHY
514 err = ufs_qcom_enable_lane_clks(host);
517 /* check if UFS PHY moved from DISABLED to HIBERN8 */
518 err = ufs_qcom_check_hibern8(hba);
519 ufs_qcom_enable_hw_clk_gating(hba);
520 ufs_qcom_ice_enable(host);
523 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
531 * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
533 * @hba: host controller instance
534 * @gear: current operating gear
535 * @hs: current power mode
536 * @rate: current operating rate (A or B)
537 * @update_link_startup_timer: indicates if link startup is ongoing
538 * @is_pre_scale_up: flag indicating the pre-scale-up condition
539 * Return: zero for success and non-zero in case of a failure.
541 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
542 u32 hs, u32 rate, bool update_link_startup_timer,
543 bool is_pre_scale_up)
545 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
546 struct ufs_clk_info *clki;
547 u32 core_clk_period_in_ns;
548 u32 tx_clk_cycles_per_us = 0;
549 unsigned long core_clk_rate = 0;
550 u32 core_clk_cycles_per_us = 0;
552 static u32 pwm_fr_table[][2] = {
559 static u32 hs_fr_table_rA[][2] = {
565 static u32 hs_fr_table_rB[][2] = {
572 * The Qunipro controller does not use the following registers:
573 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
574 * UFS_REG_PA_LINK_STARTUP_TIMER.
575 * However, the UTP controller uses the SYS1CLK_1US_REG register for
576 * interrupt aggregation logic.
577 * It is mandatory to write the SYS1CLK_1US_REG register on UFS host
578 * controller V4.0.0 onwards.
580 if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
581 !ufshcd_is_intr_aggr_allowed(hba))
585 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
589 list_for_each_entry(clki, &hba->clk_list_head, list) {
590 if (!strcmp(clki->name, "core_clk")) {
592 core_clk_rate = clki->max_freq;
594 core_clk_rate = clk_get_rate(clki->clk);
600 /* If frequency is smaller than 1MHz, set to 1MHz */
601 if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
602 core_clk_rate = DEFAULT_CLK_RATE_HZ;
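/*
 * Editor's note (worked example): a 150 MHz core clock gives
 * 150000000 / USEC_PER_SEC = 150 cycles per microsecond.
 */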
604 core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
605 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
606 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
608 * make sure above write gets applied before we return from
614 if (ufs_qcom_cap_qunipro(host))
617 core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
618 core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
619 core_clk_period_in_ns &= MASK_CLK_NS_REG;
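/*
 * Editor's note (worked example for the three lines above): at 150 MHz,
 * NSEC_PER_SEC / 150000000 = 6 ns by integer division; the value is then
 * shifted to the CLK_NS_REG field position and masked to its width.
 */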
624 if (rate == PA_HS_MODE_A) {
625 if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
627 "%s: index %d exceeds table size %zu\n",
629 ARRAY_SIZE(hs_fr_table_rA));
632 tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
633 } else if (rate == PA_HS_MODE_B) {
634 if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
636 "%s: index %d exceeds table size %zu\n",
638 ARRAY_SIZE(hs_fr_table_rB));
641 tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
643 dev_err(hba->dev, "%s: invalid rate = %d\n",
650 if (gear > ARRAY_SIZE(pwm_fr_table)) {
652 "%s: index %d exceeds table size %zu\n",
654 ARRAY_SIZE(pwm_fr_table));
657 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
661 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
665 if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
666 (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
667 /* these two register fields must be written in a single access */
668 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
669 REG_UFS_TX_SYMBOL_CLK_NS_US);
671 * make sure above write gets applied before we return from
677 if (update_link_startup_timer && host->hw_ver.major != 0x5) {
678 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
681 * make sure that this configuration is applied before
690 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
691 enum ufs_notify_change_status status)
694 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
698 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
700 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
705 if (ufs_qcom_cap_qunipro(host)) {
706 err = ufs_qcom_set_core_clk_ctrl(hba, true);
708 dev_err(hba->dev, "cfg core clk ctrl failed\n");
711 * Some UFS devices (and possibly the host) have issues if LCC is
712 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
713 * before link startup, which will make sure that both host
714 * and device TX LCC are disabled once link startup is
715 * completed.
717 if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
718 err = ufshcd_disable_host_tx_lcc(hba);
728 static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
730 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
732 /* reset gpio is optional */
733 if (!host->device_reset)
736 gpiod_set_value_cansleep(host->device_reset, asserted);
739 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
740 enum ufs_notify_change_status status)
742 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
743 struct phy *phy = host->generic_phy;
745 if (status == PRE_CHANGE)
748 if (ufs_qcom_is_link_off(hba)) {
750 * Disable the tx/rx lane symbol clocks before PHY is
751 * powered down as the PLL source should be disabled
752 * after downstream clocks are disabled.
754 ufs_qcom_disable_lane_clks(host);
757 /* reset the connected UFS device during power down */
758 ufs_qcom_device_reset_ctrl(hba, true);
760 } else if (!ufs_qcom_is_link_active(hba)) {
761 ufs_qcom_disable_lane_clks(host);
764 return ufs_qcom_ice_suspend(host);
767 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
769 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
770 struct phy *phy = host->generic_phy;
773 if (ufs_qcom_is_link_off(hba)) {
774 err = phy_power_on(phy);
776 dev_err(hba->dev, "%s: failed PHY power on: %d\n",
781 err = ufs_qcom_enable_lane_clks(host);
785 } else if (!ufs_qcom_is_link_active(hba)) {
786 err = ufs_qcom_enable_lane_clks(host);
791 return ufs_qcom_ice_resume(host);
794 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
796 if (host->dev_ref_clk_ctrl_mmio &&
797 (enable ^ host->is_dev_ref_clk_enabled)) {
798 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
801 temp |= host->dev_ref_clk_en_mask;
803 temp &= ~host->dev_ref_clk_en_mask;
806 * If we are here to disable this clock, it might be immediately
807 * after entering hibern8, in which case we need to make
808 * sure that the device ref_clk is active for a specific time after
812 unsigned long gating_wait;
814 gating_wait = host->hba->dev_info.clk_gating_wait_us;
819 * bRefClkGatingWaitTime defines the minimum
820 * time for which the reference clock is
821 * required by the device during the transition from
822 * HS-MODE to LS-MODE or HIBERN8 state. Allow
823 * some extra delay to be on the safe side.
826 usleep_range(gating_wait, gating_wait + 10);
830 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
833 * Make sure the write to ref_clk reaches the destination and
834 * is not stored in a Write Buffer (WB).
836 readl(host->dev_ref_clk_ctrl_mmio);
839 * If we call hibern8 exit after this, we need to make sure that
840 * device ref_clk is stable for at least 1us before the hibern8
846 host->is_dev_ref_clk_enabled = enable;
850 static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw)
852 struct device *dev = host->hba->dev;
855 ret = icc_set_bw(host->icc_ddr, 0, mem_bw);
857 dev_err(dev, "failed to set bandwidth request: %d\n", ret);
861 ret = icc_set_bw(host->icc_cpu, 0, cfg_bw);
863 dev_err(dev, "failed to set bandwidth request: %d\n", ret);
870 static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host)
872 struct ufs_pa_layer_attr *p = &host->dev_req_params;
873 int gear = max_t(u32, p->gear_rx, p->gear_tx);
874 int lane = max_t(u32, p->lane_rx, p->lane_tx);
876 if (ufshcd_is_hs_mode(p)) {
877 if (p->hs_rate == PA_HS_MODE_B)
878 return ufs_qcom_bw_table[MODE_HS_RB][gear][lane];
880 return ufs_qcom_bw_table[MODE_HS_RA][gear][lane];
882 return ufs_qcom_bw_table[MODE_PWM][gear][lane];
886 static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
888 struct __ufs_qcom_bw_table bw_table;
890 bw_table = ufs_qcom_get_bw_table(host);
892 return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
895 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
896 enum ufs_notify_change_status status,
897 struct ufs_pa_layer_attr *dev_max_params,
898 struct ufs_pa_layer_attr *dev_req_params)
900 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
901 struct ufs_dev_params ufs_qcom_cap;
904 if (!dev_req_params) {
905 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
911 ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
912 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
914 /* This driver only supports symmetric gear setting i.e., hs_tx_gear == hs_rx_gear */
915 ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
917 ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
921 dev_err(hba->dev, "%s: failed to determine capabilities\n",
927 * Update phy_gear only when the gears are scaled to a higher value. This is
928 * because the PHY gear settings are backwards compatible, and we only need to
929 * change the PHY gear settings while scaling to higher gears.
931 if (dev_req_params->gear_tx > host->phy_gear)
932 host->phy_gear = dev_req_params->gear_tx;
934 /* enable the device ref clock before changing to HS mode */
935 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
936 ufshcd_is_hs_mode(dev_req_params))
937 ufs_qcom_dev_ref_clk_ctrl(host, true);
939 if (host->hw_ver.major >= 0x4) {
940 ufshcd_dme_configure_adapt(hba,
941 dev_req_params->gear_tx,
946 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
947 dev_req_params->pwr_rx,
948 dev_req_params->hs_rate, false, false)) {
949 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
952 * we return an error code at the end of the routine,
953 * but continue to configure UFS_PHY_TX_LANE_ENABLE
954 * and bus voting as usual
959 /* cache the power mode parameters to use internally */
960 memcpy(&host->dev_req_params,
961 dev_req_params, sizeof(*dev_req_params));
963 ufs_qcom_icc_update_bw(host);
965 /* disable the device ref clock if entered PWM mode */
966 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
967 !ufshcd_is_hs_mode(dev_req_params))
968 ufs_qcom_dev_ref_clk_ctrl(host, false);
978 static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
981 u32 pa_vs_config_reg1;
983 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
988 /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
989 return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
990 (pa_vs_config_reg1 | (1 << 12)));
993 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
997 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
998 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
1000 if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
1001 hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
1006 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
1008 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1010 if (host->hw_ver.major == 0x1)
1011 return ufshci_version(1, 1);
1013 return ufshci_version(2, 0);
1017 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
1018 * @hba: host controller instance
1020 * The QCOM UFS host controller may have non-standard behaviours (quirks)
1021 * compared to what the UFSHCI specification requires. Advertise all such
1022 * quirks to the standard UFS host controller driver so that it takes them
1023 * into account.
1025 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
1027 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1029 if (host->hw_ver.major == 0x01) {
1030 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1031 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
1032 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
1034 if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
1035 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
1037 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
1040 if (host->hw_ver.major == 0x2) {
1041 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
1043 if (!ufs_qcom_cap_qunipro(host))
1044 /* Legacy UniPro mode still needs the following quirks */
1045 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1046 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
1047 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
1050 if (host->hw_ver.major > 0x3)
1051 hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
1054 static void ufs_qcom_set_caps(struct ufs_hba *hba)
1056 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1058 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1059 hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
1060 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
1061 hba->caps |= UFSHCD_CAP_WB_EN;
1062 hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
1063 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
1065 if (host->hw_ver.major >= 0x2) {
1066 host->caps = UFS_QCOM_CAP_QUNIPRO |
1067 UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
1072 * ufs_qcom_setup_clocks - enable/disable clocks
1073 * @hba: host controller instance
1074 * @on: if true, enable the clocks, else disable them
1075 * @status: PRE_CHANGE or POST_CHANGE notify
1077 * Return: 0 on success, non-zero on failure.
1079 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1080 enum ufs_notify_change_status status)
1082 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1085 * In case ufs_qcom_init() is not yet done, simply ignore.
1086 * ufs_qcom_setup_clocks() will be called again from
1087 * ufs_qcom_init() once initialization is done.
1095 ufs_qcom_icc_update_bw(host);
1097 if (!ufs_qcom_is_link_active(hba)) {
1098 /* disable device ref_clk */
1099 ufs_qcom_dev_ref_clk_ctrl(host, false);
1105 /* enable the device ref clock for HS mode */
1106 if (ufshcd_is_hs_mode(&hba->pwr_info))
1107 ufs_qcom_dev_ref_clk_ctrl(host, true);
1109 ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
1110 ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
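/*
 * Editor's note: [MODE_MIN][0][0] is the table's {0, 0} floor entry, so
 * this effectively drops the interconnect bandwidth vote while the UFS
 * clocks are off.
 */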
1119 ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
1121 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
1123 ufs_qcom_assert_reset(host->hba);
1124 /* provide 1ms delay to let the reset pulse propagate. */
1125 usleep_range(1000, 1100);
1130 ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
1132 struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
1134 ufs_qcom_deassert_reset(host->hba);
1137 * After reset deassertion, the PHY needs all ref clocks,
1138 * voltage and current to settle down before starting the serdes.
1140 usleep_range(1000, 1100);
1144 static const struct reset_control_ops ufs_qcom_reset_ops = {
1145 .assert = ufs_qcom_reset_assert,
1146 .deassert = ufs_qcom_reset_deassert,
1149 static int ufs_qcom_icc_init(struct ufs_qcom_host *host)
1151 struct device *dev = host->hba->dev;
1154 host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr");
1155 if (IS_ERR(host->icc_ddr))
1156 return dev_err_probe(dev, PTR_ERR(host->icc_ddr),
1157 "failed to acquire interconnect path\n");
1159 host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs");
1160 if (IS_ERR(host->icc_cpu))
1161 return dev_err_probe(dev, PTR_ERR(host->icc_cpu),
1162 "failed to acquire interconnect path\n");
1165 * Set a maximum bandwidth vote before initializing the UFS controller and
1166 * device. Ideally, a minimal interconnect vote would suffice for the
1167 * initialization, but a max vote allows faster initialization.
1169 ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw,
1170 ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw);
1172 return dev_err_probe(dev, ret, "failed to set bandwidth request\n");
1178 * ufs_qcom_init - bind phy with controller
1179 * @hba: host controller instance
1181 * Binds PHY with controller and powers up PHY enabling clocks
1184 * Return: -EPROBE_DEFER if binding fails, a negative error code on PHY
1185 * power-up failure, and zero on success.
1187 static int ufs_qcom_init(struct ufs_hba *hba)
1190 struct device *dev = hba->dev;
1191 struct platform_device *pdev = to_platform_device(dev);
1192 struct ufs_qcom_host *host;
1193 struct resource *res;
1194 struct ufs_clk_info *clki;
1196 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1198 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
1203 /* Make a two-way bind between the qcom host and the hba */
1204 ufshcd_set_variant(hba, host);
1206 /* Setup the optional reset control of HCI */
1207 host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
1208 if (IS_ERR(host->core_reset)) {
1209 err = dev_err_probe(dev, PTR_ERR(host->core_reset),
1210 "Failed to get reset control\n");
1211 goto out_variant_clear;
1214 /* Fire up the reset controller. Failure here is non-fatal. */
1215 host->rcdev.of_node = dev->of_node;
1216 host->rcdev.ops = &ufs_qcom_reset_ops;
1217 host->rcdev.owner = dev->driver->owner;
1218 host->rcdev.nr_resets = 1;
1219 err = devm_reset_controller_register(dev, &host->rcdev);
1221 dev_warn(dev, "Failed to register reset controller\n");
1223 if (!has_acpi_companion(dev)) {
1224 host->generic_phy = devm_phy_get(dev, "ufsphy");
1225 if (IS_ERR(host->generic_phy)) {
1226 err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
1227 goto out_variant_clear;
1231 err = ufs_qcom_icc_init(host);
1233 goto out_variant_clear;
1235 host->device_reset = devm_gpiod_get_optional(dev, "reset",
1237 if (IS_ERR(host->device_reset)) {
1238 err = PTR_ERR(host->device_reset);
1239 if (err != -EPROBE_DEFER)
1240 dev_err(dev, "failed to acquire reset gpio: %d\n", err);
1241 goto out_variant_clear;
1244 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1245 &host->hw_ver.minor, &host->hw_ver.step);
1248 * For newer controllers, the device reference clock control bit has
1249 * moved inside the UFS controller register address space itself.
1251 if (host->hw_ver.major >= 0x02) {
1252 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1253 host->dev_ref_clk_en_mask = BIT(26);
1255 /* "dev_ref_clk_ctrl_mem" is optional resource */
1256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1257 "dev_ref_clk_ctrl_mem");
1259 host->dev_ref_clk_ctrl_mmio =
1260 devm_ioremap_resource(dev, res);
1261 if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
1262 host->dev_ref_clk_ctrl_mmio = NULL;
1263 host->dev_ref_clk_en_mask = BIT(5);
1267 list_for_each_entry(clki, &hba->clk_list_head, list) {
1268 if (!strcmp(clki->name, "core_clk_unipro"))
1269 clki->keep_link_active = true;
1272 err = ufs_qcom_init_lane_clks(host);
1274 goto out_variant_clear;
1276 ufs_qcom_set_caps(hba);
1277 ufs_qcom_advertise_quirks(hba);
1279 err = ufs_qcom_ice_init(host);
1281 goto out_variant_clear;
1283 ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
1285 if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
1286 ufs_qcom_hosts[hba->dev->id] = host;
1288 ufs_qcom_get_default_testbus_cfg(host);
1289 err = ufs_qcom_testbus_config(host);
1291 /* Failure is non-fatal */
1292 dev_warn(dev, "%s: failed to configure the testbus %d\n",
1296 * Power up the PHY using the minimum supported gear (UFS_HS_G2).
1297 * Switching to max gear will be performed during reinit if supported.
1299 host->phy_gear = UFS_HS_G2;
1304 ufshcd_set_variant(hba, NULL);
1309 static void ufs_qcom_exit(struct ufs_hba *hba)
1311 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1313 ufs_qcom_disable_lane_clks(host);
1314 phy_power_off(host->generic_phy);
1315 phy_exit(host->generic_phy);
1319 * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles
1321 * @hba: host controller instance
1322 * @cycles_in_1us: number of cycles in 1us to be configured
1324 * Returns an error if the dme get/set configuration for 40ns fails
1325 * and returns zero on success.
1327 static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
1330 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1336 * UFS host controller V4.0.0 onwards needs to program the
1337 * PA_VS_CORE_CLK_40NS_CYCLES attribute according to the programmed
1338 * frequency of the unipro core clk of the UFS host controller.
1340 if (host->hw_ver.major < 4)
1344 * The generic formula cycles_in_40ns = freq_unipro / 25 is not
1345 * applicable for all frequencies. For example, ceil(37.5 MHz / 25) is
1346 * 2 and ceil(403 MHz / 25) is 17, whereas the hardware
1347 * specification expects 16. Hence use the exact spec-mandated
1348 * value for cycles_in_40ns instead of calculating it with the
1349 * generic formula.
1351 switch (cycles_in_1us) {
1352 case UNIPRO_CORE_CLK_FREQ_403_MHZ:
1353 cycles_in_40ns = 16;
1355 case UNIPRO_CORE_CLK_FREQ_300_MHZ:
1356 cycles_in_40ns = 12;
1358 case UNIPRO_CORE_CLK_FREQ_201_5_MHZ:
1361 case UNIPRO_CORE_CLK_FREQ_150_MHZ:
1364 case UNIPRO_CORE_CLK_FREQ_100_MHZ:
1367 case UNIPRO_CORE_CLK_FREQ_75_MHZ:
1370 case UNIPRO_CORE_CLK_FREQ_37_5_MHZ:
1374 dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n",
1379 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), &reg);
1383 reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK;
1384 reg |= cycles_in_40ns;
1386 return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
1389 static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
1391 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1392 struct list_head *head = &hba->clk_list_head;
1393 struct ufs_clk_info *clki;
1395 u32 core_clk_ctrl_reg;
1398 list_for_each_entry(clki, head, list) {
1399 if (!IS_ERR_OR_NULL(clki->clk) &&
1400 !strcmp(clki->name, "core_clk_unipro")) {
1401 if (is_scale_up)
1402 cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
1403 else
1404 cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
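/*
 * Editor's note (assumption): ceil() here is the driver's local
 * round-up-division helper, converting the unipro core clock rate in Hz
 * to cycles per microsecond, e.g. 37500000 Hz -> 38.
 */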
1409 err = ufshcd_dme_get(hba,
1410 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1411 &core_clk_ctrl_reg);
1415 /* Bit mask is different for UFS host controller V4.0.0 onwards */
1416 if (host->hw_ver.major >= 4) {
1417 if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us))
1419 core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4;
1420 core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
1422 if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us))
1424 core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK;
1425 core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us);
1428 /* Clear CORE_CLK_DIV_EN */
1429 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1431 err = ufshcd_dme_set(hba,
1432 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1437 /* Configure unipro core clk 40ns attribute */
1438 return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
1441 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
1443 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1444 struct ufs_pa_layer_attr *attr = &host->dev_req_params;
1447 if (!ufs_qcom_cap_qunipro(host))
1450 ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
1451 attr->hs_rate, false, true);
1453 dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
1456 /* set unipro core clock attributes and clear clock divider */
1457 return ufs_qcom_set_core_clk_ctrl(hba, true);
1460 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
1465 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
1467 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1469 u32 core_clk_ctrl_reg;
1471 if (!ufs_qcom_cap_qunipro(host))
1474 err = ufshcd_dme_get(hba,
1475 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1476 &core_clk_ctrl_reg);
1478 /* make sure CORE_CLK_DIV_EN is cleared */
1480 (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
1481 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1482 err = ufshcd_dme_set(hba,
1483 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1490 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
1492 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1494 if (!ufs_qcom_cap_qunipro(host))
1497 /* set unipro core clock attributes and clear clock divider */
1498 return ufs_qcom_set_core_clk_ctrl(hba, false);
1501 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
1502 bool scale_up, enum ufs_notify_change_status status)
1504 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1507 /* check the host controller state before sending hibern8 cmd */
1508 if (!ufshcd_is_hba_active(hba))
1511 if (status == PRE_CHANGE) {
1512 err = ufshcd_uic_hibern8_enter(hba);
1516 err = ufs_qcom_clk_scale_up_pre_change(hba);
1518 err = ufs_qcom_clk_scale_down_pre_change(hba);
1521 ufshcd_uic_hibern8_exit(hba);
1526 err = ufs_qcom_clk_scale_up_post_change(hba);
1528 err = ufs_qcom_clk_scale_down_post_change(hba);
1532 ufshcd_uic_hibern8_exit(hba);
1536 ufs_qcom_icc_update_bw(host);
1537 ufshcd_uic_hibern8_exit(hba);
1543 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1545 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
1546 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
1547 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1550 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1552 /* provide a legal default configuration */
1553 host->testbus.select_major = TSTBUS_UNIPRO;
1554 host->testbus.select_minor = 37;
1557 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1559 if (host->testbus.select_major >= TSTBUS_MAX) {
1560 dev_err(host->hba->dev,
1561 "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
1562 __func__, host->testbus.select_major);
1569 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1573 u32 mask = TEST_BUS_SUB_SEL_MASK;
1578 if (!ufs_qcom_testbus_cfg_is_ok(host))
1581 switch (host->testbus.select_major) {
1583 reg = UFS_TEST_BUS_CTRL_0;
1587 reg = UFS_TEST_BUS_CTRL_0;
1591 reg = UFS_TEST_BUS_CTRL_0;
1595 reg = UFS_TEST_BUS_CTRL_0;
1599 reg = UFS_TEST_BUS_CTRL_1;
1603 reg = UFS_TEST_BUS_CTRL_1;
1607 reg = UFS_TEST_BUS_CTRL_1;
1611 reg = UFS_TEST_BUS_CTRL_1;
1614 case TSTBUS_WRAPPER:
1615 reg = UFS_TEST_BUS_CTRL_2;
1618 case TSTBUS_COMBINED:
1619 reg = UFS_TEST_BUS_CTRL_2;
1622 case TSTBUS_UTP_HCI:
1623 reg = UFS_TEST_BUS_CTRL_2;
1627 reg = UFS_UNIPRO_CFG;
1632 * No need for a default case, since
1633 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
1634 * is legal.
1638 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1639 (u32)host->testbus.select_major << 19,
1641 ufshcd_rmwl(host->hba, mask,
1642 (u32)host->testbus.select_minor << offset,
1644 ufs_qcom_enable_test_bus(host);
1646 * Make sure the test bus configuration is
1647 * committed before returning.
1654 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1657 struct ufs_qcom_host *host;
1659 host = ufshcd_get_variant(hba);
1661 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
1662 "HCI Vendor Specific Registers ");
1664 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
1665 ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");
1667 reg = ufshcd_readl(hba, REG_UFS_CFG1);
1668 reg |= UTP_DBG_RAMS_EN;
1669 ufshcd_writel(hba, reg, REG_UFS_CFG1);
1671 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
1672 ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");
1674 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
1675 ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");
1677 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
1678 ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");
1680 /* clear bit 17 - UTP_DBG_RAMS_EN */
1681 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
1683 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1684 ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");
1686 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
1687 ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");
1689 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
1690 ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");
1692 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
1693 ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");
1695 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
1696 ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");
1698 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
1699 ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");
1701 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
1702 ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
1706 * ufs_qcom_device_reset() - toggle the (optional) device reset line
1707 * @hba: per-adapter instance
1709 * Toggles the (optional) reset line to reset the attached device.
1711 static int ufs_qcom_device_reset(struct ufs_hba *hba)
1713 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1715 /* reset gpio is optional */
1716 if (!host->device_reset)
1720 * The UFS device shall detect reset pulses of 1us; sleep for 10us to
1721 * be on the safe side.
1723 ufs_qcom_device_reset_ctrl(hba, true);
1724 usleep_range(10, 15);
1726 ufs_qcom_device_reset_ctrl(hba, false);
1727 usleep_range(10, 15);
1732 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
1733 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1734 struct devfreq_dev_profile *p,
1735 struct devfreq_simple_ondemand_data *d)
1738 p->timer = DEVFREQ_TIMER_DELAYED;
1739 d->upthreshold = 70;
1740 d->downdifferential = 5;
1743 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1744 struct devfreq_dev_profile *p,
1745 struct devfreq_simple_ondemand_data *data)
1750 static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
1752 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1754 phy_power_off(host->generic_phy);
1758 static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
1759 {.name = "ufs_mem",},
1761 /* Submission Queue DAO */
1762 {.name = "mcq_sqd",},
1763 /* Submission Queue Interrupt Status */
1764 {.name = "mcq_sqis",},
1765 /* Completion Queue DAO */
1766 {.name = "mcq_cqd",},
1767 /* Completion Queue Interrupt Status */
1768 {.name = "mcq_cqis",},
1769 /* MCQ vendor specific */
1770 {.name = "mcq_vs",},
1773 static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
1775 struct platform_device *pdev = to_platform_device(hba->dev);
1776 struct ufshcd_res_info *res;
1777 struct resource *res_mem, *res_mcq;
1780 memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
1782 for (i = 0; i < RES_MAX; i++) {
1784 res->resource = platform_get_resource_byname(pdev,
1787 if (!res->resource) {
1788 dev_info(hba->dev, "Resource %s not provided\n", res->name);
1792 } else if (i == RES_UFS) {
1793 res_mem = res->resource;
1794 res->base = hba->mmio_base;
1798 res->base = devm_ioremap_resource(hba->dev, res->resource);
1799 if (IS_ERR(res->base)) {
1800 dev_err(hba->dev, "Failed to map res %s, err=%d\n",
1801 res->name, (int)PTR_ERR(res->base));
1802 ret = PTR_ERR(res->base);
1808 /* MCQ resource provided in DT */
1809 res = &hba->res[RES_MCQ];
1810 /* Bail early if the MCQ resource was already provided in DT */
1814 /* Explicitly allocate MCQ resource from ufs_mem */
1815 res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
1819 res_mcq->start = res_mem->start +
1820 MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
1821 res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
1822 res_mcq->flags = res_mem->flags;
1823 res_mcq->name = "mcq";
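/*
 * Editor's note: the carved-out region starts at the QCFGPTR offset
 * inside "ufs_mem" (see MCQ_SQATTR_OFFSET above) and spans
 * nr_hw_queues * MCQ_QCFG_SIZE (0x40) bytes, one config block per queue.
 */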
1825 ret = insert_resource(&iomem_resource, res_mcq);
1827 dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
1832 res->base = devm_ioremap_resource(hba->dev, res_mcq);
1833 if (IS_ERR(res->base)) {
1834 dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
1835 (int)PTR_ERR(res->base));
1836 ret = PTR_ERR(res->base);
1841 hba->mcq_base = res->base;
1845 remove_resource(res_mcq);
1849 static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
1851 struct ufshcd_res_info *mem_res, *sqdao_res;
1852 struct ufshcd_mcq_opr_info_t *opr;
1855 mem_res = &hba->res[RES_UFS];
1856 sqdao_res = &hba->res[RES_MCQ_SQD];
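/*
 * Editor's note: each MCQ operation register group i (SQD, SQIS, CQD,
 * CQIS) starts 0x40 * i bytes into the SQDAO resource, and every
 * hardware queue repeats the group at a 0x100-byte stride, matching the
 * offset, stride and base computed below.
 */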
1858 if (!mem_res->base || !sqdao_res->base)
1861 for (i = 0; i < OPR_MAX; i++) {
1862 opr = &hba->mcq_opr[i];
1863 opr->offset = sqdao_res->resource->start -
1864 mem_res->resource->start + 0x40 * i;
1865 opr->stride = 0x100;
1866 opr->base = sqdao_res->base + 0x40 * i;
1872 static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
1874 /* Qualcomm HC supports up to 64 active commands (MAC) */
1875 return MAX_SUPP_MAC;
1878 static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
1879 unsigned long *ocqs)
1881 struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
1883 if (!mcq_vs_res->base)
1886 *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
1891 static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1893 struct device *dev = msi_desc_to_dev(desc);
1894 struct ufs_hba *hba = dev_get_drvdata(dev);
1896 ufshcd_mcq_config_esi(hba, msg);
1899 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
1901 struct msi_desc *desc = data;
1902 struct device *dev = msi_desc_to_dev(desc);
1903 struct ufs_hba *hba = dev_get_drvdata(dev);
1904 u32 id = desc->msi_index;
1905 struct ufs_hw_queue *hwq = &hba->uhq[id];
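/*
 * Editor's note: each ESI vector maps 1:1 onto a hardware queue via its
 * MSI index; the handler acks that queue's CQ interrupt status and then
 * drains its completion entries.
 */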
1907 ufshcd_mcq_write_cqis(hba, 0x1, id);
1908 ufshcd_mcq_poll_cqe_lock(hba, hwq);
1913 static int ufs_qcom_config_esi(struct ufs_hba *hba)
1915 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1916 struct msi_desc *desc;
1917 struct msi_desc *failed_desc = NULL;
1920 if (host->esi_enabled)
1924 * 1. We only handle CQs as of now.
1925 * 2. Poll queues do not need ESI.
1927 nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
1928 ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
1929 ufs_qcom_write_msi_msg);
1931 dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
1935 msi_lock_descs(hba->dev);
1936 msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
1937 ret = devm_request_irq(hba->dev, desc->irq,
1938 ufs_qcom_mcq_esi_handler,
1939 IRQF_SHARED, "qcom-mcq-esi", desc);
1941 dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
1942 __func__, desc->irq, ret);
1947 msi_unlock_descs(hba->dev);
1951 msi_lock_descs(hba->dev);
1952 msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
1953 if (desc == failed_desc)
1955 devm_free_irq(hba->dev, desc->irq, hba);
1957 msi_unlock_descs(hba->dev);
1958 platform_msi_domain_free_irqs(hba->dev);
1960 if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
1961 host->hw_ver.step == 0) {
1963 ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
1966 ufshcd_mcq_enable_esi(hba);
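/*
 * Editor's note (assumption): the REG_UFS_CFG3 write above sets bits
 * 12..16 (mask 0x1F000) on HW version 6.0.0 parts before enabling ESI;
 * the bit-field semantics are not documented in this file.
 */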
1971 host->esi_enabled = true;
1977 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1979 * The variant operations configure the necessary controller and PHY
1980 * handshake during initialization.
1982 static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1984 .init = ufs_qcom_init,
1985 .exit = ufs_qcom_exit,
1986 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
1987 .clk_scale_notify = ufs_qcom_clk_scale_notify,
1988 .setup_clocks = ufs_qcom_setup_clocks,
1989 .hce_enable_notify = ufs_qcom_hce_enable_notify,
1990 .link_startup_notify = ufs_qcom_link_startup_notify,
1991 .pwr_change_notify = ufs_qcom_pwr_change_notify,
1992 .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
1993 .suspend = ufs_qcom_suspend,
1994 .resume = ufs_qcom_resume,
1995 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
1996 .device_reset = ufs_qcom_device_reset,
1997 .config_scaling_param = ufs_qcom_config_scaling_param,
1998 .program_key = ufs_qcom_ice_program_key,
1999 .reinit_notify = ufs_qcom_reinit_notify,
2000 .mcq_config_resource = ufs_qcom_mcq_config_resource,
2001 .get_hba_mac = ufs_qcom_get_hba_mac,
2002 .op_runtime_config = ufs_qcom_op_runtime_config,
2003 .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
2004 .config_esi = ufs_qcom_config_esi,
2008 * ufs_qcom_probe - probe routine of the driver
2009 * @pdev: pointer to Platform device handle
2011 * Return: zero for success and non-zero for failure.
2013 static int ufs_qcom_probe(struct platform_device *pdev)
2016 struct device *dev = &pdev->dev;
2018 /* Perform generic probe */
2019 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
2021 return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");
2027 * ufs_qcom_remove - set driver_data of the device to NULL
2028 * @pdev: pointer to platform device handle
2032 static void ufs_qcom_remove(struct platform_device *pdev)
2034 struct ufs_hba *hba = platform_get_drvdata(pdev);
2036 pm_runtime_get_sync(&pdev->dev);
2038 platform_msi_domain_free_irqs(hba->dev);
2041 static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
2042 { .compatible = "qcom,ufshc"},
2045 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
2048 static const struct acpi_device_id ufs_qcom_acpi_match[] = {
2052 MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
2055 static const struct dev_pm_ops ufs_qcom_pm_ops = {
2056 SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
2057 .prepare = ufshcd_suspend_prepare,
2058 .complete = ufshcd_resume_complete,
2059 #ifdef CONFIG_PM_SLEEP
2060 .suspend = ufshcd_system_suspend,
2061 .resume = ufshcd_system_resume,
2062 .freeze = ufshcd_system_freeze,
2063 .restore = ufshcd_system_restore,
2064 .thaw = ufshcd_system_thaw,
2068 static struct platform_driver ufs_qcom_pltform = {
2069 .probe = ufs_qcom_probe,
2070 .remove_new = ufs_qcom_remove,
2072 .name = "ufshcd-qcom",
2073 .pm = &ufs_qcom_pm_ops,
2074 .of_match_table = of_match_ptr(ufs_qcom_of_match),
2075 .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
2078 module_platform_driver(ufs_qcom_pltform);
2080 MODULE_LICENSE("GPL v2");