2 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
15 #include <linux/time.h>
17 #include <linux/platform_device.h>
18 #include <linux/phy/phy.h>
20 #include <linux/phy/phy-qcom-ufs.h>
22 #include "ufshcd-pltfrm.h"
26 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
27 (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
45 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
47 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
48 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
49 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
52 static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
55 print_hex_dump(KERN_ERR, prefix,
56 len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
57 16, 4, (void __force *)hba->mmio_base + offset,
61 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
65 err = ufshcd_dme_get(hba,
66 UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
68 dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
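/*
 * Small helpers around the common clock framework: look up a named clock
 * with devm_clk_get() and prepare/enable it, logging an error on failure.
 */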
74 static int ufs_qcom_host_clk_get(struct device *dev,
75 const char *name, struct clk **clk_out)
80 clk = devm_clk_get(dev, name);
dev_err(dev, "%s: failed to get %s, err %d\n",
92 static int ufs_qcom_host_clk_enable(struct device *dev,
93 const char *name, struct clk *clk)
97 err = clk_prepare_enable(clk);
99 dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
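/*
 * Disable and unprepare the per-lane TX/RX symbol clocks, in the reverse
 * order of how ufs_qcom_enable_lane_clks() turned them on. This is a
 * no-op when the lane clocks are not currently enabled.
 */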
104 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
106 if (!host->is_lane_clks_enabled)
109 clk_disable_unprepare(host->tx_l1_sync_clk);
110 clk_disable_unprepare(host->tx_l0_sync_clk);
111 clk_disable_unprepare(host->rx_l1_sync_clk);
112 clk_disable_unprepare(host->rx_l0_sync_clk);
114 host->is_lane_clks_enabled = false;
117 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
120 struct device *dev = host->hba->dev;
122 if (host->is_lane_clks_enabled)
125 err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
126 host->rx_l0_sync_clk);
130 err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
131 host->tx_l0_sync_clk);
135 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
136 host->rx_l1_sync_clk);
140 err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
141 host->tx_l1_sync_clk);
145 host->is_lane_clks_enabled = true;
149 clk_disable_unprepare(host->rx_l1_sync_clk);
151 clk_disable_unprepare(host->tx_l0_sync_clk);
153 clk_disable_unprepare(host->rx_l0_sync_clk);
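/*
 * Acquire managed (devm) references to the lane 0/1 TX and RX symbol
 * clocks by name. They are only enabled later, once the PHY PLL that
 * sources them is running.
 */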
158 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
161 struct device *dev = host->hba->dev;
163 err = ufs_qcom_host_clk_get(dev,
164 "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
168 err = ufs_qcom_host_clk_get(dev,
169 "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
173 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
174 &host->rx_l1_sync_clk);
178 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
179 &host->tx_l1_sync_clk);
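/*
 * After link startup, read how many TX data lanes were actually
 * connected (PA_CONNECTEDTXDATALANES) and program the PHY TX lane
 * enable mask accordingly.
 */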
185 static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
187 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
188 struct phy *phy = host->generic_phy;
192 err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
196 err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
198 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
205 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
209 unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
212 err = ufshcd_dme_get(hba,
213 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
214 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
216 if (err || tx_fsm_val == TX_FSM_HIBERN8)
219 /* sleep for max. 200us */
220 usleep_range(100, 200);
221 } while (time_before(jiffies, timeout));
* We might have been scheduled out for a long time during polling, so
* check the state again.
227 if (time_after(jiffies, timeout))
228 err = ufshcd_dme_get(hba,
229 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
230 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
234 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
236 } else if (tx_fsm_val != TX_FSM_HIBERN8) {
238 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
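/*
 * Select between QUniPro and legacy UniPro operation by updating the
 * QUNIPRO_SEL field in the host's configuration register, depending on
 * whether this host advertises the QUniPro capability.
 */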
245 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
247 ufshcd_rmwl(host->hba, QUNIPRO_SEL,
248 ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
250 /* make sure above configuration is applied before we return */
254 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
256 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
257 struct phy *phy = host->generic_phy;
259 bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
262 /* Assert PHY reset and apply PHY calibration values */
263 ufs_qcom_assert_reset(hba);
264 /* provide 1ms delay to let the reset pulse propagate */
265 usleep_range(1000, 1100);
267 ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
271 "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
276 /* De-assert PHY reset and start serdes */
277 ufs_qcom_deassert_reset(hba);
* After reset de-assertion, the PHY needs all of its reference clocks,
* voltage and current to settle before the SerDes is started.
283 usleep_range(1000, 1100);
284 ret = ufs_qcom_phy_start_serdes(phy);
286 dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
291 ret = ufs_qcom_phy_is_pcs_ready(phy);
294 "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
297 ufs_qcom_select_unipro_mode(host);
304 * The UTP controller has a number of internal clock gating cells (CGCs).
305 * Internal hardware sub-modules within the UTP controller control the CGCs.
306 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
* in a specific operation. UTP controller CGCs are disabled by default, and
* this function enables them (after every UFS link startup) to save some power.
311 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
314 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
317 /* Ensure that HW clock gating is enabled before next operations */
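/*
 * Host controller enable notifier: run the PHY power-up sequence and
 * only then enable the lane clocks (which are sourced from the PHY PLL);
 * once the controller is up, verify the PHY reached HIBERN8 and turn on
 * hardware clock gating.
 */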
321 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
322 enum ufs_notify_change_status status)
324 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
329 ufs_qcom_power_up_sequence(hba);
331 * The PHY PLL output is the source of tx/rx lane symbol
332 * clocks, hence, enable the lane clocks only after PHY
335 err = ufs_qcom_enable_lane_clks(host);
338 /* check if UFS PHY moved from DISABLED to HIBERN8 */
339 err = ufs_qcom_check_hibern8(hba);
340 ufs_qcom_enable_hw_clk_gating(hba);
344 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
352 * Returns zero for success and non-zero in case of a failure
354 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
355 u32 hs, u32 rate, bool update_link_startup_timer)
358 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
359 struct ufs_clk_info *clki;
360 u32 core_clk_period_in_ns;
361 u32 tx_clk_cycles_per_us = 0;
362 unsigned long core_clk_rate = 0;
363 u32 core_clk_cycles_per_us = 0;
365 static u32 pwm_fr_table[][2] = {
372 static u32 hs_fr_table_rA[][2] = {
378 static u32 hs_fr_table_rB[][2] = {
* The QUniPro controller does not use the following registers:
* SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
* UFS_REG_PA_LINK_STARTUP_TIMER.
* The UTP controller, however, uses the SYS1CLK_1US_REG register for
* interrupt aggregation.
391 if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
395 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
399 list_for_each_entry(clki, &hba->clk_list_head, list) {
400 if (!strcmp(clki->name, "core_clk"))
401 core_clk_rate = clk_get_rate(clki->clk);
404 /* If frequency is smaller than 1MHz, set to 1MHz */
405 if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
406 core_clk_rate = DEFAULT_CLK_RATE_HZ;
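/* e.g. a 150 MHz core clock gives 150000000 / 1000000 = 150 cycles per us */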
408 core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
409 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
410 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
412 * make sure above write gets applied before we return from
418 if (ufs_qcom_cap_qunipro(host))
421 core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
422 core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
423 core_clk_period_in_ns &= MASK_CLK_NS_REG;
428 if (rate == PA_HS_MODE_A) {
429 if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
431 "%s: index %d exceeds table size %zu\n",
433 ARRAY_SIZE(hs_fr_table_rA));
436 tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
437 } else if (rate == PA_HS_MODE_B) {
438 if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
440 "%s: index %d exceeds table size %zu\n",
442 ARRAY_SIZE(hs_fr_table_rB));
445 tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
447 dev_err(hba->dev, "%s: invalid rate = %d\n",
454 if (gear > ARRAY_SIZE(pwm_fr_table)) {
456 "%s: index %d exceeds table size %zu\n",
458 ARRAY_SIZE(pwm_fr_table));
461 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
465 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
469 if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
470 (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
/* the two fields of this register must be written at once */
472 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
473 REG_UFS_TX_SYMBOL_CLK_NS_US);
475 * make sure above write gets applied before we return from
481 if (update_link_startup_timer) {
482 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
483 REG_UFS_PA_LINK_STARTUP_TIMER);
485 * make sure that this configuration is applied before
498 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
499 enum ufs_notify_change_status status)
502 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
506 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
508 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
514 if (ufs_qcom_cap_qunipro(host))
516 * set unipro core clock cycles to 150 & clear clock
519 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
524 ufs_qcom_link_startup_post_change(hba);
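/*
 * Suspend handler: when the link is fully off, the lane clocks are
 * dropped and the PHY is held in reset; when the link is merely
 * inactive, only the lane clocks are dropped so that the PHY supplies
 * described below can be switched off.
 */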
534 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
536 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
537 struct phy *phy = host->generic_phy;
540 if (ufs_qcom_is_link_off(hba)) {
* Disable the tx/rx lane symbol clocks before the PHY is
* powered down, as the PLL source should be disabled
* after its downstream clocks are disabled.
546 ufs_qcom_disable_lane_clks(host);
549 /* Assert PHY soft reset */
550 ufs_qcom_assert_reset(hba);
555 * If UniPro link is not active, PHY ref_clk, main PHY analog power
556 * rail and low noise analog power rail for PLL can be switched off.
558 if (!ufs_qcom_is_link_active(hba)) {
559 ufs_qcom_disable_lane_clks(host);
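/*
 * Resume handler: power the PHY back on and re-enable the lane clocks
 * before clearing the host's system-suspended flag.
 */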
567 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
569 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
570 struct phy *phy = host->generic_phy;
573 err = phy_power_on(phy);
dev_err(hba->dev, "%s: failed to power on the PHY, err = %d\n",
580 err = ufs_qcom_enable_lane_clks(host);
584 hba->is_sys_suspended = false;
590 struct ufs_qcom_dev_params {
591 u32 pwm_rx_gear; /* pwm rx gear to work in */
592 u32 pwm_tx_gear; /* pwm tx gear to work in */
593 u32 hs_rx_gear; /* hs rx gear to work in */
594 u32 hs_tx_gear; /* hs tx gear to work in */
595 u32 rx_lanes; /* number of rx lanes */
596 u32 tx_lanes; /* number of tx lanes */
597 u32 rx_pwr_pwm; /* rx pwm working pwr */
598 u32 tx_pwr_pwm; /* tx pwm working pwr */
599 u32 rx_pwr_hs; /* rx hs working pwr */
600 u32 tx_pwr_hs; /* tx hs working pwr */
601 u32 hs_rate; /* rate A/B to work in HS */
602 u32 desired_working_mode;
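/*
 * Negotiate the power mode: combine the device's maximum capabilities
 * (dev_max) with the vendor-imposed limits in qcom_param and fill
 * agreed_pwr with the agreed gear, lane counts, power mode and HS rate.
 */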
605 static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
606 struct ufs_pa_layer_attr *dev_max,
607 struct ufs_pa_layer_attr *agreed_pwr)
611 bool is_dev_sup_hs = false;
612 bool is_qcom_max_hs = false;
614 if (dev_max->pwr_rx == FAST_MODE)
615 is_dev_sup_hs = true;
617 if (qcom_param->desired_working_mode == FAST) {
618 is_qcom_max_hs = true;
619 min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
620 qcom_param->hs_tx_gear);
622 min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
623 qcom_param->pwm_tx_gear);
* The device doesn't support HS but qcom_param->desired_working_mode is
* HS, thus the device and qcom_param don't agree.
630 if (!is_dev_sup_hs && is_qcom_max_hs) {
631 pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
634 } else if (is_dev_sup_hs && is_qcom_max_hs) {
* Since the device supports HS, it supports FAST_MODE.
* Since qcom_param->desired_working_mode is also HS,
* the final decision (FAST/FASTAUTO) is made according
* to qcom_params, as it is the limiting factor.
641 agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
642 qcom_param->rx_pwr_hs;
* Here qcom_param->desired_working_mode is PWM.
* It doesn't matter whether the device supports HS or PWM;
* in both cases qcom_param->desired_working_mode will
* determine the chosen mode.
650 agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
651 qcom_param->rx_pwr_pwm;
* We would like tx to work with the minimum number of lanes agreed
* between the device capability and the vendor preferences;
* the same decision is made for rx.
659 agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
660 qcom_param->tx_lanes);
661 agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
662 qcom_param->rx_lanes);
664 /* device maximum gear is the minimum between device rx and tx gears */
665 min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
* If the device capabilities and the vendor pre-defined preferences are
* both HS or both PWM, then set the minimum gear to be the chosen
* working gear.
* If one is PWM and one is HS, then the PWM side gets to decide the
* gear, as it is the one that also decided previously what power mode
* the device will be configured to.
675 if ((is_dev_sup_hs && is_qcom_max_hs) ||
676 (!is_dev_sup_hs && !is_qcom_max_hs))
677 agreed_pwr->gear_rx = agreed_pwr->gear_tx =
678 min_t(u32, min_dev_gear, min_qcom_gear);
679 else if (!is_dev_sup_hs)
680 agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
682 agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
684 agreed_pwr->hs_rate = qcom_param->hs_rate;
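/*
 * Bus scaling support: with CONFIG_MSM_BUS_SCALING, the helpers below
 * translate the current UFS speed mode (e.g. "MIN", "MAX" or
 * "HS_RB_G2_L2") into a vote on the MSM bus scaling framework, and the
 * "max_bus_bw" sysfs attribute can force the maximum bandwidth vote.
 * Without that option the same functions compile to stubs.
 */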
688 #ifdef CONFIG_MSM_BUS_SCALING
689 static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
690 const char *speed_mode)
692 struct device *dev = host->hba->dev;
693 struct device_node *np = dev->of_node;
695 const char *key = "qcom,bus-vector-names";
702 if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
703 err = of_property_match_string(np, key, "MAX");
705 err = of_property_match_string(np, key, speed_mode);
709 dev_err(dev, "%s: Invalid %s mode %d\n",
710 __func__, speed_mode, err);
714 static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
716 int gear = max_t(u32, p->gear_rx, p->gear_tx);
717 int lanes = max_t(u32, p->lane_rx, p->lane_tx);
720 /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
727 if (!p->pwr_rx && !p->pwr_tx) {
729 snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
730 } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
731 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
733 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
734 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
737 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
742 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
746 if (vote != host->bus_vote.curr_vote) {
747 err = msm_bus_scale_client_update_request(
748 host->bus_vote.client_handle, vote);
750 dev_err(host->hba->dev,
751 "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
752 __func__, host->bus_vote.client_handle,
757 host->bus_vote.curr_vote = vote;
763 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
767 char mode[BUS_VECTOR_NAME_LEN];
769 ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
771 vote = ufs_qcom_get_bus_vote(host, mode);
773 err = ufs_qcom_set_bus_vote(host, vote);
778 dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
780 host->bus_vote.saved_vote = vote;
785 show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
788 struct ufs_hba *hba = dev_get_drvdata(dev);
789 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
791 return snprintf(buf, PAGE_SIZE, "%u\n",
792 host->bus_vote.is_max_bw_needed);
796 store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
797 const char *buf, size_t count)
799 struct ufs_hba *hba = dev_get_drvdata(dev);
800 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
803 if (!kstrtou32(buf, 0, &value)) {
804 host->bus_vote.is_max_bw_needed = !!value;
805 ufs_qcom_update_bus_bw_vote(host);
811 static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
814 struct msm_bus_scale_pdata *bus_pdata;
815 struct device *dev = host->hba->dev;
816 struct platform_device *pdev = to_platform_device(dev);
817 struct device_node *np = dev->of_node;
819 bus_pdata = msm_bus_cl_get_pdata(pdev);
821 dev_err(dev, "%s: failed to get bus vectors\n", __func__);
826 err = of_property_count_strings(np, "qcom,bus-vector-names");
827 if (err < 0 || err != bus_pdata->num_usecases) {
828 dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
833 host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
834 if (!host->bus_vote.client_handle) {
835 dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
841 /* cache the vote index for minimum and maximum bandwidth */
842 host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
843 host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
845 host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
846 host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
847 sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
848 host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
849 host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
850 err = device_create_file(dev, &host->bus_vote.max_bus_bw);
854 #else /* CONFIG_MSM_BUS_SCALING */
855 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
860 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
865 static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
869 #endif /* CONFIG_MSM_BUS_SCALING */
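/*
 * Gate the external device reference clock on or off. The enable bit
 * lives either in the controller's own register space (newer hosts) or
 * in the separately mapped "dev_ref_clk_ctrl_mem" region (older hosts);
 * ufs_qcom_init() sets up dev_ref_clk_ctrl_mmio and the bit mask. The
 * comments below spell out the 1us timing requirements around hibern8.
 */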
871 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
873 if (host->dev_ref_clk_ctrl_mmio &&
874 (enable ^ host->is_dev_ref_clk_enabled)) {
875 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
878 temp |= host->dev_ref_clk_en_mask;
880 temp &= ~host->dev_ref_clk_en_mask;
* If we are here to disable this clock, it might be immediately
* after entering hibern8, in which case we need to make sure that
* the device ref_clk is kept active for at least 1us after the
* hibern8 enter.
891 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
893 /* ensure that ref_clk is enabled/disabled before we return */
* If we call hibern8 exit after this, we need to make sure that the
* device ref_clk is stable for at least 1us before the hibern8
* exit command is issued.
904 host->is_dev_ref_clk_enabled = enable;
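/*
 * Power mode change notifier: negotiate the device's requested
 * parameters against the vendor limits above (downgrading to HS-G2 on
 * controller v1.x), then reprogram the timer registers, the PHY TX lane
 * enable mask and the bus bandwidth vote for the agreed settings, and
 * cache those settings for later use.
 */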
908 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
909 enum ufs_notify_change_status status,
910 struct ufs_pa_layer_attr *dev_max_params,
911 struct ufs_pa_layer_attr *dev_req_params)
914 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
915 struct phy *phy = host->generic_phy;
916 struct ufs_qcom_dev_params ufs_qcom_cap;
920 if (!dev_req_params) {
921 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
928 ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
929 ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
930 ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
931 ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
932 ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
933 ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
934 ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
935 ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
936 ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
937 ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
938 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
939 ufs_qcom_cap.desired_working_mode =
940 UFS_QCOM_LIMIT_DESIRED_MODE;
942 if (host->hw_ver.major == 0x1) {
* HS-G3 operations may not work reliably on legacy QCOM
* UFS host controller hardware, even though the capability
* exchange during the link startup phase may end up
* negotiating the maximum supported gear as G3.
* Hence, downgrade the maximum supported gear to HS-G2.
950 if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
951 ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
952 if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
953 ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
956 ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
960 pr_err("%s: failed to determine capabilities\n",
967 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
968 dev_req_params->pwr_rx,
969 dev_req_params->hs_rate, false)) {
970 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
973 * we return error code at the end of the routine,
974 * but continue to configure UFS_PHY_TX_LANE_ENABLE
975 * and bus voting as usual
980 val = ~(MAX_U32 << dev_req_params->lane_tx);
981 res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
983 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
988 /* cache the power mode parameters to use internally */
989 memcpy(&host->dev_req_params,
990 dev_req_params, sizeof(*dev_req_params));
991 ufs_qcom_update_bus_bw_vote(host);
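/*
 * Override the UFSHCI version reported to the core: 1.1 for controller
 * v1.x hardware and 2.0 otherwise (see also the
 * UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION quirk below).
 */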
1001 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
1003 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1005 if (host->hw_ver.major == 0x1)
1006 return UFSHCI_VERSION_11;
1008 return UFSHCI_VERSION_20;
1012 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
1013 * @hba: host controller instance
* The QCOM UFS host controller may exhibit some non-standard behaviours
* (quirks) beyond what the UFSHCI specification defines. Advertise all
* such quirks to the standard UFS host controller driver so that it
* takes them into account.
1020 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
1022 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1024 if (host->hw_ver.major == 0x01) {
1025 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1026 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
1027 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
1029 if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
1030 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
1032 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
1035 if (host->hw_ver.major >= 0x2) {
1036 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
1038 if (!ufs_qcom_cap_qunipro(host))
/* Legacy UniPro mode still needs the following quirks */
1040 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1041 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
1042 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
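/*
 * Advertise generic ufshcd capabilities (clock gating, hibern8 with
 * clock gating, clock scaling, auto-BKOPS suspend) and, on controller
 * v2.x and later, the QCOM-specific capabilities (QUniPro, retaining
 * the security configuration across power collapse).
 */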
1046 static void ufs_qcom_set_caps(struct ufs_hba *hba)
1048 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1050 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1051 hba->caps |= UFSHCD_CAP_CLK_SCALING;
1052 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
1054 if (host->hw_ver.major >= 0x2) {
1055 host->caps = UFS_QCOM_CAP_QUNIPRO |
1056 UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
* ufs_qcom_setup_clocks - enables/disables clocks
1062 * @hba: host controller instance
1063 * @on: If true, enable clocks else disable them.
1065 * Returns 0 on success, non-zero on failure.
1067 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
1069 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
* In case ufs_qcom_init() has not completed yet, simply ignore.
* This ufs_qcom_setup_clocks() will be called again from
* ufs_qcom_init() once initialization is done.
1082 err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
1086 err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
1088 dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
1090 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1093 vote = host->bus_vote.saved_vote;
1094 if (vote == host->bus_vote.min_bw_vote)
1095 ufs_qcom_update_bus_bw_vote(host);
1099 /* M-PHY RMMI interface clocks can be turned off */
1100 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1101 if (!ufs_qcom_is_link_active(hba))
1102 /* disable device ref_clk */
1103 ufs_qcom_dev_ref_clk_ctrl(host, false);
1105 vote = host->bus_vote.min_bw_vote;
1108 err = ufs_qcom_set_bus_vote(host, vote);
1110 dev_err(hba->dev, "%s: set bus vote failed %d\n",
1117 #define ANDROID_BOOT_DEV_MAX 30
1118 static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
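/*
 * Capture the "androidboot.bootdevice=" kernel command line argument;
 * ufs_qcom_init() checks it against dev_name(dev) before going any
 * further.
 */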
1121 static int __init get_android_boot_dev(char *str)
1123 strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
1126 __setup("androidboot.bootdevice=", get_android_boot_dev);
1130 * ufs_qcom_init - bind phy with controller
1131 * @hba: host controller instance
* Binds the PHY with the controller and powers up the PHY, enabling its clocks.
* Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
* power-up failure, and zero on success.
1139 static int ufs_qcom_init(struct ufs_hba *hba)
1142 struct device *dev = hba->dev;
1143 struct platform_device *pdev = to_platform_device(dev);
1144 struct ufs_qcom_host *host;
1145 struct resource *res;
1147 if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
1150 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1153 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
/* Make a two-way binding between the qcom host and the hba */
1159 ufshcd_set_variant(hba, host);
* Voting/un-voting the device ref_clk source is time consuming, hence
* skip un-voting it during aggressive clock gating. This clock
* will still be gated off during runtime suspend.
1166 host->generic_phy = devm_phy_get(dev, "ufsphy");
1168 if (IS_ERR(host->generic_phy)) {
1169 err = PTR_ERR(host->generic_phy);
1170 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
1174 err = ufs_qcom_bus_register(host);
1178 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1179 &host->hw_ver.minor, &host->hw_ver.step);
* For newer controllers, the device reference clock control bit has
* moved into the UFS controller register address space itself.
1185 if (host->hw_ver.major >= 0x02) {
1186 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1187 host->dev_ref_clk_en_mask = BIT(26);
1189 /* "dev_ref_clk_ctrl_mem" is optional resource */
1190 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1192 host->dev_ref_clk_ctrl_mmio =
1193 devm_ioremap_resource(dev, res);
1194 if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
1196 "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
1198 PTR_ERR(host->dev_ref_clk_ctrl_mmio));
1199 host->dev_ref_clk_ctrl_mmio = NULL;
1201 host->dev_ref_clk_en_mask = BIT(5);
1205 /* update phy revision information before calling phy_init() */
1206 ufs_qcom_phy_save_controller_version(host->generic_phy,
1207 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
1209 phy_init(host->generic_phy);
1210 err = phy_power_on(host->generic_phy);
1212 goto out_unregister_bus;
1214 err = ufs_qcom_init_lane_clks(host);
1216 goto out_disable_phy;
1218 ufs_qcom_set_caps(hba);
1219 ufs_qcom_advertise_quirks(hba);
1221 ufs_qcom_setup_clocks(hba, true);
1223 if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
1224 ufs_qcom_hosts[hba->dev->id] = host;
1226 host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
1227 ufs_qcom_get_default_testbus_cfg(host);
1228 err = ufs_qcom_testbus_config(host);
1230 dev_warn(dev, "%s: failed to configure the testbus %d\n",
1238 phy_power_off(host->generic_phy);
1240 phy_exit(host->generic_phy);
1242 devm_kfree(dev, host);
1243 ufshcd_set_variant(hba, NULL);
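/* Undo ufs_qcom_init(): disable the lane clocks and power off the PHY. */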
1248 static void ufs_qcom_exit(struct ufs_hba *hba)
1250 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1252 ufs_qcom_disable_lane_clks(host);
1253 phy_power_off(host->generic_phy);
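/*
 * Program the vendor-specific DME_VS_CORE_CLK_CTRL attribute with the
 * number of core clock cycles per microsecond and clear the
 * CORE_CLK_DIV_EN bit. The clock scaling paths below use 150 cycles when
 * scaling up and 75 when scaling down.
 */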
1256 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
1260 u32 core_clk_ctrl_reg;
1262 if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
1265 err = ufshcd_dme_get(hba,
1266 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1267 &core_clk_ctrl_reg);
1271 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
1272 core_clk_ctrl_reg |= clk_cycles;
1274 /* Clear CORE_CLK_DIV_EN */
1275 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1277 err = ufshcd_dme_set(hba,
1278 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1284 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
1286 /* nothing to do as of now */
1290 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
1292 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1294 if (!ufs_qcom_cap_qunipro(host))
1297 /* set unipro core clock cycles to 150 and clear clock divider */
1298 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
1301 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
1303 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1305 u32 core_clk_ctrl_reg;
1307 if (!ufs_qcom_cap_qunipro(host))
1310 err = ufshcd_dme_get(hba,
1311 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1312 &core_clk_ctrl_reg);
1314 /* make sure CORE_CLK_DIV_EN is cleared */
1316 (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
1317 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1318 err = ufshcd_dme_set(hba,
1319 UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1326 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
1328 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1330 if (!ufs_qcom_cap_qunipro(host))
1333 /* set unipro core clock cycles to 75 and clear clock divider */
1334 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
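/*
 * Clock scaling notifier: run the pre/post change hooks above for the
 * requested direction, then reprogram the timer registers for the cached
 * power mode and refresh the bus bandwidth vote.
 */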
1337 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
1338 bool scale_up, enum ufs_notify_change_status status)
1340 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1341 struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
1344 if (status == PRE_CHANGE) {
1346 err = ufs_qcom_clk_scale_up_pre_change(hba);
1348 err = ufs_qcom_clk_scale_down_pre_change(hba);
1351 err = ufs_qcom_clk_scale_up_post_change(hba);
1353 err = ufs_qcom_clk_scale_down_post_change(hba);
1355 if (err || !dev_req_params)
1358 ufs_qcom_cfg_timers(hba,
1359 dev_req_params->gear_rx,
1360 dev_req_params->pwr_rx,
1361 dev_req_params->hs_rate,
1363 ufs_qcom_update_bus_bw_vote(host);
1370 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1372 /* provide a legal default configuration */
1373 host->testbus.select_major = TSTBUS_UAWM;
1374 host->testbus.select_minor = 1;
1377 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1379 if (host->testbus.select_major >= TSTBUS_MAX) {
1380 dev_err(host->hba->dev,
1381 "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
1382 __func__, host->testbus.select_major);
* No check is performed for each individual select_major's
* mapping of select_minor, since there is no harm in
* configuring a non-existent select_minor.
1391 if (host->testbus.select_minor > 0x1F) {
1392 dev_err(host->hba->dev,
1393 "%s: 0x%05X is not a legal testbus option\n",
1394 __func__, host->testbus.select_minor);
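/*
 * Route the selected test bus signals to the UFS_TEST_BUS register:
 * select_major picks the sub-module (written to the TEST_BUS_SEL field
 * of UFS_CFG1) and select_minor picks the signal group within it
 * (written to the matching UFS_TEST_BUS_CTRL_n register).
 */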
1401 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1405 u32 mask = TEST_BUS_SUB_SEL_MASK;
1410 if (!ufs_qcom_testbus_cfg_is_ok(host))
1413 switch (host->testbus.select_major) {
1415 reg = UFS_TEST_BUS_CTRL_0;
1419 reg = UFS_TEST_BUS_CTRL_0;
1423 reg = UFS_TEST_BUS_CTRL_0;
1427 reg = UFS_TEST_BUS_CTRL_0;
1431 reg = UFS_TEST_BUS_CTRL_1;
1435 reg = UFS_TEST_BUS_CTRL_1;
1439 reg = UFS_TEST_BUS_CTRL_1;
1443 reg = UFS_TEST_BUS_CTRL_1;
1446 case TSTBUS_WRAPPER:
1447 reg = UFS_TEST_BUS_CTRL_2;
1450 case TSTBUS_COMBINED:
1451 reg = UFS_TEST_BUS_CTRL_2;
1454 case TSTBUS_UTP_HCI:
1455 reg = UFS_TEST_BUS_CTRL_2;
1459 reg = UFS_UNIPRO_CFG;
* No need for a default case, since
* ufs_qcom_testbus_cfg_is_ok() checks that the configuration
* is legal.
1470 pm_runtime_get_sync(host->hba->dev);
1471 ufshcd_hold(host->hba, false);
1472 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1473 (u32)host->testbus.select_major << 19,
1475 ufshcd_rmwl(host->hba, mask,
1476 (u32)host->testbus.select_minor << offset,
1478 ufshcd_release(host->hba);
1479 pm_runtime_put_sync(host->hba->dev);
1484 static void ufs_qcom_testbus_read(struct ufs_hba *hba)
1486 ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
1489 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1491 ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
1492 "HCI Vendor Specific Registers ");
1494 ufs_qcom_testbus_read(hba);
1497 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1499 * The variant operations configure the necessary controller and PHY
1500 * handshake during initialization.
1502 static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1504 .init = ufs_qcom_init,
1505 .exit = ufs_qcom_exit,
1506 .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
1507 .clk_scale_notify = ufs_qcom_clk_scale_notify,
1508 .setup_clocks = ufs_qcom_setup_clocks,
1509 .hce_enable_notify = ufs_qcom_hce_enable_notify,
1510 .link_startup_notify = ufs_qcom_link_startup_notify,
1511 .pwr_change_notify = ufs_qcom_pwr_change_notify,
1512 .suspend = ufs_qcom_suspend,
1513 .resume = ufs_qcom_resume,
1514 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
1518 * ufs_qcom_probe - probe routine of the driver
* @pdev: pointer to platform device handle
* Returns zero on success and non-zero on failure.
1523 static int ufs_qcom_probe(struct platform_device *pdev)
1526 struct device *dev = &pdev->dev;
1528 /* Perform generic probe */
1529 err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
1531 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
1537 * ufs_qcom_remove - set driver_data of the device to NULL
1538 * @pdev: pointer to platform device handle
1542 static int ufs_qcom_remove(struct platform_device *pdev)
1544 struct ufs_hba *hba = platform_get_drvdata(pdev);
1546 pm_runtime_get_sync(&(pdev)->dev);
1551 static const struct of_device_id ufs_qcom_of_match[] = {
1552 { .compatible = "qcom,ufshc"},
1556 static const struct dev_pm_ops ufs_qcom_pm_ops = {
1557 .suspend = ufshcd_pltfrm_suspend,
1558 .resume = ufshcd_pltfrm_resume,
1559 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
1560 .runtime_resume = ufshcd_pltfrm_runtime_resume,
1561 .runtime_idle = ufshcd_pltfrm_runtime_idle,
1564 static struct platform_driver ufs_qcom_pltform = {
1565 .probe = ufs_qcom_probe,
1566 .remove = ufs_qcom_remove,
1567 .shutdown = ufshcd_pltfrm_shutdown,
1569 .name = "ufshcd-qcom",
1570 .pm = &ufs_qcom_pm_ops,
1571 .of_match_table = of_match_ptr(ufs_qcom_of_match),
1574 module_platform_driver(ufs_qcom_pltform);
1576 MODULE_LICENSE("GPL v2");