Merge tag 'usb-6.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 17e24270477dd838dd19ab933793c0aa77511920..480787048e752929d9b255cde85feb9d629292f1 100644
@@ -4,26 +4,26 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/time.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interconnect.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/platform_device.h>
 #include <linux/phy/phy.h>
-#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
 #include <linux/reset-controller.h>
-#include <linux/devfreq.h>
+#include <linux/time.h>
 
 #include <soc/qcom/ice.h>
 
 #include <ufs/ufshcd.h>
-#include "ufshcd-pltfrm.h"
-#include <ufs/unipro.h>
-#include "ufs-qcom.h"
 #include <ufs/ufshci.h>
 #include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-qcom.h"
 
 #define MCQ_QCFGPTR_MASK       GENMASK(7, 0)
 #define MCQ_QCFGPTR_UNIT       0x200
@@ -90,8 +90,6 @@ static const struct __ufs_qcom_bw_table {
        [MODE_MAX][0][0]                    = { 7643136,        307200 },
 };
 
-static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
 static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
 
@@ -158,7 +156,7 @@ static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
        cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
        if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
            cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
        if (config_enable)
                return qcom_ice_program_key(host->ice,
@@ -194,52 +192,12 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
 }
 #endif
 
-static int ufs_qcom_host_clk_get(struct device *dev,
-               const char *name, struct clk **clk_out, bool optional)
-{
-       struct clk *clk;
-       int err = 0;
-
-       clk = devm_clk_get(dev, name);
-       if (!IS_ERR(clk)) {
-               *clk_out = clk;
-               return 0;
-       }
-
-       err = PTR_ERR(clk);
-
-       if (optional && err == -ENOENT) {
-               *clk_out = NULL;
-               return 0;
-       }
-
-       if (err != -EPROBE_DEFER)
-               dev_err(dev, "failed to get %s err %d\n", name, err);
-
-       return err;
-}
-
-static int ufs_qcom_host_clk_enable(struct device *dev,
-               const char *name, struct clk *clk)
-{
-       int err = 0;
-
-       err = clk_prepare_enable(clk);
-       if (err)
-               dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
-
-       return err;
-}
-
 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
 {
        if (!host->is_lane_clks_enabled)
                return;
 
-       clk_disable_unprepare(host->tx_l1_sync_clk);
-       clk_disable_unprepare(host->tx_l0_sync_clk);
-       clk_disable_unprepare(host->rx_l1_sync_clk);
-       clk_disable_unprepare(host->rx_l0_sync_clk);
+       clk_bulk_disable_unprepare(host->num_clks, host->clks);
 
        host->is_lane_clks_enabled = false;
 }
@@ -247,73 +205,29 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
 {
        int err;
-       struct device *dev = host->hba->dev;
 
-       if (host->is_lane_clks_enabled)
-               return 0;
-
-       err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
-               host->rx_l0_sync_clk);
+       err = clk_bulk_prepare_enable(host->num_clks, host->clks);
        if (err)
                return err;
 
-       err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
-               host->tx_l0_sync_clk);
-       if (err)
-               goto disable_rx_l0;
-
-       err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
-                       host->rx_l1_sync_clk);
-       if (err)
-               goto disable_tx_l0;
-
-       err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
-                       host->tx_l1_sync_clk);
-       if (err)
-               goto disable_rx_l1;
-
        host->is_lane_clks_enabled = true;
 
        return 0;
-
-disable_rx_l1:
-       clk_disable_unprepare(host->rx_l1_sync_clk);
-disable_tx_l0:
-       clk_disable_unprepare(host->tx_l0_sync_clk);
-disable_rx_l0:
-       clk_disable_unprepare(host->rx_l0_sync_clk);
-
-       return err;
 }
 
 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
 {
-       int err = 0;
+       int err;
        struct device *dev = host->hba->dev;
 
        if (has_acpi_companion(dev))
                return 0;
 
-       err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
-                                       &host->rx_l0_sync_clk, false);
-       if (err)
+       err = devm_clk_bulk_get_all(dev, &host->clks);
+       if (err <= 0)
                return err;
 
-       err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
-                                       &host->tx_l0_sync_clk, false);
-       if (err)
-               return err;
-
-       /* In case of single lane per direction, don't read lane1 clocks */
-       if (host->hba->lanes_per_direction > 1) {
-               err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
-                       &host->rx_l1_sync_clk, false);
-               if (err)
-                       return err;
-
-               err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-                       &host->tx_l1_sync_clk, true);
-       }
+       host->num_clks = err;
 
        return 0;
 }
@@ -321,7 +235,7 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 {
        int err;
-       u32 tx_fsm_val = 0;
+       u32 tx_fsm_val;
        unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
 
        do {
@@ -360,9 +274,7 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 
 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
 {
-       ufshcd_rmwl(host->hba, QUNIPRO_SEL,
-                  ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
-                  REG_UFS_CFG1);
+       ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
 
        if (host->hw_ver.major >= 0x05)
                ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
@@ -376,18 +288,15 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
  */
 static int ufs_qcom_host_reset(struct ufs_hba *hba)
 {
-       int ret = 0;
+       int ret;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-       bool reenable_intr = false;
+       bool reenable_intr;
 
-       if (!host->core_reset) {
-               dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+       if (!host->core_reset)
                return 0;
-       }
 
        reenable_intr = hba->is_irq_enabled;
-       disable_irq(hba->irq);
-       hba->is_irq_enabled = false;
+       ufshcd_disable_irq(hba);
 
        ret = reset_control_assert(host->core_reset);
        if (ret) {
@@ -404,16 +313,16 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
        usleep_range(200, 210);
 
        ret = reset_control_deassert(host->core_reset);
-       if (ret)
+       if (ret) {
                dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
                                 __func__, ret);
+               return ret;
+       }
 
        usleep_range(1000, 1100);
 
-       if (reenable_intr) {
-               enable_irq(hba->irq);
-               hba->is_irq_enabled = true;
-       }
+       if (reenable_intr)
+               ufshcd_enable_irq(hba);
 
        return 0;
 }
@@ -422,18 +331,8 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-       if (host->hw_ver.major == 0x1) {
-               /*
-                * HS-G3 operations may not reliably work on legacy QCOM
-                * UFS host controller hardware even though capability
-                * exchange during link startup phase may end up
-                * negotiating maximum supported gear as G3.
-                * Hence downgrade the maximum supported gear to HS-G2.
-                */
-               return UFS_HS_G2;
-       } else if (host->hw_ver.major >= 0x4) {
+       if (host->hw_ver.major >= 0x4)
                return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
-       }
 
        /* Default is HS-G3 */
        return UFS_HS_G3;
@@ -442,14 +341,29 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+       struct ufs_host_params *host_params = &host->host_params;
        struct phy *phy = host->generic_phy;
+       enum phy_mode mode;
        int ret;
 
+       /*
+        * HW ver 5 can only support up to HS-G5 Rate-A due to HW limitations.
+        * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
+        * so that the subsequent power mode change shall stick to Rate-A.
+        */
+       if (host->hw_ver.major == 0x5) {
+               if (host->phy_gear == UFS_HS_G5)
+                       host_params->hs_rate = PA_HS_MODE_A;
+               else
+                       host_params->hs_rate = PA_HS_MODE_B;
+       }
+
+       mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A;
+
        /* Reset UFS Host Controller and PHY */
        ret = ufs_qcom_host_reset(hba);
        if (ret)
-               dev_warn(hba->dev, "%s: host reset returned %d\n",
-                                 __func__, ret);
+               return ret;
 
        /* phy initialization - calibrate the phy */
        ret = phy_init(phy);
@@ -459,7 +373,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
                return ret;
        }
 
-       phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->phy_gear);
+       ret = phy_set_mode_ext(phy, mode, host->phy_gear);
+       if (ret)
+               goto out_disable_phy;
 
        /* power on phy - start serdes and phy's power and clocks */
        ret = phy_power_on(phy);
@@ -489,9 +405,8 @@ out_disable_phy:
  */
 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
 {
-       ufshcd_writel(hba,
-               ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
-               REG_UFS_CFG2);
+       ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
+                   REG_UFS_CFG2);
 
        /* Ensure that HW clock gating is enabled before next operations */
        mb();
@@ -501,11 +416,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
                                      enum ufs_notify_change_status status)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-       int err = 0;
+       int err;
 
        switch (status) {
        case PRE_CHANGE:
-               ufs_qcom_power_up_sequence(hba);
+               err = ufs_qcom_power_up_sequence(hba);
+               if (err)
+                       return err;
+
                /*
                 * The PHY PLL output is the source of tx/rx lane symbol
                 * clocks, hence, enable the lane clocks only after PHY
@@ -544,41 +462,16 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_clk_info *clki;
-       u32 core_clk_period_in_ns;
-       u32 tx_clk_cycles_per_us = 0;
        unsigned long core_clk_rate = 0;
-       u32 core_clk_cycles_per_us = 0;
-
-       static u32 pwm_fr_table[][2] = {
-               {UFS_PWM_G1, 0x1},
-               {UFS_PWM_G2, 0x1},
-               {UFS_PWM_G3, 0x1},
-               {UFS_PWM_G4, 0x1},
-       };
-
-       static u32 hs_fr_table_rA[][2] = {
-               {UFS_HS_G1, 0x1F},
-               {UFS_HS_G2, 0x3e},
-               {UFS_HS_G3, 0x7D},
-       };
-
-       static u32 hs_fr_table_rB[][2] = {
-               {UFS_HS_G1, 0x24},
-               {UFS_HS_G2, 0x49},
-               {UFS_HS_G3, 0x92},
-       };
+       u32 core_clk_cycles_per_us;
 
        /*
-        * The Qunipro controller does not use following registers:
-        * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
-        * UFS_REG_PA_LINK_STARTUP_TIMER.
-        * However UTP controller uses SYS1CLK_1US_REG register for Interrupt
+        * UTP controller uses SYS1CLK_1US_REG register for Interrupt
         * Aggregation logic.
         * It is mandatory to write SYS1CLK_1US_REG register on UFS host
         * controller V4.0.0 onwards.
         */
-       if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
-           !ufshcd_is_intr_aggr_allowed(hba))
+       if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
                return 0;
 
        if (gear == 0) {
@@ -611,79 +504,6 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                mb();
        }
 
-       if (ufs_qcom_cap_qunipro(host))
-               return 0;
-
-       core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
-       core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
-       core_clk_period_in_ns &= MASK_CLK_NS_REG;
-
-       switch (hs) {
-       case FASTAUTO_MODE:
-       case FAST_MODE:
-               if (rate == PA_HS_MODE_A) {
-                       if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
-                               dev_err(hba->dev,
-                                       "%s: index %d exceeds table size %zu\n",
-                                       __func__, gear,
-                                       ARRAY_SIZE(hs_fr_table_rA));
-                               return -EINVAL;
-                       }
-                       tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
-               } else if (rate == PA_HS_MODE_B) {
-                       if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
-                               dev_err(hba->dev,
-                                       "%s: index %d exceeds table size %zu\n",
-                                       __func__, gear,
-                                       ARRAY_SIZE(hs_fr_table_rB));
-                               return -EINVAL;
-                       }
-                       tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
-               } else {
-                       dev_err(hba->dev, "%s: invalid rate = %d\n",
-                               __func__, rate);
-                       return -EINVAL;
-               }
-               break;
-       case SLOWAUTO_MODE:
-       case SLOW_MODE:
-               if (gear > ARRAY_SIZE(pwm_fr_table)) {
-                       dev_err(hba->dev,
-                                       "%s: index %d exceeds table size %zu\n",
-                                       __func__, gear,
-                                       ARRAY_SIZE(pwm_fr_table));
-                       return -EINVAL;
-               }
-               tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
-               break;
-       case UNCHANGED:
-       default:
-               dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
-               return -EINVAL;
-       }
-
-       if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
-           (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
-               /* this register 2 fields shall be written at once */
-               ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
-                             REG_UFS_TX_SYMBOL_CLK_NS_US);
-               /*
-                * make sure above write gets applied before we return from
-                * this function.
-                */
-               mb();
-       }
-
-       if (update_link_startup_timer && host->hw_ver.major != 0x5) {
-               ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
-                             REG_UFS_CFG0);
-               /*
-                * make sure that this configuration is applied before
-                * we return
-                */
-               mb();
-       }
-
        return 0;
 }
 
@@ -691,7 +511,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                                        enum ufs_notify_change_status status)
 {
        int err = 0;
-       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
        switch (status) {
        case PRE_CHANGE:
@@ -702,11 +521,9 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                        return -EINVAL;
                }
 
-               if (ufs_qcom_cap_qunipro(host)) {
-                       err = ufs_qcom_set_core_clk_ctrl(hba, true);
-                       if (err)
-                               dev_err(hba->dev, "cfg core clk ctrl failed\n");
-               }
+               err = ufs_qcom_set_core_clk_ctrl(hba, true);
+               if (err)
+                       dev_err(hba->dev, "cfg core clk ctrl failed\n");
                /*
                 * Some UFS devices (and may be host) have issues if LCC is
                 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
@@ -898,7 +715,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                                struct ufs_pa_layer_attr *dev_req_params)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-       struct ufs_dev_params ufs_qcom_cap;
+       struct ufs_host_params *host_params = &host->host_params;
        int ret = 0;
 
        if (!dev_req_params) {
@@ -908,15 +725,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 
        switch (status) {
        case PRE_CHANGE:
-               ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
-               ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
-
-               /* This driver only supports symmetic gear setting i.e., hs_tx_gear == hs_rx_gear */
-               ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
-
-               ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
-                                              dev_max_params,
-                                              dev_req_params);
+               ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
                if (ret) {
                        dev_err(hba->dev, "%s: failed to determine capabilities\n",
                                        __func__);
@@ -924,11 +733,12 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                }
 
                /*
-                * Update phy_gear only when the gears are scaled to a higher value. This is
-                * because, the PHY gear settings are backwards compatible and we only need to
-                * change the PHY gear settings while scaling to higher gears.
+                * During UFS driver probe, always update the PHY gear to match the negotiated
+                * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled,
+                * the second init can program the optimal PHY settings. This allows one to start
+                * the first init with either the minimum or the maximum supported gear.
                 */
-               if (dev_req_params->gear_tx > host->phy_gear)
+               if (hba->ufshcd_state == UFSHCD_STATE_RESET)
                        host->phy_gear = dev_req_params->gear_tx;
 
                /* enable the device ref clock before changing to HS mode */
@@ -1005,12 +815,7 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
-       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-       if (host->hw_ver.major == 0x1)
-               return ufshci_version(1, 1);
-       else
-               return ufshci_version(2, 0);
+       return ufshci_version(2, 0);
 }
 
 /**
@@ -1026,46 +831,69 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-       if (host->hw_ver.major == 0x01) {
-               hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-                           | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
-                           | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+       if (host->hw_ver.major == 0x2)
+               hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 
-               if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
-                       hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
+       if (host->hw_ver.major > 0x3)
+               hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+}
 
-               hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
-       }
+static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
+{
+       struct ufs_host_params *host_params = &host->host_params;
+       u32 val, dev_major;
 
-       if (host->hw_ver.major == 0x2) {
-               hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
+       host->phy_gear = host_params->hs_tx_gear;
 
-               if (!ufs_qcom_cap_qunipro(host))
-                       /* Legacy UniPro mode still need following quirks */
-                       hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-                               | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
-                               | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
-       }
+       if (host->hw_ver.major < 0x4) {
+               /*
+                * For controllers whose major HW version is < 4, power up the
+                * PHY using minimum supported gear (UFS_HS_G2). Switching to
+                * max gear will be performed during reinit if supported.
+                * For newer controllers, whose major HW version is >= 4, power
+                * up the PHY using max supported gear.
+                */
+               host->phy_gear = UFS_HS_G2;
+       } else if (host->hw_ver.major >= 0x5) {
+               val = ufshcd_readl(host->hba, REG_UFS_DEBUG_SPARE_CFG);
+               dev_major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val);
 
-       if (host->hw_ver.major > 0x3)
-               hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+               /*
+                * Since the UFS device version is populated, let's remove the
+                * REINIT quirk as the negotiated gear won't change during boot.
+                * So there is no need to do reinit.
+                */
+               if (dev_major != 0x0)
+                       host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+
+               /*
+                * For UFS 3.1 devices and older, power up the PHY using HS-G4
+                * PHY gear to save power.
+                */
+               if (dev_major > 0x0 && dev_major < 0x4)
+                       host->phy_gear = UFS_HS_G4;
+       }
 }
 
-static void ufs_qcom_set_caps(struct ufs_hba *hba)
+static void ufs_qcom_set_host_params(struct ufs_hba *hba)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+       struct ufs_host_params *host_params = &host->host_params;
+
+       ufshcd_init_host_params(host_params);
 
+       /* This driver only supports symmetric gear setting i.e., hs_tx_gear == hs_rx_gear */
+       host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
+}
+
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
        hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
        hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
        hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
        hba->caps |= UFSHCD_CAP_WB_EN;
        hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
-
-       if (host->hw_ver.major >= 0x2) {
-               host->caps = UFS_QCOM_CAP_QUNIPRO |
-                            UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
-       }
 }
 
 /**
@@ -1188,16 +1016,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 {
        int err;
        struct device *dev = hba->dev;
-       struct platform_device *pdev = to_platform_device(dev);
        struct ufs_qcom_host *host;
-       struct resource *res;
        struct ufs_clk_info *clki;
 
        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
-       if (!host) {
-               dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
+       if (!host)
                return -ENOMEM;
-       }
 
        /* Make a two way bind between the qcom host and the hba */
        host->hba = hba;
@@ -1235,34 +1059,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
        host->device_reset = devm_gpiod_get_optional(dev, "reset",
                                                     GPIOD_OUT_HIGH);
        if (IS_ERR(host->device_reset)) {
-               err = PTR_ERR(host->device_reset);
-               if (err != -EPROBE_DEFER)
-                       dev_err(dev, "failed to acquire reset gpio: %d\n", err);
+               err = dev_err_probe(dev, PTR_ERR(host->device_reset),
+                                   "Failed to acquire device reset gpio\n");
                goto out_variant_clear;
        }
 
        ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
                &host->hw_ver.minor, &host->hw_ver.step);
 
-       /*
-        * for newer controllers, device reference clock control bit has
-        * moved inside UFS controller register address space itself.
-        */
-       if (host->hw_ver.major >= 0x02) {
-               host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
-               host->dev_ref_clk_en_mask = BIT(26);
-       } else {
-               /* "dev_ref_clk_ctrl_mem" is optional resource */
-               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-                                                  "dev_ref_clk_ctrl_mem");
-               if (res) {
-                       host->dev_ref_clk_ctrl_mmio =
-                                       devm_ioremap_resource(dev, res);
-                       if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
-                               host->dev_ref_clk_ctrl_mmio = NULL;
-                       host->dev_ref_clk_en_mask = BIT(5);
-               }
-       }
+       host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
+       host->dev_ref_clk_en_mask = BIT(26);
 
        list_for_each_entry(clki, &hba->clk_list_head, list) {
                if (!strcmp(clki->name, "core_clk_unipro"))
@@ -1275,6 +1081,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 
        ufs_qcom_set_caps(hba);
        ufs_qcom_advertise_quirks(hba);
+       ufs_qcom_set_host_params(hba);
+       ufs_qcom_set_phy_gear(host);
 
        err = ufs_qcom_ice_init(host);
        if (err)
@@ -1282,9 +1090,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 
        ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
 
-       if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
-               ufs_qcom_hosts[hba->dev->id] = host;
-
        ufs_qcom_get_default_testbus_cfg(host);
        err = ufs_qcom_testbus_config(host);
        if (err)
@@ -1292,12 +1097,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
                dev_warn(dev, "%s: failed to configure the testbus %d\n",
                                __func__, err);
 
-       /*
-        * Power up the PHY using the minimum supported gear (UFS_HS_G2).
-        * Switching to max gear will be performed during reinit if supported.
-        */
-       host->phy_gear = UFS_HS_G2;
-
        return 0;
 
 out_variant_clear:
@@ -1391,7 +1190,7 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct list_head *head = &hba->clk_list_head;
        struct ufs_clk_info *clki;
-       u32 cycles_in_1us;
+       u32 cycles_in_1us = 0;
        u32 core_clk_ctrl_reg;
        int err;
 
@@ -1444,9 +1243,6 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
        struct ufs_pa_layer_attr *attr = &host->dev_req_params;
        int ret;
 
-       if (!ufs_qcom_cap_qunipro(host))
-               return 0;
-
        ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
                                  attr->hs_rate, false, true);
        if (ret) {
@@ -1464,13 +1260,9 @@ static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 {
-       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        int err;
        u32 core_clk_ctrl_reg;
 
-       if (!ufs_qcom_cap_qunipro(host))
-               return 0;
-
        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            &core_clk_ctrl_reg);
@@ -1489,11 +1281,6 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
 {
-       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-       if (!ufs_qcom_cap_qunipro(host))
-               return 0;
-
        /* set unipro core clock attributes and clear clock divider */
        return ufs_qcom_set_core_clk_ctrl(hba, false);
 }
@@ -1502,7 +1289,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                bool scale_up, enum ufs_notify_change_status status)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-       int err = 0;
+       int err;
 
        /* check the host controller state before sending hibern8 cmd */
        if (!ufshcd_is_hba_active(hba))
@@ -1775,7 +1562,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
        struct platform_device *pdev = to_platform_device(hba->dev);
        struct ufshcd_res_info *res;
        struct resource *res_mem, *res_mcq;
-       int i, ret = 0;
+       int i, ret;
 
        memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
 
@@ -1787,7 +1574,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
                if (!res->resource) {
                        dev_info(hba->dev, "Resource %s not provided\n", res->name);
                        if (i == RES_UFS)
-                               return -ENOMEM;
+                               return -ENODEV;
                        continue;
                } else if (i == RES_UFS) {
                        res_mem = res->resource;
@@ -1958,11 +1745,10 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
                platform_msi_domain_free_irqs(hba->dev);
        } else {
                if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
-                   host->hw_ver.step == 0) {
-                       ufshcd_writel(hba,
-                                     ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
-                                     REG_UFS_CFG3);
-               }
+                   host->hw_ver.step == 0)
+                       ufshcd_rmwl(hba, ESI_VEC_MASK,
+                                   FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+                                   REG_UFS_CFG3);
                ufshcd_mcq_enable_esi(hba);
        }