drivers/ufs/host/ufs-qcom.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
4  */
5
6 #include <linux/acpi.h>
7 #include <linux/time.h>
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/interconnect.h>
11 #include <linux/module.h>
12 #include <linux/of.h>
13 #include <linux/platform_device.h>
14 #include <linux/phy/phy.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/reset-controller.h>
17 #include <linux/devfreq.h>
18
19 #include <soc/qcom/ice.h>
20
21 #include <ufs/ufshcd.h>
22 #include "ufshcd-pltfrm.h"
23 #include <ufs/unipro.h>
24 #include "ufs-qcom.h"
25 #include <ufs/ufshci.h>
26 #include <ufs/ufs_quirks.h>
27
28 #define MCQ_QCFGPTR_MASK        GENMASK(7, 0)
29 #define MCQ_QCFGPTR_UNIT        0x200
30 #define MCQ_SQATTR_OFFSET(c) \
31         ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
32 #define MCQ_QCFG_SIZE   0x40
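/*
 * Illustrative arithmetic only (hypothetical capability value): with
 * c = 0x00120000, ((c) >> 16) & MCQ_QCFGPTR_MASK = 0x12, so
 * MCQ_SQATTR_OFFSET(c) = 0x12 * MCQ_QCFGPTR_UNIT = 0x2400 bytes from the
 * MCQ configuration base.
 */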
33
34 enum {
35         TSTBUS_UAWM,
36         TSTBUS_UARM,
37         TSTBUS_TXUC,
38         TSTBUS_RXUC,
39         TSTBUS_DFC,
40         TSTBUS_TRLUT,
41         TSTBUS_TMRLUT,
42         TSTBUS_OCSC,
43         TSTBUS_UTP_HCI,
44         TSTBUS_COMBINED,
45         TSTBUS_WRAPPER,
46         TSTBUS_UNIPRO,
47         TSTBUS_MAX,
48 };
49
50 #define QCOM_UFS_MAX_GEAR 4
51 #define QCOM_UFS_MAX_LANE 2
52
53 enum {
54         MODE_MIN,
55         MODE_PWM,
56         MODE_HS_RA,
57         MODE_HS_RB,
58         MODE_MAX,
59 };
60
61 static const struct __ufs_qcom_bw_table {
62         u32 mem_bw;
63         u32 cfg_bw;
64 } ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = {
65         [MODE_MIN][0][0]                   = { 0,               0 }, /* Bandwidth values in KB/s */
66         [MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922,             1000 },
67         [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844,            1000 },
68         [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688,            1000 },
69         [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376,            1000 },
70         [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844,            1000 },
71         [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688,            1000 },
72         [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376,            1000 },
73         [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752,           1000 },
74         [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796,         1000 },
75         [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591,         1000 },
76         [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582,        102400 },
77         [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200,        204800 },
78         [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591,         1000 },
79         [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181,         1000 },
80         [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582,        204800 },
81         [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200,        409600 },
82         [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422,         1000 },
83         [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189,         1000 },
84         [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582,        102400 },
85         [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200,        204800 },
86         [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189,         1000 },
87         [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378,         1000 },
88         [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582,        204800 },
89         [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200,        409600 },
90         [MODE_MAX][0][0]                    = { 7643136,        307200 },
91 };
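/*
 * Illustrative lookup only: the table is indexed as [mode][gear][lane], e.g.
 * ufs_qcom_bw_table[MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
 * i.e. a 1492582 KB/s vote on the "ufs-ddr" path and a 204800 KB/s vote on
 * the "cpu-ufs" path (see ufs_qcom_icc_init() and ufs_qcom_icc_set_bw()).
 */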
92
93 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
94
95 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
96 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
97                                                        u32 clk_cycles);
98
99 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
100 {
101         return container_of(rcd, struct ufs_qcom_host, rcdev);
102 }
103
104 #ifdef CONFIG_SCSI_UFS_CRYPTO
105
106 static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
107 {
108         if (host->hba->caps & UFSHCD_CAP_CRYPTO)
109                 qcom_ice_enable(host->ice);
110 }
111
112 static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
113 {
114         struct ufs_hba *hba = host->hba;
115         struct device *dev = hba->dev;
116         struct qcom_ice *ice;
117
118         ice = of_qcom_ice_get(dev);
119         if (ice == ERR_PTR(-EOPNOTSUPP)) {
120                 dev_warn(dev, "Disabling inline encryption support\n");
121                 ice = NULL;
122         }
123
124         if (IS_ERR_OR_NULL(ice))
125                 return PTR_ERR_OR_ZERO(ice);
126
127         host->ice = ice;
128         hba->caps |= UFSHCD_CAP_CRYPTO;
129
130         return 0;
131 }
132
133 static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
134 {
135         if (host->hba->caps & UFSHCD_CAP_CRYPTO)
136                 return qcom_ice_resume(host->ice);
137
138         return 0;
139 }
140
141 static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
142 {
143         if (host->hba->caps & UFSHCD_CAP_CRYPTO)
144                 return qcom_ice_suspend(host->ice);
145
146         return 0;
147 }
148
149 static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
150                                     const union ufs_crypto_cfg_entry *cfg,
151                                     int slot)
152 {
153         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
154         union ufs_crypto_cap_entry cap;
155         bool config_enable =
156                 cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE;
157
158         /* Only AES-256-XTS has been tested so far. */
159         cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
160         if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
161             cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
162                 return -EINVAL;
163
164         if (config_enable)
165                 return qcom_ice_program_key(host->ice,
166                                             QCOM_ICE_CRYPTO_ALG_AES_XTS,
167                                             QCOM_ICE_CRYPTO_KEY_SIZE_256,
168                                             cfg->crypto_key,
169                                             cfg->data_unit_size, slot);
170         else
171                 return qcom_ice_evict_key(host->ice, slot);
172 }
173
174 #else
175
176 #define ufs_qcom_ice_program_key NULL
177
178 static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
179 {
180 }
181
182 static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
183 {
184         return 0;
185 }
186
187 static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
188 {
189         return 0;
190 }
191
192 static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
193 {
194         return 0;
195 }
196 #endif
197
198 static int ufs_qcom_host_clk_get(struct device *dev,
199                 const char *name, struct clk **clk_out, bool optional)
200 {
201         struct clk *clk;
202         int err = 0;
203
204         clk = devm_clk_get(dev, name);
205         if (!IS_ERR(clk)) {
206                 *clk_out = clk;
207                 return 0;
208         }
209
210         err = PTR_ERR(clk);
211
212         if (optional && err == -ENOENT) {
213                 *clk_out = NULL;
214                 return 0;
215         }
216
217         if (err != -EPROBE_DEFER)
218                 dev_err(dev, "failed to get %s err %d\n", name, err);
219
220         return err;
221 }
222
223 static int ufs_qcom_host_clk_enable(struct device *dev,
224                 const char *name, struct clk *clk)
225 {
226         int err = 0;
227
228         err = clk_prepare_enable(clk);
229         if (err)
230                 dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
231
232         return err;
233 }
234
235 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
236 {
237         if (!host->is_lane_clks_enabled)
238                 return;
239
240         clk_disable_unprepare(host->tx_l1_sync_clk);
241         clk_disable_unprepare(host->tx_l0_sync_clk);
242         clk_disable_unprepare(host->rx_l1_sync_clk);
243         clk_disable_unprepare(host->rx_l0_sync_clk);
244
245         host->is_lane_clks_enabled = false;
246 }
247
248 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
249 {
250         int err;
251         struct device *dev = host->hba->dev;
252
253         if (host->is_lane_clks_enabled)
254                 return 0;
255
256         err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
257                 host->rx_l0_sync_clk);
258         if (err)
259                 return err;
260
261         err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
262                 host->tx_l0_sync_clk);
263         if (err)
264                 goto disable_rx_l0;
265
266         err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
267                         host->rx_l1_sync_clk);
268         if (err)
269                 goto disable_tx_l0;
270
271         err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
272                         host->tx_l1_sync_clk);
273         if (err)
274                 goto disable_rx_l1;
275
276         host->is_lane_clks_enabled = true;
277
278         return 0;
279
280 disable_rx_l1:
281         clk_disable_unprepare(host->rx_l1_sync_clk);
282 disable_tx_l0:
283         clk_disable_unprepare(host->tx_l0_sync_clk);
284 disable_rx_l0:
285         clk_disable_unprepare(host->rx_l0_sync_clk);
286
287         return err;
288 }
289
290 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
291 {
292         int err = 0;
293         struct device *dev = host->hba->dev;
294
295         if (has_acpi_companion(dev))
296                 return 0;
297
298         err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
299                                         &host->rx_l0_sync_clk, false);
300         if (err)
301                 return err;
302
303         err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
304                                         &host->tx_l0_sync_clk, false);
305         if (err)
306                 return err;
307
308         /* In case of single lane per direction, don't read lane1 clocks */
309         if (host->hba->lanes_per_direction > 1) {
310                 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
311                         &host->rx_l1_sync_clk, false);
312                 if (err)
313                         return err;
314
315                 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
316                         &host->tx_l1_sync_clk, true);
317         }
318
319         return 0;
320 }
321
322 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
323 {
324         int err;
325         u32 tx_fsm_val = 0;
326         unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
327
328         do {
329                 err = ufshcd_dme_get(hba,
330                                 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
331                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
332                                 &tx_fsm_val);
333                 if (err || tx_fsm_val == TX_FSM_HIBERN8)
334                         break;
335
336                 /* sleep for max. 200us */
337                 usleep_range(100, 200);
338         } while (time_before(jiffies, timeout));
339
340         /*
341          * We might have been scheduled out for a long time during polling,
342          * so check the state again.
343          */
344         if (time_after(jiffies, timeout))
345                 err = ufshcd_dme_get(hba,
346                                 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
347                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
348                                 &tx_fsm_val);
349
350         if (err) {
351                 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
352                                 __func__, err);
353         } else if (tx_fsm_val != TX_FSM_HIBERN8) {
354                 err = tx_fsm_val;
355                 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
356                                 __func__, err);
357         }
358
359         return err;
360 }
361
362 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
363 {
364         ufshcd_rmwl(host->hba, QUNIPRO_SEL,
365                    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
366                    REG_UFS_CFG1);
367
368         if (host->hw_ver.major >= 0x05)
369                 ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
370
371         /* make sure above configuration is applied before we return */
372         mb();
373 }
374
375 /*
376  * ufs_qcom_host_reset - reset host controller and PHY
377  */
378 static int ufs_qcom_host_reset(struct ufs_hba *hba)
379 {
380         int ret = 0;
381         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
382         bool reenable_intr = false;
383
384         if (!host->core_reset) {
385                 dev_warn(hba->dev, "%s: reset control not set\n", __func__);
386                 return 0;
387         }
388
389         reenable_intr = hba->is_irq_enabled;
390         disable_irq(hba->irq);
391         hba->is_irq_enabled = false;
392
393         ret = reset_control_assert(host->core_reset);
394         if (ret) {
395                 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
396                                  __func__, ret);
397                 return ret;
398         }
399
400         /*
401          * The hardware requires a delay of at least 3-4 sleep clock
402          * (32.768 kHz) cycles between assert and deassert, which comes to
403          * ~125us (4/32768). To be on the safe side, add a 200us delay.
404          */
405         usleep_range(200, 210);
406
407         ret = reset_control_deassert(host->core_reset);
408         if (ret)
409                 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
410                                  __func__, ret);
411
412         usleep_range(1000, 1100);
413
414         if (reenable_intr) {
415                 enable_irq(hba->irq);
416                 hba->is_irq_enabled = true;
417         }
418
419         return 0;
420 }
421
422 static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
423 {
424         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
425
426         if (host->hw_ver.major == 0x1) {
427                 /*
428                  * HS-G3 operations may not reliably work on legacy QCOM
429                  * UFS host controller hardware even though capability
430                  * exchange during link startup phase may end up
431                  * negotiating maximum supported gear as G3.
432                  * Hence downgrade the maximum supported gear to HS-G2.
433                  */
434                 return UFS_HS_G2;
435         } else if (host->hw_ver.major >= 0x4) {
436                 return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
437         }
438
439         /* Default is HS-G3 */
440         return UFS_HS_G3;
441 }
442
443 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
444 {
445         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
446         struct phy *phy = host->generic_phy;
447         int ret;
448
449         /* Reset UFS Host Controller and PHY */
450         ret = ufs_qcom_host_reset(hba);
451         if (ret)
452                 dev_warn(hba->dev, "%s: host reset returned %d\n",
453                                   __func__, ret);
454
455         /* phy initialization - calibrate the phy */
456         ret = phy_init(phy);
457         if (ret) {
458                 dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
459                         __func__, ret);
460                 return ret;
461         }
462
463         phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
464
465         /* power on phy - start serdes and phy's power and clocks */
466         ret = phy_power_on(phy);
467         if (ret) {
468                 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
469                         __func__, ret);
470                 goto out_disable_phy;
471         }
472
473         ufs_qcom_select_unipro_mode(host);
474
475         return 0;
476
477 out_disable_phy:
478         phy_exit(phy);
479
480         return ret;
481 }
482
483 /*
484  * The UTP controller has a number of internal clock gating cells (CGCs).
485  * Internal hardware sub-modules within the UTP controller control the CGCs.
486  * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
487  * in a specific operation. The UTP controller CGCs are disabled by default,
488  * and this function enables them (after every UFS link startup) to reduce
489  * power leakage.
490  */
491 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
492 {
493         ufshcd_writel(hba,
494                 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
495                 REG_UFS_CFG2);
496
497         /* Ensure that HW clock gating is enabled before the next operations */
498         mb();
499 }
500
501 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
502                                       enum ufs_notify_change_status status)
503 {
504         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
505         int err = 0;
506
507         switch (status) {
508         case PRE_CHANGE:
509                 ufs_qcom_power_up_sequence(hba);
510                 /*
511                  * The PHY PLL output is the source of tx/rx lane symbol
512                  * clocks, hence, enable the lane clocks only after PHY
513                  * is initialized.
514                  */
515                 err = ufs_qcom_enable_lane_clks(host);
516                 break;
517         case POST_CHANGE:
518                 /* check if UFS PHY moved from DISABLED to HIBERN8 */
519                 err = ufs_qcom_check_hibern8(hba);
520                 ufs_qcom_enable_hw_clk_gating(hba);
521                 ufs_qcom_ice_enable(host);
522                 break;
523         default:
524                 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
525                 err = -EINVAL;
526                 break;
527         }
528         return err;
529 }
530
531 /*
532  * Return: zero for success and non-zero in case of a failure.
533  */
534 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
535                                u32 hs, u32 rate, bool update_link_startup_timer)
536 {
537         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
538         struct ufs_clk_info *clki;
539         u32 core_clk_period_in_ns;
540         u32 tx_clk_cycles_per_us = 0;
541         unsigned long core_clk_rate = 0;
542         u32 core_clk_cycles_per_us = 0;
543
544         static u32 pwm_fr_table[][2] = {
545                 {UFS_PWM_G1, 0x1},
546                 {UFS_PWM_G2, 0x1},
547                 {UFS_PWM_G3, 0x1},
548                 {UFS_PWM_G4, 0x1},
549         };
550
551         static u32 hs_fr_table_rA[][2] = {
552                 {UFS_HS_G1, 0x1F},
553                 {UFS_HS_G2, 0x3e},
554                 {UFS_HS_G3, 0x7D},
555         };
556
557         static u32 hs_fr_table_rB[][2] = {
558                 {UFS_HS_G1, 0x24},
559                 {UFS_HS_G2, 0x49},
560                 {UFS_HS_G3, 0x92},
561         };
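        /*
         * Each entry above is { gear, TX clock cycles per us }. Illustrative
         * lookup only: for FAST_MODE at PA_HS_MODE_A with gear UFS_HS_G2
         * (assuming UFS_HS_G2 == 2), tx_clk_cycles_per_us becomes
         * hs_fr_table_rA[UFS_HS_G2 - 1][1] = 0x3e in the switch below.
         */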
562
563         /*
564          * The Qunipro controller does not use the following registers:
565          * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
566          * UFS_REG_PA_LINK_STARTUP_TIMER.
567          * However, the UTP controller uses the SYS1CLK_1US_REG register for
568          * the Interrupt Aggregation logic.
569          */
570         if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
571                 return 0;
572
573         if (gear == 0) {
574                 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
575                 return -EINVAL;
576         }
577
578         list_for_each_entry(clki, &hba->clk_list_head, list) {
579                 if (!strcmp(clki->name, "core_clk"))
580                         core_clk_rate = clk_get_rate(clki->clk);
581         }
582
583         /* If frequency is smaller than 1MHz, set to 1MHz */
584         if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
585                 core_clk_rate = DEFAULT_CLK_RATE_HZ;
586
587         core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
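        /*
         * Illustrative arithmetic only (hypothetical rate): a 100 MHz core_clk
         * yields 100000000 / USEC_PER_SEC = 100 cycles per us, which is then
         * programmed into REG_UFS_SYS1CLK_1US below.
         */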
588         if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
589                 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
590                 /*
591                  * make sure above write gets applied before we return from
592                  * this function.
593                  */
594                 mb();
595         }
596
597         if (ufs_qcom_cap_qunipro(host))
598                 return 0;
599
600         core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
601         core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
602         core_clk_period_in_ns &= MASK_CLK_NS_REG;
603
604         switch (hs) {
605         case FASTAUTO_MODE:
606         case FAST_MODE:
607                 if (rate == PA_HS_MODE_A) {
608                         if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
609                                 dev_err(hba->dev,
610                                         "%s: index %d exceeds table size %zu\n",
611                                         __func__, gear,
612                                         ARRAY_SIZE(hs_fr_table_rA));
613                                 return -EINVAL;
614                         }
615                         tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
616                 } else if (rate == PA_HS_MODE_B) {
617                         if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
618                                 dev_err(hba->dev,
619                                         "%s: index %d exceeds table size %zu\n",
620                                         __func__, gear,
621                                         ARRAY_SIZE(hs_fr_table_rB));
622                                 return -EINVAL;
623                         }
624                         tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
625                 } else {
626                         dev_err(hba->dev, "%s: invalid rate = %d\n",
627                                 __func__, rate);
628                         return -EINVAL;
629                 }
630                 break;
631         case SLOWAUTO_MODE:
632         case SLOW_MODE:
633                 if (gear > ARRAY_SIZE(pwm_fr_table)) {
634                         dev_err(hba->dev,
635                                         "%s: index %d exceeds table size %zu\n",
636                                         __func__, gear,
637                                         ARRAY_SIZE(pwm_fr_table));
638                         return -EINVAL;
639                 }
640                 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
641                 break;
642         case UNCHANGED:
643         default:
644                 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
645                 return -EINVAL;
646         }
647
648         if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
649             (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
650                 /* these two register fields must be written at once */
651                 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
652                               REG_UFS_TX_SYMBOL_CLK_NS_US);
653                 /*
654                  * make sure above write gets applied before we return from
655                  * this function.
656                  */
657                 mb();
658         }
659
660         if (update_link_startup_timer && host->hw_ver.major != 0x5) {
661                 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
662                               REG_UFS_CFG0);
663                 /*
664                  * make sure that this configuration is applied before
665                  * we return
666                  */
667                 mb();
668         }
669
670         return 0;
671 }
672
673 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
674                                         enum ufs_notify_change_status status)
675 {
676         int err = 0;
677         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
678
679         switch (status) {
680         case PRE_CHANGE:
681                 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
682                                         0, true)) {
683                         dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
684                                 __func__);
685                         return -EINVAL;
686                 }
687
688                 if (ufs_qcom_cap_qunipro(host))
689                         /*
690                          * set unipro core clock cycles to 150 & clear clock
691                          * divider
692                          */
693                         err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
694                                                                           150);
695
696                 /*
697                  * Some UFS devices (and possibly the host) have issues if
698                  * LCC is enabled. So set PA_Local_TX_LCC_Enable to 0 before
699                  * link startup, which makes sure that both the host and the
700                  * device TX LCC are disabled once link startup has
701                  * completed.
702                  */
703                 if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
704                         err = ufshcd_disable_host_tx_lcc(hba);
705
706                 break;
707         default:
708                 break;
709         }
710
711         return err;
712 }
713
714 static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
715 {
716         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
717
718         /* reset gpio is optional */
719         if (!host->device_reset)
720                 return;
721
722         gpiod_set_value_cansleep(host->device_reset, asserted);
723 }
724
725 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
726         enum ufs_notify_change_status status)
727 {
728         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
729         struct phy *phy = host->generic_phy;
730
731         if (status == PRE_CHANGE)
732                 return 0;
733
734         if (ufs_qcom_is_link_off(hba)) {
735                 /*
736                  * Disable the tx/rx lane symbol clocks before PHY is
737                  * powered down as the PLL source should be disabled
738                  * after downstream clocks are disabled.
739                  */
740                 ufs_qcom_disable_lane_clks(host);
741                 phy_power_off(phy);
742
743                 /* reset the connected UFS device during power down */
744                 ufs_qcom_device_reset_ctrl(hba, true);
745
746         } else if (!ufs_qcom_is_link_active(hba)) {
747                 ufs_qcom_disable_lane_clks(host);
748         }
749
750         return ufs_qcom_ice_suspend(host);
751 }
752
753 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
754 {
755         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
756         struct phy *phy = host->generic_phy;
757         int err;
758
759         if (ufs_qcom_is_link_off(hba)) {
760                 err = phy_power_on(phy);
761                 if (err) {
762                         dev_err(hba->dev, "%s: failed PHY power on: %d\n",
763                                 __func__, err);
764                         return err;
765                 }
766
767                 err = ufs_qcom_enable_lane_clks(host);
768                 if (err)
769                         return err;
770
771         } else if (!ufs_qcom_is_link_active(hba)) {
772                 err = ufs_qcom_enable_lane_clks(host);
773                 if (err)
774                         return err;
775         }
776
777         return ufs_qcom_ice_resume(host);
778 }
779
780 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
781 {
782         if (host->dev_ref_clk_ctrl_mmio &&
783             (enable ^ host->is_dev_ref_clk_enabled)) {
784                 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
785
786                 if (enable)
787                         temp |= host->dev_ref_clk_en_mask;
788                 else
789                         temp &= ~host->dev_ref_clk_en_mask;
790
791                 /*
792                  * If we are here to disable this clock, it might be
793                  * immediately after entering hibern8, in which case we need
794                  * to make sure that the device ref_clk is active for a
795                  * specific time after hibern8 entry.
796                  */
797                 if (!enable) {
798                         unsigned long gating_wait;
799
800                         gating_wait = host->hba->dev_info.clk_gating_wait_us;
801                         if (!gating_wait) {
802                                 udelay(1);
803                         } else {
804                                 /*
805                                  * time for which the reference clock is
806                                  * required by the device during the transition
807                                  * from HS-MODE to LS-MODE or the HIBERN8 state.
808                                  * Add some extra delay to be on the safe side.
809                                  * more delay to be on the safe side.
810                                  */
811                                 gating_wait += 10;
812                                 usleep_range(gating_wait, gating_wait + 10);
813                         }
814                 }
815
816                 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
817
818                 /*
819                  * Make sure the write to ref_clk reaches the destination and
820                  * is not stored in a Write Buffer (WB).
821                  */
822                 readl(host->dev_ref_clk_ctrl_mmio);
823
824                 /*
825                  * If we call hibern8 exit after this, we need to make sure that
826                  * device ref_clk is stable for at least 1us before the hibern8
827                  * exit command.
828                  */
829                 if (enable)
830                         udelay(1);
831
832                 host->is_dev_ref_clk_enabled = enable;
833         }
834 }
835
836 static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw)
837 {
838         struct device *dev = host->hba->dev;
839         int ret;
840
841         ret = icc_set_bw(host->icc_ddr, 0, mem_bw);
842         if (ret < 0) {
843                 dev_err(dev, "failed to set bandwidth request: %d\n", ret);
844                 return ret;
845         }
846
847         ret = icc_set_bw(host->icc_cpu, 0, cfg_bw);
848         if (ret < 0) {
849                 dev_err(dev, "failed to set bandwidth request: %d\n", ret);
850                 return ret;
851         }
852
853         return 0;
854 }
855
856 static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host)
857 {
858         struct ufs_pa_layer_attr *p = &host->dev_req_params;
859         int gear = max_t(u32, p->gear_rx, p->gear_tx);
860         int lane = max_t(u32, p->lane_rx, p->lane_tx);
861
862         if (ufshcd_is_hs_mode(p)) {
863                 if (p->hs_rate == PA_HS_MODE_B)
864                         return ufs_qcom_bw_table[MODE_HS_RB][gear][lane];
865                 else
866                         return ufs_qcom_bw_table[MODE_HS_RA][gear][lane];
867         } else {
868                 return ufs_qcom_bw_table[MODE_PWM][gear][lane];
869         }
870 }
871
872 static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
873 {
874         struct __ufs_qcom_bw_table bw_table;
875
876         bw_table = ufs_qcom_get_bw_table(host);
877
878         return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
879 }
880
881 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
882                                 enum ufs_notify_change_status status,
883                                 struct ufs_pa_layer_attr *dev_max_params,
884                                 struct ufs_pa_layer_attr *dev_req_params)
885 {
886         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
887         struct ufs_dev_params ufs_qcom_cap;
888         int ret = 0;
889
890         if (!dev_req_params) {
891                 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
892                 return -EINVAL;
893         }
894
895         switch (status) {
896         case PRE_CHANGE:
897                 ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
898                 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
899
900                 /* This driver only supports a symmetric gear setting, i.e. hs_tx_gear == hs_rx_gear */
901                 ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
902
903                 ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
904                                                dev_max_params,
905                                                dev_req_params);
906                 if (ret) {
907                         dev_err(hba->dev, "%s: failed to determine capabilities\n",
908                                         __func__);
909                         return ret;
910                 }
911
912                 /* Use the agreed gear */
913                 host->hs_gear = dev_req_params->gear_tx;
914
915                 /* enable the device ref clock before changing to HS mode */
916                 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
917                         ufshcd_is_hs_mode(dev_req_params))
918                         ufs_qcom_dev_ref_clk_ctrl(host, true);
919
920                 if (host->hw_ver.major >= 0x4) {
921                         ufshcd_dme_configure_adapt(hba,
922                                                 dev_req_params->gear_tx,
923                                                 PA_INITIAL_ADAPT);
924                 }
925                 break;
926         case POST_CHANGE:
927                 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
928                                         dev_req_params->pwr_rx,
929                                         dev_req_params->hs_rate, false)) {
930                         dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
931                                 __func__);
932                          * We return the error code at the end of the routine,
933                          * but continue to configure UFS_PHY_TX_LANE_ENABLE
934                          * and bus voting as usual.
935                          * and bus voting as usual
936                          */
937                         ret = -EINVAL;
938                 }
939
940                 /* cache the power mode parameters to use internally */
941                 memcpy(&host->dev_req_params,
942                                 dev_req_params, sizeof(*dev_req_params));
943
944                 ufs_qcom_icc_update_bw(host);
945
946                 /* disable the device ref clock if entered PWM mode */
947                 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
948                         !ufshcd_is_hs_mode(dev_req_params))
949                         ufs_qcom_dev_ref_clk_ctrl(host, false);
950                 break;
951         default:
952                 ret = -EINVAL;
953                 break;
954         }
955
956         return ret;
957 }
958
959 static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
960 {
961         int err;
962         u32 pa_vs_config_reg1;
963
964         err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
965                              &pa_vs_config_reg1);
966         if (err)
967                 return err;
968
969         /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
970         return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
971                             (pa_vs_config_reg1 | (1 << 12)));
972 }
973
974 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
975 {
976         int err = 0;
977
978         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
979                 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
980
981         if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
982                 hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
983
984         return err;
985 }
986
987 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
988 {
989         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
990
991         if (host->hw_ver.major == 0x1)
992                 return ufshci_version(1, 1);
993         else
994                 return ufshci_version(2, 0);
995 }
996
997 /**
998  * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
999  * @hba: host controller instance
1000  *
1001  * The QCOM UFS host controller may have some non-standard behaviours
1002  * (quirks) compared to what the UFSHCI specification defines. Advertise
1003  * all such quirks to the standard UFS host controller driver so that it
1004  * takes them into account.
1005  */
1006 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
1007 {
1008         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1009
1010         if (host->hw_ver.major == 0x01) {
1011                 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1012                             | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
1013                             | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
1014
1015                 if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
1016                         hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
1017
1018                 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
1019         }
1020
1021         if (host->hw_ver.major == 0x2) {
1022                 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
1023
1024                 if (!ufs_qcom_cap_qunipro(host))
1025                         /* Legacy UniPro mode still needs the following quirks */
1026                         hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1027                                 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
1028                                 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
1029         }
1030
1031         if (host->hw_ver.major > 0x3)
1032                 hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
1033 }
1034
1035 static void ufs_qcom_set_caps(struct ufs_hba *hba)
1036 {
1037         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1038
1039         hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1040         hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
1041         hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
1042         hba->caps |= UFSHCD_CAP_WB_EN;
1043         hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
1044         hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
1045
1046         if (host->hw_ver.major >= 0x2) {
1047                 host->caps = UFS_QCOM_CAP_QUNIPRO |
1048                              UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
1049         }
1050 }
1051
1052 /**
1053  * ufs_qcom_setup_clocks - enable/disable clocks
1054  * @hba: host controller instance
1055  * @on: If true, enable clocks else disable them.
1056  * @status: PRE_CHANGE or POST_CHANGE notify
1057  *
1058  * Return: 0 on success, non-zero on failure.
1059  */
1060 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1061                                  enum ufs_notify_change_status status)
1062 {
1063         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1064
1065         /*
1066          * If ufs_qcom_init() is not yet done, simply ignore this call;
1067          * ufs_qcom_setup_clocks() will be called again from
1068          * ufs_qcom_init() once initialization is done.
1069          */
1070         if (!host)
1071                 return 0;
1072
1073         switch (status) {
1074         case PRE_CHANGE:
1075                 if (on) {
1076                         ufs_qcom_icc_update_bw(host);
1077                 } else {
1078                         if (!ufs_qcom_is_link_active(hba)) {
1079                                 /* disable device ref_clk */
1080                                 ufs_qcom_dev_ref_clk_ctrl(host, false);
1081                         }
1082                 }
1083                 break;
1084         case POST_CHANGE:
1085                 if (on) {
1086                         /* enable the device ref clock for HS mode */
1087                         if (ufshcd_is_hs_mode(&hba->pwr_info))
1088                                 ufs_qcom_dev_ref_clk_ctrl(host, true);
1089                 } else {
1090                         ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
1091                                             ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
1092                 }
1093                 break;
1094         }
1095
1096         return 0;
1097 }
1098
1099 static int
1100 ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
1101 {
1102         struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
1103
1104         ufs_qcom_assert_reset(host->hba);
1105         /* provide 1ms delay to let the reset pulse propagate. */
1106         usleep_range(1000, 1100);
1107         return 0;
1108 }
1109
1110 static int
1111 ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
1112 {
1113         struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
1114
1115         ufs_qcom_deassert_reset(host->hba);
1116
1117         /*
1118          * After reset deassertion, the PHY needs all ref clocks,
1119          * voltage and current to settle down before starting the serdes.
1120          */
1121         usleep_range(1000, 1100);
1122         return 0;
1123 }
1124
1125 static const struct reset_control_ops ufs_qcom_reset_ops = {
1126         .assert = ufs_qcom_reset_assert,
1127         .deassert = ufs_qcom_reset_deassert,
1128 };
1129
1130 static int ufs_qcom_icc_init(struct ufs_qcom_host *host)
1131 {
1132         struct device *dev = host->hba->dev;
1133         int ret;
1134
1135         host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr");
1136         if (IS_ERR(host->icc_ddr))
1137                 return dev_err_probe(dev, PTR_ERR(host->icc_ddr),
1138                                     "failed to acquire interconnect path\n");
1139
1140         host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs");
1141         if (IS_ERR(host->icc_cpu))
1142                 return dev_err_probe(dev, PTR_ERR(host->icc_cpu),
1143                                     "failed to acquire interconnect path\n");
1144
1145         /*
1146          * Set Maximum bandwidth vote before initializing the UFS controller and
1147          * device. Ideally, a minimal interconnect vote would suffice for the
1148          * initialization, but a max vote would allow faster initialization.
1149          */
1150         ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw,
1151                                   ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw);
1152         if (ret < 0)
1153                 return dev_err_probe(dev, ret, "failed to set bandwidth request\n");
1154
1155         return 0;
1156 }
1157
1158 /**
1159  * ufs_qcom_init - bind phy with controller
1160  * @hba: host controller instance
1161  *
1162  * Binds the PHY with the controller and powers up the PHY, enabling its
1163  * clocks and regulators.
1164  *
1165  * Return: -EPROBE_DEFER if binding fails, a negative error code on PHY
1166  * power-up failure, and zero on success.
1167  */
1168 static int ufs_qcom_init(struct ufs_hba *hba)
1169 {
1170         int err;
1171         struct device *dev = hba->dev;
1172         struct platform_device *pdev = to_platform_device(dev);
1173         struct ufs_qcom_host *host;
1174         struct resource *res;
1175         struct ufs_clk_info *clki;
1176
1177         host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1178         if (!host) {
1179                 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
1180                 return -ENOMEM;
1181         }
1182
1183         /* Make a two-way bind between the qcom host and the hba */
1184         host->hba = hba;
1185         ufshcd_set_variant(hba, host);
1186
1187         /* Setup the optional reset control of HCI */
1188         host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
1189         if (IS_ERR(host->core_reset)) {
1190                 err = dev_err_probe(dev, PTR_ERR(host->core_reset),
1191                                     "Failed to get reset control\n");
1192                 goto out_variant_clear;
1193         }
1194
1195         /* Fire up the reset controller. Failure here is non-fatal. */
1196         host->rcdev.of_node = dev->of_node;
1197         host->rcdev.ops = &ufs_qcom_reset_ops;
1198         host->rcdev.owner = dev->driver->owner;
1199         host->rcdev.nr_resets = 1;
1200         err = devm_reset_controller_register(dev, &host->rcdev);
1201         if (err)
1202                 dev_warn(dev, "Failed to register reset controller\n");
1203
1204         if (!has_acpi_companion(dev)) {
1205                 host->generic_phy = devm_phy_get(dev, "ufsphy");
1206                 if (IS_ERR(host->generic_phy)) {
1207                         err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
1208                         goto out_variant_clear;
1209                 }
1210         }
1211
1212         err = ufs_qcom_icc_init(host);
1213         if (err)
1214                 goto out_variant_clear;
1215
1216         host->device_reset = devm_gpiod_get_optional(dev, "reset",
1217                                                      GPIOD_OUT_HIGH);
1218         if (IS_ERR(host->device_reset)) {
1219                 err = PTR_ERR(host->device_reset);
1220                 if (err != -EPROBE_DEFER)
1221                         dev_err(dev, "failed to acquire reset gpio: %d\n", err);
1222                 goto out_variant_clear;
1223         }
1224
1225         ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
1226                 &host->hw_ver.minor, &host->hw_ver.step);
1227
1228         /*
1229          * For newer controllers, the device reference clock control bit has
1230          * moved into the UFS controller's own register address space.
1231          */
1232         if (host->hw_ver.major >= 0x02) {
1233                 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
1234                 host->dev_ref_clk_en_mask = BIT(26);
1235         } else {
1236                 /* "dev_ref_clk_ctrl_mem" is an optional resource */
1237                 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1238                                                    "dev_ref_clk_ctrl_mem");
1239                 if (res) {
1240                         host->dev_ref_clk_ctrl_mmio =
1241                                         devm_ioremap_resource(dev, res);
1242                         if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
1243                                 host->dev_ref_clk_ctrl_mmio = NULL;
1244                         host->dev_ref_clk_en_mask = BIT(5);
1245                 }
1246         }
1247
1248         list_for_each_entry(clki, &hba->clk_list_head, list) {
1249                 if (!strcmp(clki->name, "core_clk_unipro"))
1250                         clki->keep_link_active = true;
1251         }
1252
1253         err = ufs_qcom_init_lane_clks(host);
1254         if (err)
1255                 goto out_variant_clear;
1256
1257         ufs_qcom_set_caps(hba);
1258         ufs_qcom_advertise_quirks(hba);
1259
1260         err = ufs_qcom_ice_init(host);
1261         if (err)
1262                 goto out_variant_clear;
1263
1264         ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
1265
1266         if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
1267                 ufs_qcom_hosts[hba->dev->id] = host;
1268
1269         ufs_qcom_get_default_testbus_cfg(host);
1270         err = ufs_qcom_testbus_config(host);
1271         if (err)
1272                 /* Failure is non-fatal */
1273                 dev_warn(dev, "%s: failed to configure the testbus %d\n",
1274                                 __func__, err);
1275
1276         /*
1277          * Power up the PHY using the minimum supported gear (UFS_HS_G2).
1278          * Switching to max gear will be performed during reinit if supported.
1279          */
1280         host->hs_gear = UFS_HS_G2;
1281
1282         return 0;
1283
1284 out_variant_clear:
1285         ufshcd_set_variant(hba, NULL);
1286
1287         return err;
1288 }
1289
1290 static void ufs_qcom_exit(struct ufs_hba *hba)
1291 {
1292         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1293
1294         ufs_qcom_disable_lane_clks(host);
1295         phy_power_off(host->generic_phy);
1296         phy_exit(host->generic_phy);
1297 }
1298
1299 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
1300                                                        u32 clk_cycles)
1301 {
1302         int err;
1303         u32 core_clk_ctrl_reg;
1304
1305         if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
1306                 return -EINVAL;
1307
1308         err = ufshcd_dme_get(hba,
1309                             UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1310                             &core_clk_ctrl_reg);
1311         if (err)
1312                 return err;
1313
1314         core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
1315         core_clk_ctrl_reg |= clk_cycles;
1316
1317         /* Clear CORE_CLK_DIV_EN */
1318         core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1319
1320         return ufshcd_dme_set(hba,
1321                             UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1322                             core_clk_ctrl_reg);
1323 }
1324
1325 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
1326 {
1327         /* nothing to do as of now */
1328         return 0;
1329 }
1330
1331 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
1332 {
1333         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1334
1335         if (!ufs_qcom_cap_qunipro(host))
1336                 return 0;
1337
1338         /* set unipro core clock cycles to 150 and clear clock divider */
1339         return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
1340 }
1341
1342 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
1343 {
1344         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1345         int err;
1346         u32 core_clk_ctrl_reg;
1347
1348         if (!ufs_qcom_cap_qunipro(host))
1349                 return 0;
1350
1351         err = ufshcd_dme_get(hba,
1352                             UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1353                             &core_clk_ctrl_reg);
1354
1355         /* make sure CORE_CLK_DIV_EN is cleared */
1356         if (!err &&
1357             (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
1358                 core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
1359                 err = ufshcd_dme_set(hba,
1360                                     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
1361                                     core_clk_ctrl_reg);
1362         }
1363
1364         return err;
1365 }
1366
1367 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
1368 {
1369         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1370
1371         if (!ufs_qcom_cap_qunipro(host))
1372                 return 0;
1373
1374         /* set unipro core clock cycles to 75 and clear clock divider */
1375         return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
1376 }
1377
1378 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
1379                 bool scale_up, enum ufs_notify_change_status status)
1380 {
1381         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1382         struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
1383         int err = 0;
1384
1385         /* check the host controller state before sending hibern8 cmd */
1386         if (!ufshcd_is_hba_active(hba))
1387                 return 0;
1388
1389         if (status == PRE_CHANGE) {
1390                 err = ufshcd_uic_hibern8_enter(hba);
1391                 if (err)
1392                         return err;
1393                 if (scale_up)
1394                         err = ufs_qcom_clk_scale_up_pre_change(hba);
1395                 else
1396                         err = ufs_qcom_clk_scale_down_pre_change(hba);
1397                 if (err)
1398                         ufshcd_uic_hibern8_exit(hba);
1399
1400         } else {
1401                 if (scale_up)
1402                         err = ufs_qcom_clk_scale_up_post_change(hba);
1403                 else
1404                         err = ufs_qcom_clk_scale_down_post_change(hba);
1405
1406
1407                 if (err) {
1408                         ufshcd_uic_hibern8_exit(hba);
1409                         return err;
1410                 }
1411
1412                 ufs_qcom_cfg_timers(hba,
1413                                     dev_req_params->gear_rx,
1414                                     dev_req_params->pwr_rx,
1415                                     dev_req_params->hs_rate,
1416                                     false);
1417                 ufs_qcom_icc_update_bw(host);
1418                 ufshcd_uic_hibern8_exit(hba);
1419         }
1420
1421         return 0;
1422 }
1423
1424 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
1425 {
1426         ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
1427                         UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
1428         ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
1429 }
1430
1431 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
1432 {
1433         /* provide a legal default configuration */
1434         host->testbus.select_major = TSTBUS_UNIPRO;
1435         host->testbus.select_minor = 37;
1436 }
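/*
 * Illustrative usage only (not an existing driver flow): a different probe
 * point could be selected by overriding these fields before calling
 * ufs_qcom_testbus_config(), e.g. select_major = TSTBUS_DFC places
 * select_minor at bit offset 24 of UFS_TEST_BUS_CTRL_1 (see the switch in
 * ufs_qcom_testbus_config() below).
 */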
1437
1438 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
1439 {
1440         if (host->testbus.select_major >= TSTBUS_MAX) {
1441                 dev_err(host->hba->dev,
1442                         "%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
1443                         __func__, host->testbus.select_major);
1444                 return false;
1445         }
1446
1447         return true;
1448 }
1449
1450 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
1451 {
1452         int reg;
1453         int offset;
1454         u32 mask = TEST_BUS_SUB_SEL_MASK;
1455
1456         if (!host)
1457                 return -EINVAL;
1458
1459         if (!ufs_qcom_testbus_cfg_is_ok(host))
1460                 return -EPERM;
1461
1462         switch (host->testbus.select_major) {
1463         case TSTBUS_UAWM:
1464                 reg = UFS_TEST_BUS_CTRL_0;
1465                 offset = 24;
1466                 break;
1467         case TSTBUS_UARM:
1468                 reg = UFS_TEST_BUS_CTRL_0;
1469                 offset = 16;
1470                 break;
1471         case TSTBUS_TXUC:
1472                 reg = UFS_TEST_BUS_CTRL_0;
1473                 offset = 8;
1474                 break;
1475         case TSTBUS_RXUC:
1476                 reg = UFS_TEST_BUS_CTRL_0;
1477                 offset = 0;
1478                 break;
1479         case TSTBUS_DFC:
1480                 reg = UFS_TEST_BUS_CTRL_1;
1481                 offset = 24;
1482                 break;
1483         case TSTBUS_TRLUT:
1484                 reg = UFS_TEST_BUS_CTRL_1;
1485                 offset = 16;
1486                 break;
1487         case TSTBUS_TMRLUT:
1488                 reg = UFS_TEST_BUS_CTRL_1;
1489                 offset = 8;
1490                 break;
1491         case TSTBUS_OCSC:
1492                 reg = UFS_TEST_BUS_CTRL_1;
1493                 offset = 0;
1494                 break;
1495         case TSTBUS_WRAPPER:
1496                 reg = UFS_TEST_BUS_CTRL_2;
1497                 offset = 16;
1498                 break;
1499         case TSTBUS_COMBINED:
1500                 reg = UFS_TEST_BUS_CTRL_2;
1501                 offset = 8;
1502                 break;
1503         case TSTBUS_UTP_HCI:
1504                 reg = UFS_TEST_BUS_CTRL_2;
1505                 offset = 0;
1506                 break;
1507         case TSTBUS_UNIPRO:
1508                 reg = UFS_UNIPRO_CFG;
1509                 offset = 20;
1510                 mask = 0xFFF;
1511                 break;
1512         /*
1513          * No need for a default case, since
1514          * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
1515          * is legal
1516          */
1517         }
1518         mask <<= offset;
1519         ufshcd_rmwl(host->hba, TEST_BUS_SEL,
1520                     (u32)host->testbus.select_major << 19,
1521                     REG_UFS_CFG1);
1522         ufshcd_rmwl(host->hba, mask,
1523                     (u32)host->testbus.select_minor << offset,
1524                     reg);
1525         ufs_qcom_enable_test_bus(host);
1526         /*
1527          * Make sure the test bus configuration is
1528          * committed before returning.
1529          */
1530         mb();
1531
1532         return 0;
1533 }
1534
1535 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1536 {
1537         u32 reg;
1538         struct ufs_qcom_host *host;
1539
1540         host = ufshcd_get_variant(hba);
1541
1542         ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
1543                          "HCI Vendor Specific Registers ");
1544
1545         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
1546         ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");
1547
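        /*
         * The EDTL, descriptor and PRDT RAM dumps below require
         * UTP_DBG_RAMS_EN to be set in REG_UFS_CFG1; the bit is cleared
         * again once they have been captured.
         */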
1548         reg = ufshcd_readl(hba, REG_UFS_CFG1);
1549         reg |= UTP_DBG_RAMS_EN;
1550         ufshcd_writel(hba, reg, REG_UFS_CFG1);
1551
1552         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
1553         ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");
1554
1555         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
1556         ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");
1557
1558         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
1559         ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");
1560
1561         /* clear bit 17 - UTP_DBG_RAMS_EN */
1562         ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
1563
1564         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1565         ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");
1566
1567         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
1568         ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");
1569
1570         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
1571         ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");
1572
1573         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
1574         ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");
1575
1576         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
1577         ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");
1578
1579         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
1580         ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");
1581
1582         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
1583         ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
1584 }
1585
1586 /**
1587  * ufs_qcom_device_reset() - toggle the (optional) device reset line
1588  * @hba: per-adapter instance
1589  *
1590  * Toggles the (optional) reset line to reset the attached device.
1591  */
1592 static int ufs_qcom_device_reset(struct ufs_hba *hba)
1593 {
1594         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1595
1596         /* reset gpio is optional */
1597         if (!host->device_reset)
1598                 return -EOPNOTSUPP;
1599
1600         /*
1601          * The UFS device shall detect reset pulses as short as 1 us; sleep for
1602          * 10 us to be on the safe side.
1603          */
1604         ufs_qcom_device_reset_ctrl(hba, true);
1605         usleep_range(10, 15);
1606
1607         ufs_qcom_device_reset_ctrl(hba, false);
1608         usleep_range(10, 15);
1609
1610         return 0;
1611 }
1612
1613 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
1614 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1615                                         struct devfreq_dev_profile *p,
1616                                         struct devfreq_simple_ondemand_data *d)
1617 {
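        /*
         * Devfreq tuning for UFS clock scaling: sample the load every 60 ms
         * using the delayed timer, scale up once utilization exceeds 70%,
         * with roughly 5% of hysteresis before scaling back down.
         */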
1618         p->polling_ms = 60;
1619         p->timer = DEVFREQ_TIMER_DELAYED;
1620         d->upthreshold = 70;
1621         d->downdifferential = 5;
1622 }
1623 #else
1624 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
1625                 struct devfreq_dev_profile *p,
1626                 struct devfreq_simple_ondemand_data *data)
1627 {
1628 }
1629 #endif
1630
1631 static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
1632 {
1633         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1634
1635         phy_power_off(host->generic_phy);
1636 }
1637
1638 /* Resources */
1639 static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
1640         {.name = "ufs_mem",},
1641         {.name = "mcq",},
1642         /* Submission Queue Doorbell Address Offset */
1643         {.name = "mcq_sqd",},
1644         /* Submission Queue Interrupt Status */
1645         {.name = "mcq_sqis",},
1646         /* Completion Queue Doorbell Address Offset */
1647         {.name = "mcq_cqd",},
1648         /* Completion Queue Interrupt Status */
1649         {.name = "mcq_cqis",},
1650         /* MCQ vendor specific */
1651         {.name = "mcq_vs",},
1652 };
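/*
 * A minimal, hypothetical sketch of how a device tree node could name these
 * regions (names taken from the table above, addresses omitted):
 *
 *     reg-names = "ufs_mem", "mcq", "mcq_sqd", "mcq_sqis",
 *                 "mcq_cqd", "mcq_cqis", "mcq_vs";
 *
 * Only "ufs_mem" is mandatory; if the "mcq" region is absent it is carved
 * out of ufs_mem by ufs_qcom_mcq_config_resource() below.
 */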
1653
1654 static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
1655 {
1656         struct platform_device *pdev = to_platform_device(hba->dev);
1657         struct ufshcd_res_info *res;
1658         struct resource *res_mem, *res_mcq;
1659         int i, ret = 0;
1660
1661         memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
1662
1663         for (i = 0; i < RES_MAX; i++) {
1664                 res = &hba->res[i];
1665                 res->resource = platform_get_resource_byname(pdev,
1666                                                              IORESOURCE_MEM,
1667                                                              res->name);
1668                 if (!res->resource) {
1669                         dev_info(hba->dev, "Resource %s not provided\n", res->name);
1670                         if (i == RES_UFS)
1671                                 return -ENOMEM;
1672                         continue;
1673                 } else if (i == RES_UFS) {
1674                         res_mem = res->resource;
1675                         res->base = hba->mmio_base;
1676                         continue;
1677                 }
1678
1679                 res->base = devm_ioremap_resource(hba->dev, res->resource);
1680                 if (IS_ERR(res->base)) {
1681                         dev_err(hba->dev, "Failed to map res %s, err=%d\n",
1682                                          res->name, (int)PTR_ERR(res->base));
1683                         ret = PTR_ERR(res->base);
1684                         res->base = NULL;
1685                         return ret;
1686                 }
1687         }
1688
1689         /* Check whether the MCQ register space was provided in DT */
1690         res = &hba->res[RES_MCQ];
1691         /* If it was, skip the fallback allocation from ufs_mem below */
1692         if (res->base)
1693                 goto out;
1694
1695         /* Explicitly allocate MCQ resource from ufs_mem */
1696         res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
1697         if (!res_mcq)
1698                 return -ENOMEM;
1699
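        /*
         * Carve the MCQ queue-attribute block out of ufs_mem: it starts at
         * MCQ_SQATTR_OFFSET() within the region and spans MCQ_QCFG_SIZE
         * bytes per hardware queue.
         */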
1700         res_mcq->start = res_mem->start +
1701                          MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
1702         res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
1703         res_mcq->flags = res_mem->flags;
1704         res_mcq->name = "mcq";
1705
1706         ret = insert_resource(&iomem_resource, res_mcq);
1707         if (ret) {
1708                 dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
1709                         ret);
1710                 return ret;
1711         }
1712
1713         res->base = devm_ioremap_resource(hba->dev, res_mcq);
1714         if (IS_ERR(res->base)) {
1715                 dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
1716                         (int)PTR_ERR(res->base));
1717                 ret = PTR_ERR(res->base);
1718                 goto ioremap_err;
1719         }
1720
1721 out:
1722         hba->mcq_base = res->base;
1723         return 0;
1724 ioremap_err:
1725         res->base = NULL;
1726         remove_resource(res_mcq);
1727         return ret;
1728 }
1729
1730 static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
1731 {
1732         struct ufshcd_res_info *mem_res, *sqdao_res;
1733         struct ufshcd_mcq_opr_info_t *opr;
1734         int i;
1735
1736         mem_res = &hba->res[RES_UFS];
1737         sqdao_res = &hba->res[RES_MCQ_SQD];
1738
1739         if (!mem_res->base || !sqdao_res->base)
1740                 return -EINVAL;
1741
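        /*
         * Lay out the MCQ operation-and-runtime pointers: each register group
         * starts 0x40 bytes after the previous one within the "mcq_sqd"
         * region, its offset is expressed relative to the start of "ufs_mem",
         * and the per-queue registers inside a group are 0x100 bytes apart.
         */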
1742         for (i = 0; i < OPR_MAX; i++) {
1743                 opr = &hba->mcq_opr[i];
1744                 opr->offset = sqdao_res->resource->start -
1745                               mem_res->resource->start + 0x40 * i;
1746                 opr->stride = 0x100;
1747                 opr->base = sqdao_res->base + 0x40 * i;
1748         }
1749
1750         return 0;
1751 }
1752
1753 static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
1754 {
1755         /* The Qualcomm host controller supports up to 64 Max Active Commands */
1756         return MAX_SUPP_MAC;
1757 }
1758
1759 static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
1760                                         unsigned long *ocqs)
1761 {
1762         struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
1763
1764         if (!mcq_vs_res->base)
1765                 return -EINVAL;
1766
1767         *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
1768
1769         return 0;
1770 }
1771
1772 static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1773 {
1774         struct device *dev = msi_desc_to_dev(desc);
1775         struct ufs_hba *hba = dev_get_drvdata(dev);
1776
1777         ufshcd_mcq_config_esi(hba, msg);
1778 }
1779
1780 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
1781 {
1782         struct msi_desc *desc = data;
1783         struct device *dev = msi_desc_to_dev(desc);
1784         struct ufs_hba *hba = dev_get_drvdata(dev);
1785         u32 id = desc->msi_index;
1786         struct ufs_hw_queue *hwq = &hba->uhq[id];
1787
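        /* Ack this queue's completion interrupt, then drain its completed CQEs */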
1788         ufshcd_mcq_write_cqis(hba, 0x1, id);
1789         ufshcd_mcq_poll_cqe_lock(hba, hwq);
1790
1791         return IRQ_HANDLED;
1792 }
1793
1794 static int ufs_qcom_config_esi(struct ufs_hba *hba)
1795 {
1796         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1797         struct msi_desc *desc;
1798         struct msi_desc *failed_desc = NULL;
1799         int nr_irqs, ret;
1800
1801         if (host->esi_enabled)
1802                 return 0;
1803
1804         /*
1805          * ESI vectors are allocated for completion queues only; poll queues
1806          * do not use interrupts, so they are excluded from the vector count.
1807          */
1808         nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
1809         ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
1810                                              ufs_qcom_write_msi_msg);
1811         if (ret) {
1812                 dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
1813                 goto out;
1814         }
1815
1816         msi_lock_descs(hba->dev);
1817         msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
1818                 ret = devm_request_irq(hba->dev, desc->irq,
1819                                        ufs_qcom_mcq_esi_handler,
1820                                        IRQF_SHARED, "qcom-mcq-esi", desc);
1821                 if (ret) {
1822                         dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
1823                                 __func__, desc->irq, ret);
1824                         failed_desc = desc;
1825                         break;
1826                 }
1827         }
1828         msi_unlock_descs(hba->dev);
1829
1830         if (ret) {
1831                 /* Rewind: free the IRQs requested so far, then release the MSI vectors */
1832                 msi_lock_descs(hba->dev);
1833                 msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
1834                         if (desc == failed_desc)
1835                                 break;
1836                         devm_free_irq(hba->dev, desc->irq, desc);
1837                 }
1838                 msi_unlock_descs(hba->dev);
1839                 platform_msi_domain_free_irqs(hba->dev);
1840         } else {
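                /*
                 * On host controller v6.0.0, bits 12..16 of REG_UFS_CFG3
                 * (the 0x1F000 value below) must also be set before ESI is
                 * enabled; this is applied only on that hardware revision.
                 */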
1841                 if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
1842                     host->hw_ver.step == 0) {
1843                         ufshcd_writel(hba,
1844                                       ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
1845                                       REG_UFS_CFG3);
1846                 }
1847                 ufshcd_mcq_enable_esi(hba);
1848         }
1849
1850 out:
1851         if (!ret)
1852                 host->esi_enabled = true;
1853
1854         return ret;
1855 }
1856
1857 /*
1858  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1859  *
1860  * The variant operations configure the necessary controller and PHY
1861  * handshake during initialization.
1862  */
1863 static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1864         .name                   = "qcom",
1865         .init                   = ufs_qcom_init,
1866         .exit                   = ufs_qcom_exit,
1867         .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
1868         .clk_scale_notify       = ufs_qcom_clk_scale_notify,
1869         .setup_clocks           = ufs_qcom_setup_clocks,
1870         .hce_enable_notify      = ufs_qcom_hce_enable_notify,
1871         .link_startup_notify    = ufs_qcom_link_startup_notify,
1872         .pwr_change_notify      = ufs_qcom_pwr_change_notify,
1873         .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
1874         .suspend                = ufs_qcom_suspend,
1875         .resume                 = ufs_qcom_resume,
1876         .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
1877         .device_reset           = ufs_qcom_device_reset,
1878         .config_scaling_param   = ufs_qcom_config_scaling_param,
1879         .program_key            = ufs_qcom_ice_program_key,
1880         .reinit_notify          = ufs_qcom_reinit_notify,
1881         .mcq_config_resource    = ufs_qcom_mcq_config_resource,
1882         .get_hba_mac            = ufs_qcom_get_hba_mac,
1883         .op_runtime_config      = ufs_qcom_op_runtime_config,
1884         .get_outstanding_cqs    = ufs_qcom_get_outstanding_cqs,
1885         .config_esi             = ufs_qcom_config_esi,
1886 };
1887
1888 /**
1889  * ufs_qcom_probe - probe routine of the driver
1890  * @pdev: pointer to Platform device handle
1891  *
1892  * Return: zero for success and non-zero for failure.
1893  */
1894 static int ufs_qcom_probe(struct platform_device *pdev)
1895 {
1896         int err;
1897         struct device *dev = &pdev->dev;
1898
1899         /* Perform generic probe */
1900         err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
1901         if (err)
1902                 return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");
1903
1904         return 0;
1905 }
1906
1907 /**
1908  * ufs_qcom_remove - remove the UFS host controller and free MSI resources
1909  * @pdev: pointer to platform device handle
1910  *
1911  * Return: always 0.
1912  */
1913 static int ufs_qcom_remove(struct platform_device *pdev)
1914 {
1915         struct ufs_hba *hba =  platform_get_drvdata(pdev);
1916
1917         pm_runtime_get_sync(&pdev->dev);
1918         ufshcd_remove(hba);
1919         platform_msi_domain_free_irqs(hba->dev);
1920         return 0;
1921 }
1922
1923 static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
1924         { .compatible = "qcom,ufshc"},
1925         {},
1926 };
1927 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1928
1929 #ifdef CONFIG_ACPI
1930 static const struct acpi_device_id ufs_qcom_acpi_match[] = {
1931         { "QCOM24A5" },
1932         { },
1933 };
1934 MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
1935 #endif
1936
1937 static const struct dev_pm_ops ufs_qcom_pm_ops = {
1938         SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
1939         .prepare         = ufshcd_suspend_prepare,
1940         .complete        = ufshcd_resume_complete,
1941 #ifdef CONFIG_PM_SLEEP
1942         .suspend         = ufshcd_system_suspend,
1943         .resume          = ufshcd_system_resume,
1944         .freeze          = ufshcd_system_freeze,
1945         .restore         = ufshcd_system_restore,
1946         .thaw            = ufshcd_system_thaw,
1947 #endif
1948 };
1949
1950 static struct platform_driver ufs_qcom_platform = {
1951         .probe  = ufs_qcom_probe,
1952         .remove = ufs_qcom_remove,
1953         .driver = {
1954                 .name   = "ufshcd-qcom",
1955                 .pm     = &ufs_qcom_pm_ops,
1956                 .of_match_table = of_match_ptr(ufs_qcom_of_match),
1957                 .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
1958         },
1959 };
1960 module_platform_driver(ufs_qcom_platform);
1961
1962 MODULE_LICENSE("GPL v2");