Commit | Line | Data |
---|---|---|
97fb5e8d | 1 | // SPDX-License-Identifier: GPL-2.0-only |
81c0fc51 | 2 | /* |
54b879b7 | 3 | * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. |
81c0fc51 YG |
4 | */ |
5 | ||
e1a7752c | 6 | #include <linux/acpi.h> |
3f06f780 BVA |
7 | #include <linux/clk.h> |
8 | #include <linux/delay.h> | |
be2e06c8 MS |
9 | #include <linux/devfreq.h> |
10 | #include <linux/gpio/consumer.h> | |
03ce80a1 | 11 | #include <linux/interconnect.h> |
3f06f780 | 12 | #include <linux/module.h> |
81c0fc51 | 13 | #include <linux/of.h> |
81c0fc51 | 14 | #include <linux/phy/phy.h> |
be2e06c8 | 15 | #include <linux/platform_device.h> |
12fd5f25 | 16 | #include <linux/reset-controller.h> |
be2e06c8 | 17 | #include <linux/time.h> |
4b9ad0b8 | 18 | |
56541c7c AV |
19 | #include <soc/qcom/ice.h> |
20 | ||
dd11376b | 21 | #include <ufs/ufshcd.h> |
dd11376b BVA |
22 | #include <ufs/ufshci.h> |
23 | #include <ufs/ufs_quirks.h> | |
be2e06c8 MS |
24 | #include <ufs/unipro.h> |
25 | #include "ufshcd-pltfrm.h" | |
26 | #include "ufs-qcom.h" | |
3f06f780 | 27 | |
c263b4ef AD |
28 | #define MCQ_QCFGPTR_MASK GENMASK(7, 0) |
29 | #define MCQ_QCFGPTR_UNIT 0x200 | |
30 | #define MCQ_SQATTR_OFFSET(c) \ | |
31 | ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT) | |
32 | #define MCQ_QCFG_SIZE 0x40 | |
33 | ||
6e3fd44d YG |
34 | enum { |
35 | TSTBUS_UAWM, | |
36 | TSTBUS_UARM, | |
37 | TSTBUS_TXUC, | |
38 | TSTBUS_RXUC, | |
39 | TSTBUS_DFC, | |
40 | TSTBUS_TRLUT, | |
41 | TSTBUS_TMRLUT, | |
42 | TSTBUS_OCSC, | |
43 | TSTBUS_UTP_HCI, | |
44 | TSTBUS_COMBINED, | |
45 | TSTBUS_WRAPPER, | |
46 | TSTBUS_UNIPRO, | |
47 | TSTBUS_MAX, | |
48 | }; | |
81c0fc51 | 49 | |
03ce80a1 MS |
50 | #define QCOM_UFS_MAX_GEAR 4 |
51 | #define QCOM_UFS_MAX_LANE 2 | |
52 | ||
53 | enum { | |
54 | MODE_MIN, | |
55 | MODE_PWM, | |
56 | MODE_HS_RA, | |
57 | MODE_HS_RB, | |
58 | MODE_MAX, | |
59 | }; | |
60 | ||
01e74715 | 61 | static const struct __ufs_qcom_bw_table { |
03ce80a1 MS |
62 | u32 mem_bw; |
63 | u32 cfg_bw; | |
64 | } ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = { | |
65 | [MODE_MIN][0][0] = { 0, 0 }, /* Bandwidth values in KB/s */ | |
66 | [MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922, 1000 }, | |
67 | [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 }, | |
68 | [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 }, | |
69 | [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 }, | |
70 | [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 }, | |
71 | [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 }, | |
72 | [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 }, | |
73 | [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 }, | |
74 | [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 }, | |
75 | [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 }, | |
76 | [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 }, | |
77 | [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 }, | |
78 | [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 }, | |
79 | [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 }, | |
80 | [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, | |
81 | [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, | |
82 | [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 }, | |
83 | [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 }, | |
84 | [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 }, | |
85 | [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 }, | |
86 | [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 }, | |
87 | [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 }, | |
88 | [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, | |
89 | [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, | |
90 | [MODE_MAX][0][0] = { 7643136, 307200 }, | |
91 | }; | |
92 | ||
6e3fd44d | 93 | static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); |
b4e13e1a | 94 | static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up); |
f06fcc71 | 95 | |
12fd5f25 EG |
96 | static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) |
97 | { | |
98 | return container_of(rcd, struct ufs_qcom_host, rcdev); | |
99 | } | |
100 | ||
56541c7c AV |
101 | #ifdef CONFIG_SCSI_UFS_CRYPTO |
102 | ||
103 | static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) | |
104 | { | |
105 | if (host->hba->caps & UFSHCD_CAP_CRYPTO) | |
106 | qcom_ice_enable(host->ice); | |
107 | } | |
108 | ||
109 | static int ufs_qcom_ice_init(struct ufs_qcom_host *host) | |
110 | { | |
111 | struct ufs_hba *hba = host->hba; | |
112 | struct device *dev = hba->dev; | |
113 | struct qcom_ice *ice; | |
114 | ||
115 | ice = of_qcom_ice_get(dev); | |
116 | if (ice == ERR_PTR(-EOPNOTSUPP)) { | |
117 | dev_warn(dev, "Disabling inline encryption support\n"); | |
118 | ice = NULL; | |
119 | } | |
120 | ||
121 | if (IS_ERR_OR_NULL(ice)) | |
122 | return PTR_ERR_OR_ZERO(ice); | |
123 | ||
124 | host->ice = ice; | |
125 | hba->caps |= UFSHCD_CAP_CRYPTO; | |
126 | ||
127 | return 0; | |
128 | } | |
129 | ||
130 | static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) | |
131 | { | |
132 | if (host->hba->caps & UFSHCD_CAP_CRYPTO) | |
133 | return qcom_ice_resume(host->ice); | |
134 | ||
135 | return 0; | |
136 | } | |
137 | ||
138 | static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) | |
139 | { | |
140 | if (host->hba->caps & UFSHCD_CAP_CRYPTO) | |
141 | return qcom_ice_suspend(host->ice); | |
142 | ||
143 | return 0; | |
144 | } | |
145 | ||
146 | static int ufs_qcom_ice_program_key(struct ufs_hba *hba, | |
147 | const union ufs_crypto_cfg_entry *cfg, | |
148 | int slot) | |
149 | { | |
150 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); | |
151 | union ufs_crypto_cap_entry cap; | |
152 | bool config_enable = | |
153 | cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE; | |
154 | ||
155 | /* Only AES-256-XTS has been tested so far. */ | |
156 | cap = hba->crypto_cap_array[cfg->crypto_cap_idx]; | |
157 | if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS || | |
158 | cap.key_size != UFS_CRYPTO_KEY_SIZE_256) | |
3bf7ab4a | 159 | return -EOPNOTSUPP; |
56541c7c AV |
160 | |
161 | if (config_enable) | |
162 | return qcom_ice_program_key(host->ice, | |
163 | QCOM_ICE_CRYPTO_ALG_AES_XTS, | |
164 | QCOM_ICE_CRYPTO_KEY_SIZE_256, | |
165 | cfg->crypto_key, | |
166 | cfg->data_unit_size, slot); | |
167 | else | |
168 | return qcom_ice_evict_key(host->ice, slot); | |
169 | } | |
170 | ||
171 | #else | |
172 | ||
173 | #define ufs_qcom_ice_program_key NULL | |
174 | ||
175 | static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) | |
176 | { | |
177 | } | |
178 | ||
179 | static int ufs_qcom_ice_init(struct ufs_qcom_host *host) | |
180 | { | |
181 | return 0; | |
182 | } | |
183 | ||
184 | static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) | |
185 | { | |
186 | return 0; | |
187 | } | |
188 | ||
189 | static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) | |
190 | { | |
191 | return 0; | |
192 | } | |
193 | #endif | |
194 | ||
81c0fc51 YG |
195 | static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) |
196 | { | |
197 | if (!host->is_lane_clks_enabled) | |
198 | return; | |
199 | ||
9caef856 | 200 | clk_bulk_disable_unprepare(host->num_clks, host->clks); |
81c0fc51 YG |
201 | |
202 | host->is_lane_clks_enabled = false; | |
203 | } | |
204 | ||
205 | static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) | |
206 | { | |
031312db | 207 | int err; |
81c0fc51 | 208 | |
9caef856 | 209 | err = clk_bulk_prepare_enable(host->num_clks, host->clks); |
81c0fc51 | 210 | if (err) |
031312db | 211 | return err; |
81c0fc51 | 212 | |
81c0fc51 | 213 | host->is_lane_clks_enabled = true; |
031312db MS |
214 | |
215 | return 0; | |
81c0fc51 YG |
216 | } |
217 | ||
218 | static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) | |
219 | { | |
1f165c87 | 220 | int err; |
81c0fc51 YG |
221 | struct device *dev = host->hba->dev; |
222 | ||
e1a7752c LJ |
223 | if (has_acpi_companion(dev)) |
224 | return 0; | |
225 | ||
9caef856 MS |
226 | err = devm_clk_bulk_get_all(dev, &host->clks); |
227 | if (err <= 0) | |
031312db | 228 | return err; |
81c0fc51 | 229 | |
9caef856 | 230 | host->num_clks = err; |
031312db MS |
231 | |
232 | return 0; | |
81c0fc51 YG |
233 | } |
234 | ||
81c0fc51 YG |
235 | static int ufs_qcom_check_hibern8(struct ufs_hba *hba) |
236 | { | |
237 | int err; | |
1f165c87 | 238 | u32 tx_fsm_val; |
81c0fc51 YG |
239 | unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); |
240 | ||
241 | do { | |
242 | err = ufshcd_dme_get(hba, | |
f06fcc71 YG |
243 | UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, |
244 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), | |
245 | &tx_fsm_val); | |
81c0fc51 YG |
246 | if (err || tx_fsm_val == TX_FSM_HIBERN8) |
247 | break; | |
248 | ||
249 | /* sleep for max. 200us */ | |
250 | usleep_range(100, 200); | |
251 | } while (time_before(jiffies, timeout)); | |
252 | ||
253 | /* | |
254 | * we might have scheduled out for long during polling so | |
255 | * check the state again. | |
256 | */ | |
257 | if (time_after(jiffies, timeout)) | |
258 | err = ufshcd_dme_get(hba, | |
f06fcc71 YG |
259 | UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, |
260 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), | |
261 | &tx_fsm_val); | |
81c0fc51 YG |
262 | |
263 | if (err) { | |
264 | dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", | |
265 | __func__, err); | |
266 | } else if (tx_fsm_val != TX_FSM_HIBERN8) { | |
267 | err = tx_fsm_val; | |
268 | dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", | |
269 | __func__, err); | |
270 | } | |
271 | ||
272 | return err; | |
273 | } | |
274 | ||
f06fcc71 YG |
275 | static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) |
276 | { | |
104cd58d | 277 | ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1); |
9c02aa24 | 278 | |
c422fbd5 | 279 | if (host->hw_ver.major >= 0x05) |
9c02aa24 | 280 | ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0); |
f06fcc71 YG |
281 | } |
282 | ||
bc5b6816 | 283 | /* |
870b1279 CG |
284 | * ufs_qcom_host_reset - reset host controller and PHY |
285 | */ | |
286 | static int ufs_qcom_host_reset(struct ufs_hba *hba) | |
287 | { | |
1f165c87 | 288 | int ret; |
870b1279 | 289 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1f165c87 | 290 | bool reenable_intr; |
870b1279 | 291 | |
d42d3686 | 292 | if (!host->core_reset) |
031312db | 293 | return 0; |
870b1279 | 294 | |
4a791574 | 295 | reenable_intr = hba->is_irq_enabled; |
0ae7a027 | 296 | ufshcd_disable_irq(hba); |
4a791574 | 297 | |
870b1279 CG |
298 | ret = reset_control_assert(host->core_reset); |
299 | if (ret) { | |
300 | dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", | |
301 | __func__, ret); | |
031312db | 302 | return ret; |
870b1279 CG |
303 | } |
304 | ||
305 | /* | |
306 | * The hardware requirement for delay between assert/deassert | |
307 | * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to | |
308 | * ~125us (4/32768). To be on the safe side add 200us delay. | |
309 | */ | |
310 | usleep_range(200, 210); | |
311 | ||
312 | ret = reset_control_deassert(host->core_reset); | |
d1195471 | 313 | if (ret) { |
870b1279 CG |
314 | dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", |
315 | __func__, ret); | |
d1195471 MS |
316 | return ret; |
317 | } | |
870b1279 CG |
318 | |
319 | usleep_range(1000, 1100); | |
320 | ||
0ae7a027 MS |
321 | if (reenable_intr) |
322 | ufshcd_enable_irq(hba); | |
4a791574 | 323 | |
031312db | 324 | return 0; |
870b1279 CG |
325 | } |
326 | ||
c2709865 MS |
327 | static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba) |
328 | { | |
329 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); | |
330 | ||
104cd58d | 331 | if (host->hw_ver.major >= 0x4) |
2c407fe9 | 332 | return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0)); |
c2709865 MS |
333 | |
334 | /* Default is HS-G3 */ | |
335 | return UFS_HS_G3; | |
336 | } | |
337 | ||
81c0fc51 YG |
338 | static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) |
339 | { | |
1ce5898a | 340 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
9d8528a8 | 341 | struct ufs_host_params *host_params = &host->host_params; |
81c0fc51 | 342 | struct phy *phy = host->generic_phy; |
9d8528a8 | 343 | enum phy_mode mode; |
031312db | 344 | int ret; |
81c0fc51 | 345 | |
9d8528a8 CG |
346 | /* |
347 | * HW ver 5 can only support up to HS-G5 Rate-A due to HW limitations. | |
348 | * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A, | |
349 | * so that the subsequent power mode change shall stick to Rate-A. | |
350 | */ | |
351 | if (host->hw_ver.major == 0x5) { | |
352 | if (host->phy_gear == UFS_HS_G5) | |
353 | host_params->hs_rate = PA_HS_MODE_A; | |
354 | else | |
355 | host_params->hs_rate = PA_HS_MODE_B; | |
356 | } | |
357 | ||
358 | mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A; | |
359 | ||
870b1279 CG |
360 | /* Reset UFS Host Controller and PHY */ |
361 | ret = ufs_qcom_host_reset(hba); | |
362 | if (ret) | |
d1195471 | 363 | return ret; |
870b1279 | 364 | |
052553af VG |
365 | /* phy initialization - calibrate the phy */ |
366 | ret = phy_init(phy); | |
81c0fc51 | 367 | if (ret) { |
052553af | 368 | dev_err(hba->dev, "%s: phy init failed, ret = %d\n", |
4b9ad0b8 | 369 | __func__, ret); |
031312db | 370 | return ret; |
81c0fc51 YG |
371 | } |
372 | ||
a68abdad CG |
373 | ret = phy_set_mode_ext(phy, mode, host->phy_gear); |
374 | if (ret) | |
375 | goto out_disable_phy; | |
baf5ddac | 376 | |
052553af VG |
377 | /* power on phy - start serdes and phy's power and clocks */ |
378 | ret = phy_power_on(phy); | |
81c0fc51 | 379 | if (ret) { |
052553af | 380 | dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", |
81c0fc51 | 381 | __func__, ret); |
052553af | 382 | goto out_disable_phy; |
81c0fc51 YG |
383 | } |
384 | ||
f06fcc71 YG |
385 | ufs_qcom_select_unipro_mode(host); |
386 | ||
052553af VG |
387 | return 0; |
388 | ||
389 | out_disable_phy: | |
052553af | 390 | phy_exit(phy); |
031312db | 391 | |
81c0fc51 YG |
392 | return ret; |
393 | } | |
394 | ||
395 | /* | |
396 | * The UTP controller has a number of internal clock gating cells (CGCs). | |
397 | * Internal hardware sub-modules within the UTP controller control the CGCs. | |
398 | * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved | |
399 | * in a specific operation, UTP controller CGCs are by default disabled and | |
400 | * this function enables them (after every UFS link startup) to save some power | |
401 | * leakage. | |
402 | */ | |
403 | static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) | |
404 | { | |
0e9f4375 MS |
405 | ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL, |
406 | REG_UFS_CFG2); | |
81c0fc51 YG |
407 | |
408 | /* Ensure that HW clock gating is enabled before next operations */ | |
d9488511 | 409 | ufshcd_readl(hba, REG_UFS_CFG2); |
81c0fc51 YG |
410 | } |
411 | ||
f06fcc71 YG |
412 | static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, |
413 | enum ufs_notify_change_status status) | |
81c0fc51 | 414 | { |
1ce5898a | 415 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1f165c87 | 416 | int err; |
81c0fc51 YG |
417 | |
418 | switch (status) { | |
419 | case PRE_CHANGE: | |
e430c0e0 MS |
420 | err = ufs_qcom_power_up_sequence(hba); |
421 | if (err) | |
422 | return err; | |
423 | ||
81c0fc51 YG |
424 | /* |
425 | * The PHY PLL output is the source of tx/rx lane symbol | |
426 | * clocks, hence, enable the lane clocks only after PHY | |
427 | * is initialized. | |
428 | */ | |
429 | err = ufs_qcom_enable_lane_clks(host); | |
430 | break; | |
431 | case POST_CHANGE: | |
432 | /* check if UFS PHY moved from DISABLED to HIBERN8 */ | |
433 | err = ufs_qcom_check_hibern8(hba); | |
434 | ufs_qcom_enable_hw_clk_gating(hba); | |
df4ec2fa | 435 | ufs_qcom_ice_enable(host); |
81c0fc51 YG |
436 | break; |
437 | default: | |
438 | dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); | |
439 | err = -EINVAL; | |
440 | break; | |
441 | } | |
442 | return err; | |
443 | } | |
444 | ||
fd915c67 NR |
445 | /** |
446 | * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers | |
447 | * | |
448 | * @hba: host controller instance | |
449 | * @gear: Current operating gear | |
450 | * @hs: current power mode | |
451 | * @rate: current operating rate (A or B) | |
452 | * @update_link_startup_timer: indicate if link_start ongoing | |
453 | * @is_pre_scale_up: flag to check if pre scale up condition. | |
3a17fefe | 454 | * Return: zero for success and non-zero in case of a failure. |
81c0fc51 | 455 | */ |
f06fcc71 | 456 | static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, |
fd915c67 NR |
457 | u32 hs, u32 rate, bool update_link_startup_timer, |
458 | bool is_pre_scale_up) | |
81c0fc51 | 459 | { |
1ce5898a | 460 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
81c0fc51 | 461 | struct ufs_clk_info *clki; |
81c0fc51 | 462 | unsigned long core_clk_rate = 0; |
1f165c87 | 463 | u32 core_clk_cycles_per_us; |
81c0fc51 | 464 | |
81c7e06a | 465 | /* |
104cd58d | 466 | * UTP controller uses SYS1CLK_1US_REG register for Interrupt |
81c7e06a | 467 | * Aggregation logic. |
fd915c67 NR |
468 | * It is mandatory to write SYS1CLK_1US_REG register on UFS host |
469 | * controller V4.0.0 onwards. | |
470 | */ | |
104cd58d | 471 | if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba)) |
031312db | 472 | return 0; |
81c7e06a | 473 | |
81c0fc51 YG |
474 | if (gear == 0) { |
475 | dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); | |
031312db | 476 | return -EINVAL; |
81c0fc51 YG |
477 | } |
478 | ||
479 | list_for_each_entry(clki, &hba->clk_list_head, list) { | |
fd915c67 NR |
480 | if (!strcmp(clki->name, "core_clk")) { |
481 | if (is_pre_scale_up) | |
482 | core_clk_rate = clki->max_freq; | |
483 | else | |
484 | core_clk_rate = clk_get_rate(clki->clk); | |
485 | break; | |
486 | } | |
487 | ||
81c0fc51 YG |
488 | } |
489 | ||
490 | /* If frequency is smaller than 1MHz, set to 1MHz */ | |
491 | if (core_clk_rate < DEFAULT_CLK_RATE_HZ) | |
492 | core_clk_rate = DEFAULT_CLK_RATE_HZ; | |
493 | ||
494 | core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; | |
f06fcc71 YG |
495 | if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { |
496 | ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); | |
497 | /* | |
498 | * make sure above write gets applied before we return from | |
499 | * this function. | |
500 | */ | |
a862fafa | 501 | ufshcd_readl(hba, REG_UFS_SYS1CLK_1US); |
f06fcc71 YG |
502 | } |
503 | ||
031312db | 504 | return 0; |
81c0fc51 YG |
505 | } |
506 | ||
f06fcc71 YG |
507 | static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, |
508 | enum ufs_notify_change_status status) | |
81c0fc51 | 509 | { |
f06fcc71 | 510 | int err = 0; |
81c0fc51 YG |
511 | |
512 | switch (status) { | |
513 | case PRE_CHANGE: | |
f06fcc71 | 514 | if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, |
fd915c67 | 515 | 0, true, false)) { |
81c0fc51 YG |
516 | dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", |
517 | __func__); | |
031312db | 518 | return -EINVAL; |
81c0fc51 | 519 | } |
f06fcc71 | 520 | |
104cd58d MS |
521 | err = ufs_qcom_set_core_clk_ctrl(hba, true); |
522 | if (err) | |
523 | dev_err(hba->dev, "cfg core clk ctrl failed\n"); | |
4b9ad0b8 YG |
524 | /* |
525 | * Some UFS devices (and may be host) have issues if LCC is | |
526 | * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 | |
527 | * before link startup which will make sure that both host | |
528 | * and device TX LCC are disabled once link startup is | |
529 | * completed. | |
530 | */ | |
531 | if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) | |
984eaac1 | 532 | err = ufshcd_disable_host_tx_lcc(hba); |
4b9ad0b8 | 533 | |
81c0fc51 YG |
534 | break; |
535 | default: | |
536 | break; | |
537 | } | |
538 | ||
f06fcc71 | 539 | return err; |
81c0fc51 YG |
540 | } |
541 | ||
b61d0414 ZC |
542 | static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) |
543 | { | |
544 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); | |
545 | ||
546 | /* reset gpio is optional */ | |
547 | if (!host->device_reset) | |
548 | return; | |
549 | ||
550 | gpiod_set_value_cansleep(host->device_reset, asserted); | |
551 | } | |
552 | ||
9561f584 PW |
553 | static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, |
554 | enum ufs_notify_change_status status) | |
81c0fc51 | 555 | { |
1ce5898a | 556 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
81c0fc51 | 557 | struct phy *phy = host->generic_phy; |
81c0fc51 | 558 | |
9561f584 PW |
559 | if (status == PRE_CHANGE) |
560 | return 0; | |
561 | ||
81c0fc51 YG |
562 | if (ufs_qcom_is_link_off(hba)) { |
563 | /* | |
564 | * Disable the tx/rx lane symbol clocks before PHY is | |
565 | * powered down as the PLL source should be disabled | |
566 | * after downstream clocks are disabled. | |
567 | */ | |
568 | ufs_qcom_disable_lane_clks(host); | |
569 | phy_power_off(phy); | |
81c0fc51 | 570 | |
b61d0414 ZC |
571 | /* reset the connected UFS device during power down */ |
572 | ufs_qcom_device_reset_ctrl(hba, true); | |
573 | ||
3f6d1767 | 574 | } else if (!ufs_qcom_is_link_active(hba)) { |
f06fcc71 | 575 | ufs_qcom_disable_lane_clks(host); |
f06fcc71 | 576 | } |
81c0fc51 | 577 | |
56541c7c | 578 | return ufs_qcom_ice_suspend(host); |
81c0fc51 YG |
579 | } |
580 | ||
581 | static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) | |
582 | { | |
1ce5898a | 583 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
81c0fc51 YG |
584 | struct phy *phy = host->generic_phy; |
585 | int err; | |
586 | ||
3f6d1767 EG |
587 | if (ufs_qcom_is_link_off(hba)) { |
588 | err = phy_power_on(phy); | |
589 | if (err) { | |
590 | dev_err(hba->dev, "%s: failed PHY power on: %d\n", | |
591 | __func__, err); | |
592 | return err; | |
593 | } | |
81c0fc51 | 594 | |
3f6d1767 EG |
595 | err = ufs_qcom_enable_lane_clks(host); |
596 | if (err) | |
597 | return err; | |
f06fcc71 | 598 | |
3f6d1767 EG |
599 | } else if (!ufs_qcom_is_link_active(hba)) { |
600 | err = ufs_qcom_enable_lane_clks(host); | |
601 | if (err) | |
602 | return err; | |
603 | } | |
81c0fc51 | 604 | |
bee40dc1 | 605 | return ufs_qcom_ice_resume(host); |
81c0fc51 YG |
606 | } |
607 | ||
f06fcc71 YG |
608 | static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) |
609 | { | |
610 | if (host->dev_ref_clk_ctrl_mmio && | |
611 | (enable ^ host->is_dev_ref_clk_enabled)) { | |
612 | u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio); | |
613 | ||
614 | if (enable) | |
615 | temp |= host->dev_ref_clk_en_mask; | |
616 | else | |
617 | temp &= ~host->dev_ref_clk_en_mask; | |
618 | ||
619 | /* | |
620 | * If we are here to disable this clock it might be immediately | |
621 | * after entering into hibern8 in which case we need to make | |
1cbadd0c | 622 | * sure that device ref_clk is active for specific time after |
f06fcc71 YG |
623 | * hibern8 enter. |
624 | */ | |
1cbadd0c CG |
625 | if (!enable) { |
626 | unsigned long gating_wait; | |
627 | ||
628 | gating_wait = host->hba->dev_info.clk_gating_wait_us; | |
629 | if (!gating_wait) { | |
630 | udelay(1); | |
631 | } else { | |
632 | /* | |
633 | * bRefClkGatingWaitTime defines the minimum | |
634 | * time for which the reference clock is | |
635 | * required by device during transition from | |
636 | * HS-MODE to LS-MODE or HIBERN8 state. Give it | |
637 | * more delay to be on the safe side. | |
638 | */ | |
639 | gating_wait += 10; | |
640 | usleep_range(gating_wait, gating_wait + 10); | |
641 | } | |
642 | } | |
f06fcc71 YG |
643 | |
644 | writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); | |
645 | ||
8eecddfc MS |
646 | /* |
647 | * Make sure the write to ref_clk reaches the destination and | |
648 | * not stored in a Write Buffer (WB). | |
649 | */ | |
650 | readl(host->dev_ref_clk_ctrl_mmio); | |
f06fcc71 YG |
651 | |
652 | /* | |
653 | * If we call hibern8 exit after this, we need to make sure that | |
654 | * device ref_clk is stable for at least 1us before the hibern8 | |
655 | * exit command. | |
656 | */ | |
657 | if (enable) | |
658 | udelay(1); | |
659 | ||
660 | host->is_dev_ref_clk_enabled = enable; | |
661 | } | |
662 | } | |
663 | ||
03ce80a1 MS |
664 | static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw) |
665 | { | |
666 | struct device *dev = host->hba->dev; | |
667 | int ret; | |
668 | ||
669 | ret = icc_set_bw(host->icc_ddr, 0, mem_bw); | |
670 | if (ret < 0) { | |
671 | dev_err(dev, "failed to set bandwidth request: %d\n", ret); | |
672 | return ret; | |
673 | } | |
674 | ||
675 | ret = icc_set_bw(host->icc_cpu, 0, cfg_bw); | |
676 | if (ret < 0) { | |
677 | dev_err(dev, "failed to set bandwidth request: %d\n", ret); | |
678 | return ret; | |
679 | } | |
680 | ||
681 | return 0; | |
682 | } | |
683 | ||
684 | static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host) | |
685 | { | |
686 | struct ufs_pa_layer_attr *p = &host->dev_req_params; | |
687 | int gear = max_t(u32, p->gear_rx, p->gear_tx); | |
688 | int lane = max_t(u32, p->lane_rx, p->lane_tx); | |
689 | ||
690 | if (ufshcd_is_hs_mode(p)) { | |
691 | if (p->hs_rate == PA_HS_MODE_B) | |
692 | return ufs_qcom_bw_table[MODE_HS_RB][gear][lane]; | |
693 | else | |
694 | return ufs_qcom_bw_table[MODE_HS_RA][gear][lane]; | |
695 | } else { | |
696 | return ufs_qcom_bw_table[MODE_PWM][gear][lane]; | |
697 | } | |
698 | } | |
699 | ||
700 | static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host) | |
701 | { | |
702 | struct __ufs_qcom_bw_table bw_table; | |
703 | ||
704 | bw_table = ufs_qcom_get_bw_table(host); | |
705 | ||
706 | return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw); | |
707 | } | |
708 | ||
81c0fc51 | 709 | static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, |
f06fcc71 | 710 | enum ufs_notify_change_status status, |
81c0fc51 YG |
711 | struct ufs_pa_layer_attr *dev_max_params, |
712 | struct ufs_pa_layer_attr *dev_req_params) | |
713 | { | |
1ce5898a | 714 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
55820a7f | 715 | struct ufs_host_params *host_params = &host->host_params; |
81c0fc51 | 716 | int ret = 0; |
81c0fc51 | 717 | |
031312db MS |
718 | if (!dev_req_params) { |
719 | pr_err("%s: incoming dev_req_params is NULL\n", __func__); | |
720 | return -EINVAL; | |
721 | } | |
722 | ||
81c0fc51 YG |
723 | switch (status) { |
724 | case PRE_CHANGE: | |
55820a7f | 725 | ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params); |
81c0fc51 | 726 | if (ret) { |
1026f7d3 | 727 | dev_err(hba->dev, "%s: failed to determine capabilities\n", |
81c0fc51 | 728 | __func__); |
031312db | 729 | return ret; |
81c0fc51 YG |
730 | } |
731 | ||
fc88ca19 | 732 | /* |
743e1f59 CG |
733 | * During UFS driver probe, always update the PHY gear to match the negotiated |
734 | * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled, | |
735 | * the second init can program the optimal PHY settings. This allows one to start | |
736 | * the first init with either the minimum or the maximum support gear. | |
fc88ca19 | 737 | */ |
10a39667 EC |
738 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) { |
739 | /* | |
740 | * Skip REINIT if the negotiated gear matches with the | |
741 | * initial phy_gear. Otherwise, update the phy_gear to | |
742 | * program the optimal gear setting during REINIT. | |
743 | */ | |
744 | if (host->phy_gear == dev_req_params->gear_tx) | |
745 | hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; | |
746 | else | |
747 | host->phy_gear = dev_req_params->gear_tx; | |
748 | } | |
baf5ddac | 749 | |
f37aabcf YG |
750 | /* enable the device ref clock before changing to HS mode */ |
751 | if (!ufshcd_is_hs_mode(&hba->pwr_info) && | |
752 | ufshcd_is_hs_mode(dev_req_params)) | |
753 | ufs_qcom_dev_ref_clk_ctrl(host, true); | |
518b32f1 CG |
754 | |
755 | if (host->hw_ver.major >= 0x4) { | |
d9fa1e73 SC |
756 | ufshcd_dme_configure_adapt(hba, |
757 | dev_req_params->gear_tx, | |
758 | PA_INITIAL_ADAPT); | |
518b32f1 | 759 | } |
81c0fc51 YG |
760 | break; |
761 | case POST_CHANGE: | |
f06fcc71 | 762 | if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, |
81c0fc51 | 763 | dev_req_params->pwr_rx, |
fd915c67 | 764 | dev_req_params->hs_rate, false, false)) { |
81c0fc51 YG |
765 | dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", |
766 | __func__); | |
767 | /* | |
768 | * we return error code at the end of the routine, | |
769 | * but continue to configure UFS_PHY_TX_LANE_ENABLE | |
770 | * and bus voting as usual | |
771 | */ | |
772 | ret = -EINVAL; | |
773 | } | |
774 | ||
81c0fc51 YG |
775 | /* cache the power mode parameters to use internally */ |
776 | memcpy(&host->dev_req_params, | |
777 | dev_req_params, sizeof(*dev_req_params)); | |
f37aabcf | 778 | |
03ce80a1 MS |
779 | ufs_qcom_icc_update_bw(host); |
780 | ||
f37aabcf YG |
781 | /* disable the device ref clock if entered PWM mode */ |
782 | if (ufshcd_is_hs_mode(&hba->pwr_info) && | |
783 | !ufshcd_is_hs_mode(dev_req_params)) | |
784 | ufs_qcom_dev_ref_clk_ctrl(host, false); | |
81c0fc51 YG |
785 | break; |
786 | default: | |
787 | ret = -EINVAL; | |
788 | break; | |
789 | } | |
031312db | 790 | |
81c0fc51 YG |
791 | return ret; |
792 | } | |
793 | ||
56d4a186 SJ |
794 | static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) |
795 | { | |
796 | int err; | |
797 | u32 pa_vs_config_reg1; | |
798 | ||
799 | err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), | |
800 | &pa_vs_config_reg1); | |
801 | if (err) | |
031312db | 802 | return err; |
56d4a186 SJ |
803 | |
804 | /* Allow extension of MSB bits of PA_SaveConfigTime attribute */ | |
031312db | 805 | return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), |
56d4a186 | 806 | (pa_vs_config_reg1 | (1 << 12))); |
56d4a186 SJ |
807 | } |
808 | ||
809 | static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) | |
810 | { | |
811 | int err = 0; | |
812 | ||
813 | if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) | |
814 | err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); | |
815 | ||
27ff2c60 CG |
816 | if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) |
817 | hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; | |
818 | ||
56d4a186 SJ |
819 | return err; |
820 | } | |
821 | ||
ae977587 YG |
822 | static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) |
823 | { | |
104cd58d | 824 | return ufshci_version(2, 0); |
ae977587 YG |
825 | } |
826 | ||
81c0fc51 YG |
827 | /** |
828 | * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks | |
829 | * @hba: host controller instance | |
830 | * | |
831 | * QCOM UFS host controller might have some non standard behaviours (quirks) | |
832 | * than what is specified by UFSHCI specification. Advertise all such | |
833 | * quirks to standard UFS host controller driver so standard takes them into | |
834 | * account. | |
835 | */ | |
836 | static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) | |
837 | { | |
1ce5898a | 838 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
81c0fc51 | 839 | |
104cd58d | 840 | if (host->hw_ver.major == 0x2) |
ae977587 | 841 | hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; |
81c0fc51 | 842 | |
baf5ddac MS |
843 | if (host->hw_ver.major > 0x3) |
844 | hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; | |
cad2e03d | 845 | } |
f06fcc71 | 846 | |
0bd3cb89 CG |
/*
 * ufs_qcom_set_phy_gear - choose the gear used for initial PHY power-up
 * @host: Qualcomm host private data
 *
 * Picks host->phy_gear based on the controller hardware revision and, on
 * V5+ parts, the UFS device major version latched in a spare debug
 * register. May also clear the REINIT quirk when the device version is
 * already known at this point.
 */
static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
{
	struct ufs_host_params *host_params = &host->host_params;
	u32 val, dev_major;

	/*
	 * Default to powering up the PHY to the max gear possible, which is
	 * backwards compatible with lower gears but not optimal from
	 * a power usage point of view. After device negotiation, if the
	 * gear is lower a reinit will be performed to program the PHY
	 * to the ideal gear for this combo of controller and device.
	 */
	host->phy_gear = host_params->hs_tx_gear;

	if (host->hw_ver.major < 0x4) {
		/*
		 * These controllers only have one PHY init sequence,
		 * let's power up the PHY using that (the minimum supported
		 * gear, UFS_HS_G2).
		 */
		host->phy_gear = UFS_HS_G2;
	} else if (host->hw_ver.major >= 0x5) {
		/* Device major version as latched in the spare debug register */
		val = ufshcd_readl(host->hba, REG_UFS_DEBUG_SPARE_CFG);
		dev_major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val);

		/*
		 * Since the UFS device version is populated, let's remove the
		 * REINIT quirk as the negotiated gear won't change during boot.
		 * So there is no need to do reinit.
		 */
		if (dev_major != 0x0)
			host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;

		/*
		 * For UFS 3.1 device and older, power up the PHY using HS-G4
		 * PHY gear to save power.
		 */
		if (dev_major > 0x0 && dev_major < 0x4)
			host->phy_gear = UFS_HS_G4;
	}
}
888 | ||
55820a7f | 889 | static void ufs_qcom_set_host_params(struct ufs_hba *hba) |
cad2e03d | 890 | { |
1ce5898a | 891 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
55820a7f CG |
892 | struct ufs_host_params *host_params = &host->host_params; |
893 | ||
894 | ufshcd_init_host_params(host_params); | |
cad2e03d | 895 | |
55820a7f CG |
896 | /* This driver only supports symmetic gear setting i.e., hs_tx_gear == hs_rx_gear */ |
897 | host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba); | |
898 | } | |
899 | ||
cad2e03d YG |
900 | static void ufs_qcom_set_caps(struct ufs_hba *hba) |
901 | { | |
f06fcc71 | 902 | hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; |
87bd0501 | 903 | hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING; |
f06fcc71 | 904 | hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; |
04ee8a01 | 905 | hba->caps |= UFSHCD_CAP_WB_EN; |
61906fd4 | 906 | hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE; |
6f21d927 | 907 | hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; |
81c0fc51 YG |
908 | } |
909 | ||
f06fcc71 YG |
/**
 * ufs_qcom_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * On enable: the interconnect bandwidth vote is raised before the clocks
 * turn on (PRE_CHANGE) and the device reference clock is (re)enabled for
 * HS mode after they are on (POST_CHANGE). On disable the order is
 * mirrored: the device ref_clk goes off first, the minimum interconnect
 * vote is applied last.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	switch (status) {
	case PRE_CHANGE:
		if (on) {
			/* Vote bandwidth before the clocks come up */
			ufs_qcom_icc_update_bw(host);
		} else {
			if (!ufs_qcom_is_link_active(hba)) {
				/* disable device ref_clk */
				ufs_qcom_dev_ref_clk_ctrl(host, false);
			}
		}
		break;
	case POST_CHANGE:
		if (on) {
			/* enable the device ref clock for HS mode*/
			if (ufshcd_is_hs_mode(&hba->pwr_info))
				ufs_qcom_dev_ref_clk_ctrl(host, true);
		} else {
			/* Drop to the minimum interconnect vote */
			ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
					    ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
		}
		break;
	}

	return 0;
}
956 | ||
12fd5f25 EG |
/*
 * Reset-controller .assert callback: assert the UFS host controller reset
 * on behalf of a consumer (id is unused — only one reset line is exposed).
 */
static int
ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	ufs_qcom_assert_reset(host->hba);
	/* provide 1ms delay to let the reset pulse propagate. */
	usleep_range(1000, 1100);
	return 0;
}
967 | ||
/*
 * Reset-controller .deassert callback: release the UFS host controller
 * reset (id is unused — only one reset line is exposed).
 */
static int
ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	ufs_qcom_deassert_reset(host->hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	return 0;
}
982 | ||
/* Ops for the reset controller registered in ufs_qcom_init() */
static const struct reset_control_ops ufs_qcom_reset_ops = {
	.assert = ufs_qcom_reset_assert,
	.deassert = ufs_qcom_reset_deassert,
};
987 | ||
03ce80a1 MS |
/*
 * ufs_qcom_icc_init - acquire interconnect paths and cast the initial vote
 * @host: Qualcomm host private data
 *
 * Gets the "ufs-ddr" and "cpu-ufs" interconnect paths (managed, freed on
 * driver detach) and votes maximum bandwidth for the duration of
 * initialization. Returns 0 or a negative errno via dev_err_probe().
 */
static int ufs_qcom_icc_init(struct ufs_qcom_host *host)
{
	struct device *dev = host->hba->dev;
	int ret;

	host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr");
	if (IS_ERR(host->icc_ddr))
		return dev_err_probe(dev, PTR_ERR(host->icc_ddr),
				     "failed to acquire interconnect path\n");

	host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs");
	if (IS_ERR(host->icc_cpu))
		return dev_err_probe(dev, PTR_ERR(host->icc_cpu),
				     "failed to acquire interconnect path\n");

	/*
	 * Set Maximum bandwidth vote before initializing the UFS controller and
	 * device. Ideally, a minimal interconnect vote would suffice for the
	 * initialization, but a max vote would allow faster initialization.
	 */
	ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw,
				  ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to set bandwidth request\n");

	return 0;
}
1015 | ||
81c0fc51 YG |
/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Allocates the variant data, registers the optional reset controller,
 * acquires PHY / interconnect / reset-GPIO resources, reads the hardware
 * revision and applies caps, quirks and gear settings before enabling
 * clocks and the (non-fatal) test bus configuration.
 *
 * Return: -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct ufs_qcom_host *host;
	struct ufs_clk_info *clki;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/* Setup the optional reset control of HCI */
	host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
	if (IS_ERR(host->core_reset)) {
		err = dev_err_probe(dev, PTR_ERR(host->core_reset),
				    "Failed to get reset control\n");
		goto out_variant_clear;
	}

	/* Fire up the reset controller. Failure here is non-fatal. */
	host->rcdev.of_node = dev->of_node;
	host->rcdev.ops = &ufs_qcom_reset_ops;
	host->rcdev.owner = dev->driver->owner;
	host->rcdev.nr_resets = 1;
	err = devm_reset_controller_register(dev, &host->rcdev);
	if (err)
		dev_warn(dev, "Failed to register reset controller\n");

	/* On ACPI systems the PHY is not described; skip acquiring it */
	if (!has_acpi_companion(dev)) {
		host->generic_phy = devm_phy_get(dev, "ufsphy");
		if (IS_ERR(host->generic_phy)) {
			err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
			goto out_variant_clear;
		}
	}

	err = ufs_qcom_icc_init(host);
	if (err)
		goto out_variant_clear;

	/* Device reset GPIO is optional; asserted (high) at probe time */
	host->device_reset = devm_gpiod_get_optional(dev, "reset",
						     GPIOD_OUT_HIGH);
	if (IS_ERR(host->device_reset)) {
		err = dev_err_probe(dev, PTR_ERR(host->device_reset),
				    "Failed to acquire device reset gpio\n");
		goto out_variant_clear;
	}

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
					 &host->hw_ver.minor, &host->hw_ver.step);

	/* Device ref-clk enable lives in bit 26 of REG_UFS_CFG1 */
	host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
	host->dev_ref_clk_en_mask = BIT(26);

	/* Keep the unipro core clock running while the link is active */
	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk_unipro"))
			clki->keep_link_active = true;
	}

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);
	ufs_qcom_set_host_params(hba);
	ufs_qcom_set_phy_gear(host);

	err = ufs_qcom_ice_init(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err)
		/* Failure is non-fatal */
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
			 __func__, err);

	return 0;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);

	return err;
}
1118 | ||
/*
 * ufs_qcom_exit - variant teardown: stop lane clocks, then power off and
 * exit the PHY. Managed (devm) resources are released by the driver core.
 */
static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
	phy_exit(host->generic_phy);
}
1127 | ||
a53dfc00 NR |
1128 | /** |
1129 | * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles | |
1130 | * | |
1131 | * @hba: host controller instance | |
1132 | * @cycles_in_1us: No of cycles in 1us to be configured | |
1133 | * | |
1134 | * Returns error if dme get/set configuration for 40ns fails | |
1135 | * and returns zero on success. | |
1136 | */ | |
1137 | static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba, | |
1138 | u32 cycles_in_1us) | |
1139 | { | |
1140 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); | |
1141 | u32 cycles_in_40ns; | |
1142 | u32 reg; | |
1143 | int err; | |
1144 | ||
1145 | /* | |
1146 | * UFS host controller V4.0.0 onwards needs to program | |
1147 | * PA_VS_CORE_CLK_40NS_CYCLES attribute per programmed | |
1148 | * frequency of unipro core clk of UFS host controller. | |
1149 | */ | |
1150 | if (host->hw_ver.major < 4) | |
1151 | return 0; | |
1152 | ||
1153 | /* | |
1154 | * Generic formulae for cycles_in_40ns = (freq_unipro/25) is not | |
1155 | * applicable for all frequencies. For ex: ceil(37.5 MHz/25) will | |
1156 | * be 2 and ceil(403 MHZ/25) will be 17 whereas Hardware | |
1157 | * specification expect to be 16. Hence use exact hardware spec | |
1158 | * mandated value for cycles_in_40ns instead of calculating using | |
1159 | * generic formulae. | |
1160 | */ | |
1161 | switch (cycles_in_1us) { | |
1162 | case UNIPRO_CORE_CLK_FREQ_403_MHZ: | |
1163 | cycles_in_40ns = 16; | |
1164 | break; | |
1165 | case UNIPRO_CORE_CLK_FREQ_300_MHZ: | |
1166 | cycles_in_40ns = 12; | |
1167 | break; | |
1168 | case UNIPRO_CORE_CLK_FREQ_201_5_MHZ: | |
1169 | cycles_in_40ns = 8; | |
1170 | break; | |
1171 | case UNIPRO_CORE_CLK_FREQ_150_MHZ: | |
1172 | cycles_in_40ns = 6; | |
1173 | break; | |
1174 | case UNIPRO_CORE_CLK_FREQ_100_MHZ: | |
1175 | cycles_in_40ns = 4; | |
1176 | break; | |
1177 | case UNIPRO_CORE_CLK_FREQ_75_MHZ: | |
1178 | cycles_in_40ns = 3; | |
1179 | break; | |
1180 | case UNIPRO_CORE_CLK_FREQ_37_5_MHZ: | |
1181 | cycles_in_40ns = 2; | |
1182 | break; | |
1183 | default: | |
1184 | dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n", | |
1185 | cycles_in_1us); | |
1186 | return -EINVAL; | |
1187 | } | |
1188 | ||
1189 | err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), ®); | |
1190 | if (err) | |
1191 | return err; | |
1192 | ||
1193 | reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK; | |
1194 | reg |= cycles_in_40ns; | |
1195 | ||
1196 | return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg); | |
1197 | } | |
1198 | ||
b4e13e1a | 1199 | static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up) |
f06fcc71 | 1200 | { |
07d2290f | 1201 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
b4e13e1a NR |
1202 | struct list_head *head = &hba->clk_list_head; |
1203 | struct ufs_clk_info *clki; | |
3b60f456 | 1204 | u32 cycles_in_1us = 0; |
f06fcc71 | 1205 | u32 core_clk_ctrl_reg; |
b4e13e1a NR |
1206 | int err; |
1207 | ||
1208 | list_for_each_entry(clki, head, list) { | |
1209 | if (!IS_ERR_OR_NULL(clki->clk) && | |
1210 | !strcmp(clki->name, "core_clk_unipro")) { | |
1211 | if (is_scale_up) | |
1212 | cycles_in_1us = ceil(clki->max_freq, (1000 * 1000)); | |
1213 | else | |
1214 | cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000)); | |
1215 | break; | |
1216 | } | |
1217 | } | |
f06fcc71 | 1218 | |
f06fcc71 YG |
1219 | err = ufshcd_dme_get(hba, |
1220 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), | |
1221 | &core_clk_ctrl_reg); | |
1222 | if (err) | |
031312db | 1223 | return err; |
f06fcc71 | 1224 | |
07d2290f NR |
1225 | /* Bit mask is different for UFS host controller V4.0.0 onwards */ |
1226 | if (host->hw_ver.major >= 4) { | |
b4e13e1a | 1227 | if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us)) |
07d2290f NR |
1228 | return -ERANGE; |
1229 | core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4; | |
b4e13e1a | 1230 | core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us); |
07d2290f | 1231 | } else { |
b4e13e1a | 1232 | if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us)) |
07d2290f NR |
1233 | return -ERANGE; |
1234 | core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK; | |
b4e13e1a | 1235 | core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us); |
07d2290f | 1236 | } |
f06fcc71 YG |
1237 | |
1238 | /* Clear CORE_CLK_DIV_EN */ | |
1239 | core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; | |
1240 | ||
a53dfc00 | 1241 | err = ufshcd_dme_set(hba, |
f06fcc71 YG |
1242 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), |
1243 | core_clk_ctrl_reg); | |
a53dfc00 NR |
1244 | if (err) |
1245 | return err; | |
1246 | ||
1247 | /* Configure unipro core clk 40ns attribute */ | |
1248 | return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us); | |
f06fcc71 YG |
1249 | } |
1250 | ||
/*
 * Pre-change hook for scaling clocks up: reprogram the link timers for the
 * currently negotiated power mode, then set the unipro core clock
 * attributes for the max frequency.
 */
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
	int ret;

	ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
				  attr->hs_rate, false, true);
	if (ret) {
		dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
		return ret;
	}
	/* set unipro core clock attributes and clear clock divider */
	return ufs_qcom_set_core_clk_ctrl(hba, true);
}
1266 | ||
3091181b NR |
/* No post-change work is needed when scaling up; kept for symmetry. */
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	return 0;
}
1271 | ||
f06fcc71 YG |
1272 | static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) |
1273 | { | |
f06fcc71 YG |
1274 | int err; |
1275 | u32 core_clk_ctrl_reg; | |
1276 | ||
f06fcc71 YG |
1277 | err = ufshcd_dme_get(hba, |
1278 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), | |
1279 | &core_clk_ctrl_reg); | |
1280 | ||
1281 | /* make sure CORE_CLK_DIV_EN is cleared */ | |
1282 | if (!err && | |
1283 | (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) { | |
1284 | core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; | |
1285 | err = ufshcd_dme_set(hba, | |
1286 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), | |
1287 | core_clk_ctrl_reg); | |
1288 | } | |
1289 | ||
1290 | return err; | |
1291 | } | |
1292 | ||
/*
 * Post-change hook for scaling clocks down: reprogram the unipro core
 * clock attributes for the new (current) frequency.
 */
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	/* set unipro core clock attributes and clear clock divider */
	return ufs_qcom_set_core_clk_ctrl(hba, false);
}
1298 | ||
/*
 * ufs_qcom_clk_scale_notify - clock scaling notifier
 * @hba: host controller instance
 * @scale_up: true when scaling up, false when scaling down
 * @status: PRE_CHANGE or POST_CHANGE
 *
 * The link is put into hibern8 at PRE_CHANGE and taken out again at
 * POST_CHANGE (or immediately on any error), bracketing the actual
 * frequency change made by the core driver between the two calls.
 */
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;

	/* check the host controller state before sending hibern8 cmd */
	if (!ufshcd_is_hba_active(hba))
		return 0;

	if (status == PRE_CHANGE) {
		/* Enter hibern8; it is exited in the POST_CHANGE leg */
		err = ufshcd_uic_hibern8_enter(hba);
		if (err)
			return err;
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);

		if (err) {
			/* Undo the hibern8 entry on failure */
			ufshcd_uic_hibern8_exit(hba);
			return err;
		}
	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err) {
			ufshcd_uic_hibern8_exit(hba);
			return err;
		}

		/* Re-vote interconnect bandwidth for the new gear/rate */
		ufs_qcom_icc_update_bw(host);
		ufshcd_uic_hibern8_exit(hba);
	}

	return 0;
}
1340 | ||
eba5ed35 YG |
/* Enable the test bus by setting both enable bits in REG_UFS_CFG1. */
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
		    UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
}
1347 | ||
6e3fd44d YG |
/*
 * Set a legal default test bus configuration (UNIPRO major selector with
 * minor selector 37) so ufs_qcom_testbus_config() always has valid input.
 */
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
}
1354 | ||
1355 | static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) | |
1356 | { | |
1357 | if (host->testbus.select_major >= TSTBUS_MAX) { | |
1358 | dev_err(host->hba->dev, | |
1359 | "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n", | |
1360 | __func__, host->testbus.select_major); | |
1361 | return false; | |
1362 | } | |
1363 | ||
6e3fd44d YG |
1364 | return true; |
1365 | } | |
1366 | ||
/*
 * ufs_qcom_testbus_config - program the debug test bus selectors
 * @host: Qualcomm host private data
 *
 * Maps the chosen major selector to its control register and bit offset,
 * writes the major selector into UFS_CFG1 and the minor selector into the
 * per-module control register, then enables the test bus. Returns 0 on
 * success, -EINVAL for a NULL host, -EPERM for an illegal configuration.
 */
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	/* Pick the control register and minor-selector bit offset */
	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		/* UNIPRO uses a wider minor-selector field in UFS_UNIPRO_CFG */
		reg = UFS_UNIPRO_CFG;
		offset = 20;
		mask = 0xFFF;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;
	/* Major selector lives at bit 19 of UFS_CFG1 */
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufs_qcom_enable_test_bus(host);

	return 0;
}
1446 | ||
6e3fd44d YG |
/*
 * ufs_qcom_dump_dbg_regs - dump vendor-specific debug state to the log
 * @hba: host controller instance
 *
 * Dumps the HCI vendor-specific register window and the per-module debug
 * register regions. The UTP debug RAMs are only readable while
 * UTP_DBG_RAMS_EN is set in REG_UFS_CFG1, so that bit is set around the
 * RAM dumps and cleared afterwards.
 */
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	u32 reg;
	struct ufs_qcom_host *host;

	host = ufshcd_get_variant(hba);

	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
			 "HCI Vendor Specific Registers ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");

	/* Enable access to the UTP debug RAMs */
	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
}
eba5ed35 | 1497 | |
b8416b2f BA |
/**
 * ufs_qcom_device_reset() - toggle the (optional) device reset line
 * @hba: per-adapter instance
 *
 * Toggles the (optional) reset line to reset the attached device.
 *
 * Return: 0 on success, -EOPNOTSUPP when no reset GPIO was provided.
 */
static int ufs_qcom_device_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/* reset gpio is optional */
	if (!host->device_reset)
		return -EOPNOTSUPP;

	/*
	 * The UFS device shall detect reset pulses of 1us, sleep for 10us to
	 * be on the safe side.
	 */
	ufs_qcom_device_reset_ctrl(hba, true);
	usleep_range(10, 15);

	ufs_qcom_device_reset_ctrl(hba, false);
	usleep_range(10, 15);

	return 0;
}
1524 | ||
80b21006 AD |
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/*
 * Tune the devfreq simple_ondemand governor for UFS clock scaling:
 * 60 ms delayed-timer polling, scale up above 70% load, scale down when
 * load drops more than 5% below the up-threshold.
 */
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					  struct devfreq_dev_profile *p,
					  struct devfreq_simple_ondemand_data *d)
{
	p->polling_ms = 60;
	p->timer = DEVFREQ_TIMER_DELAYED;
	d->upthreshold = 70;
	d->downdifferential = 5;
}
#else
/* Stub when the simple_ondemand governor is not built in. */
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					  struct devfreq_dev_profile *p,
					  struct devfreq_simple_ondemand_data *data)
{
}
#endif
1542 | ||
baf5ddac MS |
/*
 * Reinit notifier: power the PHY down so the following re-initialization
 * (done under UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) can bring it up
 * again with the correct settings.
 */
static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	phy_power_off(host->generic_phy);
}
1549 | ||
c263b4ef AD |
/* Resources (platform MEM resource names, indexed by RES_* enum) */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
	{.name = "ufs_mem",},
	{.name = "mcq",},
	/* Submission Queue DAO */
	{.name = "mcq_sqd",},
	/* Submission Queue Interrupt Status */
	{.name = "mcq_sqis",},
	/* Completion Queue DAO */
	{.name = "mcq_cqd",},
	/* Completion Queue Interrupt Status */
	{.name = "mcq_cqis",},
	/* MCQ vendor specific */
	{.name = "mcq_vs",},
};
1565 | ||
/*
 * ufs_qcom_mcq_config_resource - map the MCQ register regions
 * @hba: host controller instance
 *
 * Looks up each named MEM resource from ufs_res_info[] and ioremaps it.
 * Only "ufs_mem" is mandatory (it is already mapped as hba->mmio_base);
 * all others may be absent from DT. If no dedicated "mcq" region is
 * provided, the MCQ window is carved out of the ufs_mem region at the
 * offset advertised in the MCQ capabilities and inserted into
 * iomem_resource before mapping. On success hba->mcq_base points at the
 * MCQ region.
 */
static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
	struct platform_device *pdev = to_platform_device(hba->dev);
	struct ufshcd_res_info *res;
	struct resource *res_mem, *res_mcq;
	int i, ret;

	memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));

	for (i = 0; i < RES_MAX; i++) {
		res = &hba->res[i];
		res->resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     res->name);
		if (!res->resource) {
			dev_info(hba->dev, "Resource %s not provided\n", res->name);
			if (i == RES_UFS)
				return -ENODEV;
			continue;
		} else if (i == RES_UFS) {
			/* Already mapped by the core driver */
			res_mem = res->resource;
			res->base = hba->mmio_base;
			continue;
		}

		res->base = devm_ioremap_resource(hba->dev, res->resource);
		if (IS_ERR(res->base)) {
			dev_err(hba->dev, "Failed to map res %s, err=%d\n",
				res->name, (int)PTR_ERR(res->base));
			ret = PTR_ERR(res->base);
			res->base = NULL;
			return ret;
		}
	}

	/* MCQ resource provided in DT */
	res = &hba->res[RES_MCQ];
	/* Bail if MCQ resource is provided */
	if (res->base)
		goto out;

	/* Explicitly allocate MCQ resource from ufs_mem */
	res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
	if (!res_mcq)
		return -ENOMEM;

	res_mcq->start = res_mem->start +
			 MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
	res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
	res_mcq->flags = res_mem->flags;
	res_mcq->name = "mcq";

	ret = insert_resource(&iomem_resource, res_mcq);
	if (ret) {
		dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
			ret);
		return ret;
	}

	res->base = devm_ioremap_resource(hba->dev, res_mcq);
	if (IS_ERR(res->base)) {
		dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
			(int)PTR_ERR(res->base));
		ret = PTR_ERR(res->base);
		goto ioremap_err;
	}

out:
	hba->mcq_base = res->base;
	return 0;
ioremap_err:
	res->base = NULL;
	/* Undo the insert_resource() above */
	remove_resource(res_mcq);
	return ret;
}
1641 | ||
2468da61 AD |
/*
 * ufs_qcom_op_runtime_config - compute the MCQ operation register layout
 * @hba: host controller instance
 *
 * Derives each MCQ operation/runtime region from the SQD resource: the
 * regions are laid out 0x40 apart, with a 0x100 stride between queues.
 * Offsets are expressed relative to the start of the ufs_mem region.
 */
static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_res_info *mem_res, *sqdao_res;
	struct ufshcd_mcq_opr_info_t *opr;
	int i;

	mem_res = &hba->res[RES_UFS];
	sqdao_res = &hba->res[RES_MCQ_SQD];

	if (!mem_res->base || !sqdao_res->base)
		return -EINVAL;

	for (i = 0; i < OPR_MAX; i++) {
		opr = &hba->mcq_opr[i];
		opr->offset = sqdao_res->resource->start -
			      mem_res->resource->start + 0x40 * i;
		opr->stride = 0x100;
		opr->base = sqdao_res->base + 0x40 * i;
	}

	return 0;
}
1664 | ||
7224c806 AD |
/* Max Active Commands supported by the Qualcomm host controller. */
static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
{
	/* Qualcomm HC supports up to 64 */
	return MAX_SUPP_MAC;
}
1670 | ||
f87b2c41 AD |
/*
 * Read the bitmap of completion queues with outstanding entries from the
 * MCQ vendor-specific interrupt status register. Fails with -EINVAL if
 * the mcq_vs region was not mapped.
 */
static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
					unsigned long *ocqs)
{
	struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];

	if (!mcq_vs_res->base)
		return -EINVAL;

	*ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);

	return 0;
}
1683 | ||
519b6274 CG |
/* MSI write callback: forward the message to the MCQ ESI configuration. */
static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufshcd_mcq_config_esi(hba, msg);
}
1691 | ||
8f2b7865 | 1692 | static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data) |
519b6274 | 1693 | { |
8f2b7865 ZC |
1694 | struct msi_desc *desc = data; |
1695 | struct device *dev = msi_desc_to_dev(desc); | |
1696 | struct ufs_hba *hba = dev_get_drvdata(dev); | |
8f2b7865 | 1697 | u32 id = desc->msi_index; |
519b6274 CG |
1698 | struct ufs_hw_queue *hwq = &hba->uhq[id]; |
1699 | ||
1700 | ufshcd_mcq_write_cqis(hba, 0x1, id); | |
57d6ef46 | 1701 | ufshcd_mcq_poll_cqe_lock(hba, hwq); |
519b6274 CG |
1702 | |
1703 | return IRQ_HANDLED; | |
1704 | } | |
1705 | ||
1706 | static int ufs_qcom_config_esi(struct ufs_hba *hba) | |
1707 | { | |
1708 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); | |
1709 | struct msi_desc *desc; | |
1710 | struct msi_desc *failed_desc = NULL; | |
1711 | int nr_irqs, ret; | |
1712 | ||
1713 | if (host->esi_enabled) | |
1714 | return 0; | |
519b6274 CG |
1715 | |
1716 | /* | |
1717 | * 1. We only handle CQs as of now. | |
1718 | * 2. Poll queues do not need ESI. | |
1719 | */ | |
1720 | nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; | |
14fd06c7 TG |
1721 | ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs, |
1722 | ufs_qcom_write_msi_msg); | |
8f2b7865 ZC |
1723 | if (ret) { |
1724 | dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret); | |
b08d86e6 | 1725 | return ret; |
8f2b7865 | 1726 | } |
519b6274 | 1727 | |
f52a805e | 1728 | msi_lock_descs(hba->dev); |
519b6274 | 1729 | msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { |
519b6274 CG |
1730 | ret = devm_request_irq(hba->dev, desc->irq, |
1731 | ufs_qcom_mcq_esi_handler, | |
8f2b7865 | 1732 | IRQF_SHARED, "qcom-mcq-esi", desc); |
519b6274 CG |
1733 | if (ret) { |
1734 | dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n", | |
1735 | __func__, desc->irq, ret); | |
1736 | failed_desc = desc; | |
1737 | break; | |
1738 | } | |
1739 | } | |
f52a805e | 1740 | msi_unlock_descs(hba->dev); |
519b6274 CG |
1741 | |
1742 | if (ret) { | |
1743 | /* Rewind */ | |
f52a805e | 1744 | msi_lock_descs(hba->dev); |
519b6274 CG |
1745 | msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { |
1746 | if (desc == failed_desc) | |
1747 | break; | |
1748 | devm_free_irq(hba->dev, desc->irq, hba); | |
1749 | } | |
f52a805e | 1750 | msi_unlock_descs(hba->dev); |
14fd06c7 | 1751 | platform_device_msi_free_irqs_all(hba->dev); |
519b6274 CG |
1752 | } else { |
1753 | if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && | |
0e9f4375 | 1754 | host->hw_ver.step == 0) |
26cdd694 MS |
1755 | ufshcd_rmwl(hba, ESI_VEC_MASK, |
1756 | FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), | |
1757 | REG_UFS_CFG3); | |
519b6274 | 1758 | ufshcd_mcq_enable_esi(hba); |
519b6274 | 1759 | host->esi_enabled = true; |
b08d86e6 | 1760 | } |
519b6274 CG |
1761 | |
1762 | return ret; | |
1763 | } | |
1764 | ||
/*
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.  MCQ and ESI callbacks are included
 * for hosts that support the multi-circular-queue mode.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name                   = "qcom",
	.init                   = ufs_qcom_init,
	.exit                   = ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks           = ufs_qcom_setup_clocks,
	.hce_enable_notify      = ufs_qcom_hce_enable_notify,
	.link_startup_notify    = ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
	.device_reset		= ufs_qcom_device_reset,
	.config_scaling_param = ufs_qcom_config_scaling_param,
	.program_key		= ufs_qcom_ice_program_key,
	.reinit_notify		= ufs_qcom_reinit_notify,
	.mcq_config_resource	= ufs_qcom_mcq_config_resource,
	.get_hba_mac		= ufs_qcom_get_hba_mac,
	.op_runtime_config	= ufs_qcom_op_runtime_config,
	.get_outstanding_cqs	= ufs_qcom_get_outstanding_cqs,
	.config_esi		= ufs_qcom_config_esi,
};
fb819ee8 | 1795 | |
47555a5c YG |
1796 | /** |
1797 | * ufs_qcom_probe - probe routine of the driver | |
1798 | * @pdev: pointer to Platform device handle | |
1799 | * | |
3a17fefe | 1800 | * Return: zero for success and non-zero for failure. |
47555a5c YG |
1801 | */ |
1802 | static int ufs_qcom_probe(struct platform_device *pdev) | |
1803 | { | |
1804 | int err; | |
1805 | struct device *dev = &pdev->dev; | |
1806 | ||
1807 | /* Perform generic probe */ | |
1808 | err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); | |
1809 | if (err) | |
132b0272 | 1810 | return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n"); |
47555a5c | 1811 | |
132b0272 | 1812 | return 0; |
47555a5c YG |
1813 | } |
1814 | ||
/**
 * ufs_qcom_remove - removal routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Resumes the device (so it can be torn down safely), removes the UFS
 * host, and frees all platform-MSI vectors allocated for ESI.
 */
static void ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	platform_device_msi_free_irqs_all(hba->dev);
}
1829 | ||
/* Devicetree match table; __maybe_unused because OF support is optional. */
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
	{ .compatible = "qcom,ufshc"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
47555a5c | 1835 | |
e1a7752c LJ |
#ifdef CONFIG_ACPI
/* ACPI match table for ACPI-enumerated Qualcomm UFS hosts. */
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
	{ "QCOM24A5" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
1843 | ||
/* PM callbacks: runtime PM plus system sleep handlers from the UFS core. */
static const struct dev_pm_ops ufs_qcom_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
#ifdef CONFIG_PM_SLEEP
	.suspend         = ufshcd_system_suspend,
	.resume          = ufshcd_system_resume,
	.freeze          = ufshcd_system_freeze,
	.restore         = ufshcd_system_restore,
	.thaw            = ufshcd_system_thaw,
#endif
};
1856 | ||
/* Platform driver registration; matches via DT or ACPI tables above. */
static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove_new = ufs_qcom_remove,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");