Commit | Line | Data |
---|---|---|
97fb5e8d | 1 | // SPDX-License-Identifier: GPL-2.0-only |
81c0fc51 | 2 | /* |
54b879b7 | 3 | * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. |
81c0fc51 YG |
4 | */ |
5 | ||
e1a7752c | 6 | #include <linux/acpi.h> |
81c0fc51 YG |
7 | #include <linux/time.h> |
8 | #include <linux/of.h> | |
9 | #include <linux/platform_device.h> | |
10 | #include <linux/phy/phy.h> | |
b8416b2f | 11 | #include <linux/gpio/consumer.h> |
12fd5f25 | 12 | #include <linux/reset-controller.h> |
80b21006 | 13 | #include <linux/devfreq.h> |
4b9ad0b8 | 14 | |
81c0fc51 | 15 | #include "ufshcd.h" |
47555a5c | 16 | #include "ufshcd-pltfrm.h" |
81c0fc51 YG |
17 | #include "unipro.h" |
18 | #include "ufs-qcom.h" | |
19 | #include "ufshci.h" | |
56d4a186 | 20 | #include "ufs_quirks.h" |
6e3fd44d YG |
21 | #define UFS_QCOM_DEFAULT_DBG_PRINT_EN \ |
22 | (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN) | |
23 | ||
/*
 * Selectors for the UFS controller's internal hardware test bus; each value
 * names a sub-module whose signals can be routed onto the test bus (used by
 * the testbus configuration code later in this file).
 */
enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,	/* sentinel: number of valid selectors */
};
81c0fc51 YG |
39 | |
40 | static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; | |
41 | ||
6e3fd44d | 42 | static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); |
f06fcc71 YG |
43 | static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, |
44 | u32 clk_cycles); | |
45 | ||
12fd5f25 EG |
/* Map an embedded reset_controller_dev back to its owning ufs_qcom_host. */
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
	return container_of(rcd, struct ufs_qcom_host, rcdev);
}
50 | ||
/*
 * Thin adapter so ufshcd_dump_regs() can be used as a callback taking a
 * length in 32-bit words (hence len * 4 bytes); @priv is unused.
 */
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       const char *prefix, void *priv)
{
	ufshcd_dump_regs(hba, offset, len * 4, prefix);
}
56 | ||
81c0fc51 YG |
57 | static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) |
58 | { | |
59 | int err = 0; | |
60 | ||
61 | err = ufshcd_dme_get(hba, | |
62 | UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes); | |
63 | if (err) | |
64 | dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n", | |
65 | __func__, err); | |
66 | ||
67 | return err; | |
68 | } | |
69 | ||
/*
 * Look up a named clock via devm_clk_get() and store it in @clk_out.
 * When @optional is true, a missing clock (-ENOENT) is not an error and
 * *clk_out is set to NULL. -EPROBE_DEFER is returned silently so probing
 * can be retried; any other failure is logged. Returns 0 or a negative errno.
 */
static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out, bool optional)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (!IS_ERR(clk)) {
		*clk_out = clk;
		return 0;
	}

	err = PTR_ERR(clk);

	/* absent optional clock: report success with a NULL handle */
	if (optional && err == -ENOENT) {
		*clk_out = NULL;
		return 0;
	}

	if (err != -EPROBE_DEFER)
		dev_err(dev, "failed to get %s err %d\n", name, err);

	return err;
}
94 | ||
/*
 * Prepare and enable @clk, logging @name on failure.
 * Returns the clk_prepare_enable() result (0 on success).
 */
static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, ret);

	return ret;
}
106 | ||
/*
 * Disable and unprepare all lane symbol clocks, if currently enabled.
 * Clocks are released in the reverse order of ufs_qcom_enable_lane_clks();
 * lane-1 clocks may be NULL (single-lane config), which clk_disable_unprepare()
 * tolerates.
 */
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}
119 | ||
/*
 * Enable the rx/tx lane symbol clocks (lane 0 then lane 1), unwinding any
 * already-enabled clocks via the goto chain on failure. Idempotent: returns
 * 0 immediately if the clocks are already on. Returns 0 or a negative errno.
 */
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
				       host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
				       host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
				       host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
				       host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}
160 | ||
/*
 * Acquire the lane symbol clock handles from DT. Skipped entirely on ACPI
 * platforms (clocks are expected to be managed by firmware there — the
 * has_acpi_companion() early-return). Lane-0 clocks are mandatory; lane-1
 * clocks are only looked up for multi-lane links, and tx_lane1 is optional.
 * Returns 0 or a negative errno.
 */
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (has_acpi_companion(dev))
		return 0;

	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
				    &host->rx_l0_sync_clk, false);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
				    &host->tx_l0_sync_clk, false);
	if (err)
		goto out;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
					    &host->rx_l1_sync_clk, false);
		if (err)
			goto out;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
					    &host->tx_l1_sync_clk, true);
	}
out:
	return err;
}
192 | ||
/*
 * Post link-startup hook: reads PA_CONNECTEDTXDATALANES (the value itself is
 * discarded here; only the read's success/failure is propagated as the
 * return code).
 */
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	u32 tx_lanes;

	return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
}
199 | ||
/*
 * Poll the M-PHY TX FSM state until it reaches HIBERN8 or the
 * HBRN8_POLL_TOUT_MS deadline passes. On timeout the state is sampled one
 * final time (the loop may have been scheduled out past the deadline).
 * Returns 0 when the PHY is in HIBERN8, a negative errno if the DME read
 * failed, or the unexpected (non-negative) FSM state value otherwise —
 * callers treat any non-zero value as failure.
 */
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have scheduled out for long during polling so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
			__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
			__func__, err);
	}

	return err;
}
239 | ||
f06fcc71 YG |
/*
 * Select between the QUniPro core and the legacy UTP/UniPro path by toggling
 * the QUNIPRO_SEL bit in REG_UFS_CFG1 according to the host capability.
 */
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		    REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}
248 | ||
/*
 * ufs_qcom_host_reset - reset host controller and PHY
 * @hba: host controller instance
 *
 * Pulses the core reset line (assert, ~200us, deassert, ~1ms settle).
 * The controller interrupt is masked across the pulse and restored only if
 * it was enabled on entry. A missing reset control is tolerated with a
 * warning. Returns 0 or the reset_control_* error.
 */
static int ufs_qcom_host_reset(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	bool reenable_intr = false;

	if (!host->core_reset) {
		dev_warn(hba->dev, "%s: reset control not set\n", __func__);
		goto out;
	}

	/* quiesce the IRQ while the controller is being reset */
	reenable_intr = hba->is_irq_enabled;
	disable_irq(hba->irq);
	hba->is_irq_enabled = false;

	ret = reset_control_assert(host->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side add 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(host->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

	usleep_range(1000, 1100);

	if (reenable_intr) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}

out:
	return ret;
}
296 | ||
81c0fc51 YG |
297 | static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) |
298 | { | |
1ce5898a | 299 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
81c0fc51 YG |
300 | struct phy *phy = host->generic_phy; |
301 | int ret = 0; | |
81c0fc51 YG |
302 | bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) |
303 | ? true : false; | |
304 | ||
870b1279 CG |
305 | /* Reset UFS Host Controller and PHY */ |
306 | ret = ufs_qcom_host_reset(hba); | |
307 | if (ret) | |
308 | dev_warn(hba->dev, "%s: host reset returned %d\n", | |
309 | __func__, ret); | |
310 | ||
e9dc42c7 VG |
311 | if (is_rate_B) |
312 | phy_set_mode(phy, PHY_MODE_UFS_HS_B); | |
313 | ||
052553af VG |
314 | /* phy initialization - calibrate the phy */ |
315 | ret = phy_init(phy); | |
81c0fc51 | 316 | if (ret) { |
052553af | 317 | dev_err(hba->dev, "%s: phy init failed, ret = %d\n", |
4b9ad0b8 | 318 | __func__, ret); |
81c0fc51 YG |
319 | goto out; |
320 | } | |
321 | ||
052553af VG |
322 | /* power on phy - start serdes and phy's power and clocks */ |
323 | ret = phy_power_on(phy); | |
81c0fc51 | 324 | if (ret) { |
052553af | 325 | dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", |
81c0fc51 | 326 | __func__, ret); |
052553af | 327 | goto out_disable_phy; |
81c0fc51 YG |
328 | } |
329 | ||
f06fcc71 YG |
330 | ufs_qcom_select_unipro_mode(host); |
331 | ||
052553af VG |
332 | return 0; |
333 | ||
334 | out_disable_phy: | |
052553af | 335 | phy_exit(phy); |
81c0fc51 YG |
336 | out: |
337 | return ret; | |
338 | } | |
339 | ||
/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation, UTP controller CGCs are by default disabled and
 * this function enables them (after every UFS link startup) to save some power
 * leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	/* read-modify-write: set all CGC enable bits in REG_UFS_CFG2 */
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}
357 | ||
f06fcc71 YG |
/*
 * Host-controller-enable notification hook.
 * PRE_CHANGE: run the PHY power-up sequence, then enable the lane clocks
 * (the PHY PLL sources them, so order matters).
 * POST_CHANGE: verify the PHY reached HIBERN8, enable HW clock gating and
 * the inline crypto engine.
 * Returns 0 or a negative errno (-EINVAL for an unknown status).
 */
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		/* NOTE(review): return value is ignored here — confirm intentional */
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		ufs_qcom_ice_enable(host);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}
387 | ||
bc5b6816 | 388 | /* |
f06fcc71 | 389 | * Returns zero for success and non-zero in case of a failure |
81c0fc51 | 390 | */ |
f06fcc71 YG |
391 | static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, |
392 | u32 hs, u32 rate, bool update_link_startup_timer) | |
81c0fc51 | 393 | { |
f06fcc71 | 394 | int ret = 0; |
1ce5898a | 395 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
81c0fc51 YG |
396 | struct ufs_clk_info *clki; |
397 | u32 core_clk_period_in_ns; | |
398 | u32 tx_clk_cycles_per_us = 0; | |
399 | unsigned long core_clk_rate = 0; | |
400 | u32 core_clk_cycles_per_us = 0; | |
401 | ||
402 | static u32 pwm_fr_table[][2] = { | |
403 | {UFS_PWM_G1, 0x1}, | |
404 | {UFS_PWM_G2, 0x1}, | |
405 | {UFS_PWM_G3, 0x1}, | |
406 | {UFS_PWM_G4, 0x1}, | |
407 | }; | |
408 | ||
409 | static u32 hs_fr_table_rA[][2] = { | |
410 | {UFS_HS_G1, 0x1F}, | |
411 | {UFS_HS_G2, 0x3e}, | |
f06fcc71 | 412 | {UFS_HS_G3, 0x7D}, |
81c0fc51 YG |
413 | }; |
414 | ||
415 | static u32 hs_fr_table_rB[][2] = { | |
416 | {UFS_HS_G1, 0x24}, | |
417 | {UFS_HS_G2, 0x49}, | |
f06fcc71 | 418 | {UFS_HS_G3, 0x92}, |
81c0fc51 YG |
419 | }; |
420 | ||
81c7e06a YG |
421 | /* |
422 | * The Qunipro controller does not use following registers: | |
423 | * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG & | |
424 | * UFS_REG_PA_LINK_STARTUP_TIMER | |
425 | * But UTP controller uses SYS1CLK_1US_REG register for Interrupt | |
426 | * Aggregation logic. | |
427 | */ | |
428 | if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) | |
429 | goto out; | |
430 | ||
81c0fc51 YG |
431 | if (gear == 0) { |
432 | dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); | |
433 | goto out_error; | |
434 | } | |
435 | ||
436 | list_for_each_entry(clki, &hba->clk_list_head, list) { | |
437 | if (!strcmp(clki->name, "core_clk")) | |
438 | core_clk_rate = clk_get_rate(clki->clk); | |
439 | } | |
440 | ||
441 | /* If frequency is smaller than 1MHz, set to 1MHz */ | |
442 | if (core_clk_rate < DEFAULT_CLK_RATE_HZ) | |
443 | core_clk_rate = DEFAULT_CLK_RATE_HZ; | |
444 | ||
445 | core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; | |
f06fcc71 YG |
446 | if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { |
447 | ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); | |
448 | /* | |
449 | * make sure above write gets applied before we return from | |
450 | * this function. | |
451 | */ | |
452 | mb(); | |
453 | } | |
454 | ||
455 | if (ufs_qcom_cap_qunipro(host)) | |
456 | goto out; | |
81c0fc51 YG |
457 | |
458 | core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate; | |
459 | core_clk_period_in_ns <<= OFFSET_CLK_NS_REG; | |
460 | core_clk_period_in_ns &= MASK_CLK_NS_REG; | |
461 | ||
462 | switch (hs) { | |
463 | case FASTAUTO_MODE: | |
464 | case FAST_MODE: | |
465 | if (rate == PA_HS_MODE_A) { | |
466 | if (gear > ARRAY_SIZE(hs_fr_table_rA)) { | |
467 | dev_err(hba->dev, | |
468 | "%s: index %d exceeds table size %zu\n", | |
469 | __func__, gear, | |
470 | ARRAY_SIZE(hs_fr_table_rA)); | |
471 | goto out_error; | |
472 | } | |
473 | tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1]; | |
474 | } else if (rate == PA_HS_MODE_B) { | |
475 | if (gear > ARRAY_SIZE(hs_fr_table_rB)) { | |
476 | dev_err(hba->dev, | |
477 | "%s: index %d exceeds table size %zu\n", | |
478 | __func__, gear, | |
479 | ARRAY_SIZE(hs_fr_table_rB)); | |
480 | goto out_error; | |
481 | } | |
482 | tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1]; | |
483 | } else { | |
484 | dev_err(hba->dev, "%s: invalid rate = %d\n", | |
485 | __func__, rate); | |
486 | goto out_error; | |
487 | } | |
488 | break; | |
489 | case SLOWAUTO_MODE: | |
490 | case SLOW_MODE: | |
491 | if (gear > ARRAY_SIZE(pwm_fr_table)) { | |
492 | dev_err(hba->dev, | |
493 | "%s: index %d exceeds table size %zu\n", | |
494 | __func__, gear, | |
495 | ARRAY_SIZE(pwm_fr_table)); | |
496 | goto out_error; | |
497 | } | |
498 | tx_clk_cycles_per_us = pwm_fr_table[gear-1][1]; | |
499 | break; | |
500 | case UNCHANGED: | |
501 | default: | |
502 | dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); | |
503 | goto out_error; | |
504 | } | |
505 | ||
f06fcc71 YG |
506 | if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) != |
507 | (core_clk_period_in_ns | tx_clk_cycles_per_us)) { | |
508 | /* this register 2 fields shall be written at once */ | |
509 | ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, | |
510 | REG_UFS_TX_SYMBOL_CLK_NS_US); | |
511 | /* | |
512 | * make sure above write gets applied before we return from | |
513 | * this function. | |
514 | */ | |
515 | mb(); | |
516 | } | |
517 | ||
518 | if (update_link_startup_timer) { | |
519 | ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100), | |
520 | REG_UFS_PA_LINK_STARTUP_TIMER); | |
521 | /* | |
522 | * make sure that this configuration is applied before | |
523 | * we return | |
524 | */ | |
525 | mb(); | |
526 | } | |
81c0fc51 YG |
527 | goto out; |
528 | ||
529 | out_error: | |
f06fcc71 | 530 | ret = -EINVAL; |
81c0fc51 | 531 | out: |
f06fcc71 | 532 | return ret; |
81c0fc51 YG |
533 | } |
534 | ||
f06fcc71 YG |
/*
 * Link-startup notification hook.
 * PRE_CHANGE: program conservative PWM-G1 timers, set the UniPro core clock
 * divider (QUniPro only) and disable host TX LCC where the local UniPro
 * version requires it.
 * POST_CHANGE: run the post-link-startup readback.
 * Returns 0 or a negative errno.
 */
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and may be host) have issues if LCC is
		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
		 * before link startup which will make sure that both host
		 * and device TX LCC are disabled once link startup is
		 * completed.
		 *
		 * NOTE(review): this assignment can overwrite an error from
		 * the clk-ctrl call above — confirm that is intended.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_disable_host_tx_lcc(hba);

		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

out:
	return err;
}
580 | ||
b61d0414 ZC |
581 | static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) |
582 | { | |
583 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); | |
584 | ||
585 | /* reset gpio is optional */ | |
586 | if (!host->device_reset) | |
587 | return; | |
588 | ||
589 | gpiod_set_value_cansleep(host->device_reset, asserted); | |
590 | } | |
591 | ||
9561f584 PW |
/*
 * Suspend hook (POST_CHANGE only; PRE_CHANGE is a no-op).
 * Link off: drop the lane clocks before the PHY is powered down (the PHY PLL
 * sources them, so downstream clocks go first) and assert the device reset.
 * Link merely inactive: only drop the lane clocks. Always returns 0.
 */
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			    enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;

	if (status == PRE_CHANGE)
		return 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* reset the connected UFS device during power down */
		ufs_qcom_device_reset_ctrl(hba, true);

	} else if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
	}

	return 0;
}
619 | ||
/*
 * Resume hook: mirror of ufs_qcom_suspend(). Link off: power the PHY back on
 * then re-enable the lane clocks; link inactive: just re-enable the lane
 * clocks. Finally resume the inline crypto engine and clear the
 * system-suspended flag. Returns 0 or a negative errno.
 */
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	if (ufs_qcom_is_link_off(hba)) {
		err = phy_power_on(phy);
		if (err) {
			dev_err(hba->dev, "%s: failed PHY power on: %d\n",
				__func__, err);
			return err;
		}

		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;

	} else if (!ufs_qcom_is_link_active(hba)) {
		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;
	}

	err = ufs_qcom_ice_resume(host);
	if (err)
		return err;

	hba->is_sys_suspended = false;
	return 0;
}
651 | ||
f06fcc71 YG |
/*
 * Gate/ungate the device reference clock via the dedicated MMIO control
 * register. No-op when the mmio region is absent or the clock is already in
 * the requested state (the XOR check). Disabling honours the device's
 * bRefClkGatingWaitTime; enabling is followed by a 1us settle delay so a
 * subsequent hibern8-exit sees a stable ref_clk.
 */
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active for specific time after
		 * hibern8 enter.
		 */
		if (!enable) {
			unsigned long gating_wait;

			gating_wait = host->hba->dev_info.clk_gating_wait_us;
			if (!gating_wait) {
				udelay(1);
			} else {
				/*
				 * bRefClkGatingWaitTime defines the minimum
				 * time for which the reference clock is
				 * required by device during transition from
				 * HS-MODE to LS-MODE or HIBERN8 state. Give it
				 * more delay to be on the safe side.
				 */
				gating_wait += 10;
				usleep_range(gating_wait, gating_wait + 10);
			}
		}

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}
704 | ||
/*
 * Power-mode-change notification hook.
 * PRE_CHANGE: negotiate device power parameters (capping HS gear at G2 on
 * legacy v1 controllers), enable the device ref clock before entering HS,
 * and configure the PA adapt attribute on v4+ controllers.
 * POST_CHANGE: reprogram the controller timers for the agreed mode, cache
 * the parameters, and drop the device ref clock when leaving HS for PWM.
 * Returns 0 or -EINVAL.
 */
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params ufs_qcom_cap;
	int ret = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
					       dev_max_params,
					       dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
			ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);

		if (host->hw_ver.major >= 0x4) {
			ufshcd_dme_configure_adapt(hba,
						dev_req_params->gear_tx,
						PA_INITIAL_ADAPT);
		}
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
			!ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
789 | ||
56d4a186 SJ |
790 | static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) |
791 | { | |
792 | int err; | |
793 | u32 pa_vs_config_reg1; | |
794 | ||
795 | err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), | |
796 | &pa_vs_config_reg1); | |
797 | if (err) | |
798 | goto out; | |
799 | ||
800 | /* Allow extension of MSB bits of PA_SaveConfigTime attribute */ | |
801 | err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), | |
802 | (pa_vs_config_reg1 | (1 << 12))); | |
803 | ||
804 | out: | |
805 | return err; | |
806 | } | |
807 | ||
/*
 * Apply device-specific quirks after device identification: the
 * PA_SaveConfigTime workaround when the device advertises it, and the
 * PA_TACTIVATE quirk for WDC parts. Returns 0 or the DME access error from
 * the saveconfigtime quirk.
 */
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
		hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;

	return err;
}
820 | ||
ae977587 YG |
821 | static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) |
822 | { | |
1ce5898a | 823 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
ae977587 YG |
824 | |
825 | if (host->hw_ver.major == 0x1) | |
f065aca2 | 826 | return ufshci_version(1, 1); |
ae977587 | 827 | else |
f065aca2 | 828 | return ufshci_version(2, 0); |
ae977587 YG |
829 | } |
830 | ||
81c0fc51 YG |
/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * QCOM UFS host controller might have some non standard behaviours (quirks)
 * than what is specified by UFSHCI specification. Advertise all such
 * quirks to standard UFS host controller driver so standard takes them into
 * account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		/* v1.1.1 specifically has broken interrupt aggregation */
		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still need following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}
865 | ||
/*
 * Advertise host capabilities to the core driver (clock gating/scaling,
 * auto-BKOPS, write booster, crypto, aggressive power collapse) and, on v2+
 * hardware, set the variant-private QUniPro / retain-security-config caps.
 * Note: host->caps is assigned (not OR-ed) in the v2+ branch.
 */
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->caps |= UFSHCD_CAP_CRYPTO;
	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}
882 | ||
f06fcc71 YG |
/**
 * ufs_qcom_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Gates the device reference clock around the core clock transition:
 * dropped before the core clocks go off (PRE_CHANGE, link inactive) and
 * restored after they come back on (POST_CHANGE, HS mode only).
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	switch (status) {
	case PRE_CHANGE:
		if (!on) {
			if (!ufs_qcom_is_link_active(hba)) {
				/* disable device ref_clk */
				ufs_qcom_dev_ref_clk_ctrl(host, false);
			}
		}
		break;
	case POST_CHANGE:
		if (on) {
			/* enable the device ref clock for HS mode*/
			if (ufshcd_is_hs_mode(&hba->pwr_info))
				ufs_qcom_dev_ref_clk_ctrl(host, true);
		}
		break;
	}

	return 0;
}
924 | ||
12fd5f25 EG |
925 | static int |
926 | ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) | |
927 | { | |
928 | struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev); | |
929 | ||
930 | /* Currently this code only knows about a single reset. */ | |
931 | WARN_ON(id); | |
932 | ufs_qcom_assert_reset(host->hba); | |
933 | /* provide 1ms delay to let the reset pulse propagate. */ | |
934 | usleep_range(1000, 1100); | |
935 | return 0; | |
936 | } | |
937 | ||
938 | static int | |
939 | ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) | |
940 | { | |
941 | struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev); | |
942 | ||
943 | /* Currently this code only knows about a single reset. */ | |
944 | WARN_ON(id); | |
945 | ufs_qcom_deassert_reset(host->hba); | |
946 | ||
947 | /* | |
948 | * after reset deassertion, phy will need all ref clocks, | |
949 | * voltage, current to settle down before starting serdes. | |
950 | */ | |
951 | usleep_range(1000, 1100); | |
952 | return 0; | |
953 | } | |
954 | ||
/* Reset-controller callbacks backing the "rst" line exposed to consumers. */
static const struct reset_control_ops ufs_qcom_reset_ops = {
	.assert = ufs_qcom_reset_assert,
	.deassert = ufs_qcom_reset_deassert,
};
959 | ||
81c0fc51 YG |
960 | /** |
961 | * ufs_qcom_init - bind phy with controller | |
962 | * @hba: host controller instance | |
963 | * | |
964 | * Binds PHY with controller and powers up PHY enabling clocks | |
965 | * and regulators. | |
966 | * | |
967 | * Returns -EPROBE_DEFER if binding fails, returns negative error | |
968 | * on phy power up failure and returns zero on success. | |
969 | */ | |
970 | static int ufs_qcom_init(struct ufs_hba *hba) | |
971 | { | |
972 | int err; | |
973 | struct device *dev = hba->dev; | |
f06fcc71 | 974 | struct platform_device *pdev = to_platform_device(dev); |
81c0fc51 | 975 | struct ufs_qcom_host *host; |
f06fcc71 | 976 | struct resource *res; |
96f08cc5 | 977 | struct ufs_clk_info *clki; |
81c0fc51 | 978 | |
81c0fc51 YG |
979 | host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); |
980 | if (!host) { | |
981 | err = -ENOMEM; | |
982 | dev_err(dev, "%s: no memory for qcom ufs host\n", __func__); | |
983 | goto out; | |
984 | } | |
985 | ||
f06fcc71 | 986 | /* Make a two way bind between the qcom host and the hba */ |
81c0fc51 | 987 | host->hba = hba; |
1ce5898a | 988 | ufshcd_set_variant(hba, host); |
81c0fc51 | 989 | |
870b1279 CG |
990 | /* Setup the reset control of HCI */ |
991 | host->core_reset = devm_reset_control_get(hba->dev, "rst"); | |
992 | if (IS_ERR(host->core_reset)) { | |
993 | err = PTR_ERR(host->core_reset); | |
994 | dev_warn(dev, "Failed to get reset control %d\n", err); | |
995 | host->core_reset = NULL; | |
996 | err = 0; | |
997 | } | |
998 | ||
12fd5f25 EG |
999 | /* Fire up the reset controller. Failure here is non-fatal. */ |
1000 | host->rcdev.of_node = dev->of_node; | |
1001 | host->rcdev.ops = &ufs_qcom_reset_ops; | |
1002 | host->rcdev.owner = dev->driver->owner; | |
1003 | host->rcdev.nr_resets = 1; | |
1004 | err = devm_reset_controller_register(dev, &host->rcdev); | |
1005 | if (err) { | |
1006 | dev_warn(dev, "Failed to register reset controller\n"); | |
1007 | err = 0; | |
1008 | } | |
1009 | ||
f06fcc71 YG |
1010 | /* |
1011 | * voting/devoting device ref_clk source is time consuming hence | |
1012 | * skip devoting it during aggressive clock gating. This clock | |
1013 | * will still be gated off during runtime suspend. | |
1014 | */ | |
81c0fc51 YG |
1015 | host->generic_phy = devm_phy_get(dev, "ufsphy"); |
1016 | ||
ab436706 YG |
1017 | if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) { |
1018 | /* | |
1019 | * UFS driver might be probed before the phy driver does. | |
1020 | * In that case we would like to return EPROBE_DEFER code. | |
1021 | */ | |
1022 | err = -EPROBE_DEFER; | |
1023 | dev_warn(dev, "%s: required phy device. hasn't probed yet. err = %d\n", | |
1024 | __func__, err); | |
1025 | goto out_variant_clear; | |
1026 | } else if (IS_ERR(host->generic_phy)) { | |
e1a7752c LJ |
1027 | if (has_acpi_companion(dev)) { |
1028 | host->generic_phy = NULL; | |
1029 | } else { | |
1030 | err = PTR_ERR(host->generic_phy); | |
1031 | dev_err(dev, "%s: PHY get failed %d\n", __func__, err); | |
1032 | goto out_variant_clear; | |
1033 | } | |
81c0fc51 YG |
1034 | } |
1035 | ||
b8416b2f BA |
1036 | host->device_reset = devm_gpiod_get_optional(dev, "reset", |
1037 | GPIOD_OUT_HIGH); | |
1038 | if (IS_ERR(host->device_reset)) { | |
1039 | err = PTR_ERR(host->device_reset); | |
1040 | if (err != -EPROBE_DEFER) | |
1041 | dev_err(dev, "failed to acquire reset gpio: %d\n", err); | |
1042 | goto out_variant_clear; | |
1043 | } | |
1044 | ||
bfdbe8ba YG |
1045 | ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, |
1046 | &host->hw_ver.minor, &host->hw_ver.step); | |
1047 | ||
f06fcc71 YG |
1048 | /* |
1049 | * for newer controllers, device reference clock control bit has | |
1050 | * moved inside UFS controller register address space itself. | |
1051 | */ | |
1052 | if (host->hw_ver.major >= 0x02) { | |
1053 | host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; | |
1054 | host->dev_ref_clk_en_mask = BIT(26); | |
1055 | } else { | |
1056 | /* "dev_ref_clk_ctrl_mem" is optional resource */ | |
083dd788 EB |
1057 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
1058 | "dev_ref_clk_ctrl_mem"); | |
f06fcc71 YG |
1059 | if (res) { |
1060 | host->dev_ref_clk_ctrl_mmio = | |
1061 | devm_ioremap_resource(dev, res); | |
790f9a48 | 1062 | if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) |
f06fcc71 | 1063 | host->dev_ref_clk_ctrl_mmio = NULL; |
f06fcc71 YG |
1064 | host->dev_ref_clk_en_mask = BIT(5); |
1065 | } | |
1066 | } | |
1067 | ||
96f08cc5 CG |
1068 | list_for_each_entry(clki, &hba->clk_list_head, list) { |
1069 | if (!strcmp(clki->name, "core_clk_unipro")) | |
1070 | clki->keep_link_active = true; | |
1071 | } | |
1072 | ||
81c0fc51 YG |
1073 | err = ufs_qcom_init_lane_clks(host); |
1074 | if (err) | |
052553af | 1075 | goto out_variant_clear; |
81c0fc51 | 1076 | |
cad2e03d | 1077 | ufs_qcom_set_caps(hba); |
81c0fc51 YG |
1078 | ufs_qcom_advertise_quirks(hba); |
1079 | ||
df4ec2fa EB |
1080 | err = ufs_qcom_ice_init(host); |
1081 | if (err) | |
1082 | goto out_variant_clear; | |
1083 | ||
1e879e8f | 1084 | ufs_qcom_setup_clocks(hba, true, POST_CHANGE); |
81c0fc51 YG |
1085 | |
1086 | if (hba->dev->id < MAX_UFS_QCOM_HOSTS) | |
1087 | ufs_qcom_hosts[hba->dev->id] = host; | |
1088 | ||
6e3fd44d YG |
1089 | host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN; |
1090 | ufs_qcom_get_default_testbus_cfg(host); | |
1091 | err = ufs_qcom_testbus_config(host); | |
1092 | if (err) { | |
1093 | dev_warn(dev, "%s: failed to configure the testbus %d\n", | |
1094 | __func__, err); | |
1095 | err = 0; | |
1096 | } | |
1097 | ||
81c0fc51 YG |
1098 | goto out; |
1099 | ||
a6854dff | 1100 | out_variant_clear: |
1ce5898a | 1101 | ufshcd_set_variant(hba, NULL); |
81c0fc51 YG |
1102 | out: |
1103 | return err; | |
1104 | } | |
1105 | ||
/*
 * ufs_qcom_exit - undo ufs_qcom_init()
 * @hba: host controller instance
 *
 * Stops the lane clocks, then powers off and exits the PHY; the order
 * mirrors the reverse of initialization.
 */
static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
	phy_exit(host->generic_phy);
}
1114 | ||
f06fcc71 YG |
1115 | static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, |
1116 | u32 clk_cycles) | |
1117 | { | |
1118 | int err; | |
1119 | u32 core_clk_ctrl_reg; | |
1120 | ||
1121 | if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK) | |
1122 | return -EINVAL; | |
1123 | ||
1124 | err = ufshcd_dme_get(hba, | |
1125 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), | |
1126 | &core_clk_ctrl_reg); | |
1127 | if (err) | |
1128 | goto out; | |
1129 | ||
1130 | core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK; | |
1131 | core_clk_ctrl_reg |= clk_cycles; | |
1132 | ||
1133 | /* Clear CORE_CLK_DIV_EN */ | |
1134 | core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; | |
1135 | ||
1136 | err = ufshcd_dme_set(hba, | |
1137 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), | |
1138 | core_clk_ctrl_reg); | |
1139 | out: | |
1140 | return err; | |
1141 | } | |
1142 | ||
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* no programming is required before scaling up on this controller */
	return 0;
}
1148 | ||
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (ufs_qcom_cap_qunipro(host))
		/* 150 unipro core clock cycles per us, divider disabled */
		return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);

	return 0;
}
1159 | ||
1160 | static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) | |
1161 | { | |
1162 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); | |
1163 | int err; | |
1164 | u32 core_clk_ctrl_reg; | |
1165 | ||
1166 | if (!ufs_qcom_cap_qunipro(host)) | |
1167 | return 0; | |
1168 | ||
1169 | err = ufshcd_dme_get(hba, | |
1170 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), | |
1171 | &core_clk_ctrl_reg); | |
1172 | ||
1173 | /* make sure CORE_CLK_DIV_EN is cleared */ | |
1174 | if (!err && | |
1175 | (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) { | |
1176 | core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; | |
1177 | err = ufshcd_dme_set(hba, | |
1178 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), | |
1179 | core_clk_ctrl_reg); | |
1180 | } | |
1181 | ||
1182 | return err; | |
1183 | } | |
1184 | ||
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (ufs_qcom_cap_qunipro(host))
		/* 75 unipro core clock cycles per us, divider disabled */
		return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);

	return 0;
}
1195 | ||
/*
 * ufs_qcom_clk_scale_notify - clock scaling pre/post notification
 * @hba: host controller instance
 * @scale_up: true when scaling up, false when scaling down
 * @status: PRE_CHANGE or POST_CHANGE
 *
 * On PRE_CHANGE the link is put into hibern8 so the core clock can be
 * reprogrammed safely; on POST_CHANGE the unipro clock attributes and
 * timers are updated and hibern8 is exited again.  Returns 0 or a
 * negative error code.
 */
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		/* quiesce the link before touching the core clock */
		err = ufshcd_uic_hibern8_enter(hba);
		if (err)
			return err;
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
		/* on failure the POST_CHANGE exit won't run; leave hibern8 now */
		if (err)
			ufshcd_uic_hibern8_exit(hba);

	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);


		if (err || !dev_req_params) {
			ufshcd_uic_hibern8_exit(hba);
			goto out;
		}

		/* retune timers for the new frequency, then resume the link */
		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufshcd_uic_hibern8_exit(hba);
	}

out:
	return err;
}
1237 | ||
eba5ed35 YG |
/*
 * ufs_qcom_print_hw_debug_reg_all - dump all qcom debug register banks
 * @hba: host controller instance
 * @priv: opaque cookie passed through to @print_fn
 * @print_fn: callback that prints @num_regs registers starting at @offset
 *
 * Walks every vendor debug register bank and hands each range to
 * @print_fn.  The RAM banks (EDTL/DESC/PRDT) are only readable while
 * UTP_DBG_RAMS_EN is set in REG_UFS_CFG1, so that bit is set before and
 * cleared after those three dumps — the dump order is significant.
 */
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
		void *priv, void (*print_fn)(struct ufs_hba *hba,
		int offset, int num_regs, const char *str, void *priv))
{
	u32 reg;
	struct ufs_qcom_host *host;

	if (unlikely(!hba)) {
		pr_err("%s: hba is NULL\n", __func__);
		return;
	}
	if (unlikely(!print_fn)) {
		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
		return;
	}

	host = ufshcd_get_variant(hba);
	/* skip entirely when register dumping is not enabled */
	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
		return;

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

	/* enable access to the debug RAM banks */
	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}
1298 | ||
1299 | static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) | |
1300 | { | |
9c46b867 VG |
1301 | if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) { |
1302 | ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, | |
1303 | UFS_REG_TEST_BUS_EN, REG_UFS_CFG1); | |
eba5ed35 | 1304 | ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); |
9c46b867 VG |
1305 | } else { |
1306 | ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1); | |
eba5ed35 | 1307 | ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1); |
9c46b867 | 1308 | } |
eba5ed35 YG |
1309 | } |
1310 | ||
6e3fd44d YG |
1311 | static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) |
1312 | { | |
1313 | /* provide a legal default configuration */ | |
9c46b867 VG |
1314 | host->testbus.select_major = TSTBUS_UNIPRO; |
1315 | host->testbus.select_minor = 37; | |
6e3fd44d YG |
1316 | } |
1317 | ||
1318 | static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) | |
1319 | { | |
1320 | if (host->testbus.select_major >= TSTBUS_MAX) { | |
1321 | dev_err(host->hba->dev, | |
1322 | "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n", | |
1323 | __func__, host->testbus.select_major); | |
1324 | return false; | |
1325 | } | |
1326 | ||
6e3fd44d YG |
1327 | return true; |
1328 | } | |
1329 | ||
/*
 * ufs_qcom_testbus_config - route the selected test bus to the output
 * @host: qcom UFS host
 *
 * Maps host->testbus.select_major to the control register and bit offset
 * that hold its minor selector, programs both major and minor selectors,
 * and enables the test bus.  Returns 0, -EINVAL on a NULL host, or
 * -EPERM when the configured selector is out of range.
 */
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	/* each major bus keeps its minor selector in one of three
	 * TEST_BUS_CTRL registers, packed four fields per register */
	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		/* UniPro uses a wider (12-bit) selector in its own register */
		reg = UFS_UNIPRO_CFG;
		offset = 20;
		mask = 0xFFF;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;
	/* major selector lives at bit 19 of REG_UFS_CFG1 */
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufs_qcom_enable_test_bus(host);
	/*
	 * Make sure the test bus configuration is
	 * committed before returning.
	 */
	mb();

	return 0;
}
1414 | ||
6e3fd44d YG |
1415 | static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) |
1416 | { | |
ba80917d TW |
1417 | ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, |
1418 | "HCI Vendor Specific Registers "); | |
6e3fd44d | 1419 | |
eba5ed35 | 1420 | ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); |
6e3fd44d | 1421 | } |
eba5ed35 | 1422 | |
b8416b2f BA |
1423 | /** |
1424 | * ufs_qcom_device_reset() - toggle the (optional) device reset line | |
1425 | * @hba: per-adapter instance | |
1426 | * | |
1427 | * Toggles the (optional) reset line to reset the attached device. | |
1428 | */ | |
151f1b66 | 1429 | static int ufs_qcom_device_reset(struct ufs_hba *hba) |
b8416b2f BA |
1430 | { |
1431 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); | |
1432 | ||
1433 | /* reset gpio is optional */ | |
1434 | if (!host->device_reset) | |
151f1b66 | 1435 | return -EOPNOTSUPP; |
b8416b2f BA |
1436 | |
1437 | /* | |
1438 | * The UFS device shall detect reset pulses of 1us, sleep for 10us to | |
1439 | * be on the safe side. | |
1440 | */ | |
b61d0414 | 1441 | ufs_qcom_device_reset_ctrl(hba, true); |
b8416b2f BA |
1442 | usleep_range(10, 15); |
1443 | ||
b61d0414 | 1444 | ufs_qcom_device_reset_ctrl(hba, false); |
b8416b2f | 1445 | usleep_range(10, 15); |
151f1b66 AH |
1446 | |
1447 | return 0; | |
b8416b2f BA |
1448 | } |
1449 | ||
80b21006 AD |
1450 | #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) |
1451 | static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, | |
1452 | struct devfreq_dev_profile *p, | |
1453 | void *data) | |
1454 | { | |
1455 | static struct devfreq_simple_ondemand_data *d; | |
1456 | ||
1457 | if (!data) | |
1458 | return; | |
1459 | ||
1460 | d = (struct devfreq_simple_ondemand_data *)data; | |
1461 | p->polling_ms = 60; | |
1462 | d->upthreshold = 70; | |
1463 | d->downdifferential = 5; | |
1464 | } | |
1465 | #else | |
1466 | static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, | |
1467 | struct devfreq_dev_profile *p, | |
1468 | void *data) | |
1469 | { | |
1470 | } | |
1471 | #endif | |
1472 | ||
bc5b6816 | 1473 | /* |
81c0fc51 YG |
1474 | * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations |
1475 | * | |
1476 | * The variant operations configure the necessary controller and PHY | |
1477 | * handshake during initialization. | |
1478 | */ | |
d508e31d | 1479 | static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { |
81c0fc51 YG |
1480 | .name = "qcom", |
1481 | .init = ufs_qcom_init, | |
1482 | .exit = ufs_qcom_exit, | |
ae977587 | 1483 | .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version, |
81c0fc51 YG |
1484 | .clk_scale_notify = ufs_qcom_clk_scale_notify, |
1485 | .setup_clocks = ufs_qcom_setup_clocks, | |
1486 | .hce_enable_notify = ufs_qcom_hce_enable_notify, | |
1487 | .link_startup_notify = ufs_qcom_link_startup_notify, | |
1488 | .pwr_change_notify = ufs_qcom_pwr_change_notify, | |
56d4a186 | 1489 | .apply_dev_quirks = ufs_qcom_apply_dev_quirks, |
81c0fc51 YG |
1490 | .suspend = ufs_qcom_suspend, |
1491 | .resume = ufs_qcom_resume, | |
6e3fd44d | 1492 | .dbg_register_dump = ufs_qcom_dump_dbg_regs, |
b8416b2f | 1493 | .device_reset = ufs_qcom_device_reset, |
80b21006 | 1494 | .config_scaling_param = ufs_qcom_config_scaling_param, |
df4ec2fa | 1495 | .program_key = ufs_qcom_ice_program_key, |
81c0fc51 | 1496 | }; |
fb819ee8 | 1497 | |
47555a5c YG |
1498 | /** |
1499 | * ufs_qcom_probe - probe routine of the driver | |
1500 | * @pdev: pointer to Platform device handle | |
1501 | * | |
1502 | * Return zero for success and non-zero for failure | |
1503 | */ | |
1504 | static int ufs_qcom_probe(struct platform_device *pdev) | |
1505 | { | |
1506 | int err; | |
1507 | struct device *dev = &pdev->dev; | |
1508 | ||
1509 | /* Perform generic probe */ | |
1510 | err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); | |
1511 | if (err) | |
1512 | dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); | |
1513 | ||
1514 | return err; | |
1515 | } | |
1516 | ||
1517 | /** | |
1518 | * ufs_qcom_remove - set driver_data of the device to NULL | |
1519 | * @pdev: pointer to platform device handle | |
1520 | * | |
4b9ad0b8 | 1521 | * Always returns 0 |
47555a5c YG |
1522 | */ |
1523 | static int ufs_qcom_remove(struct platform_device *pdev) | |
1524 | { | |
1525 | struct ufs_hba *hba = platform_get_drvdata(pdev); | |
1526 | ||
1527 | pm_runtime_get_sync(&(pdev)->dev); | |
1528 | ufshcd_remove(hba); | |
1529 | return 0; | |
1530 | } | |
1531 | ||
/* Device-tree match table for the qcom UFS host controller. */
static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
47555a5c | 1537 | |
e1a7752c LJ |
#ifdef CONFIG_ACPI
/* ACPI match table (QCOM24A5 is the qcom UFS controller HID). */
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
	{ "QCOM24A5" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
1545 | ||
47555a5c | 1546 | static const struct dev_pm_ops ufs_qcom_pm_ops = { |
f1ecbe1e BVA |
1547 | SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) |
1548 | SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) | |
b294ff3e AD |
1549 | .prepare = ufshcd_suspend_prepare, |
1550 | .complete = ufshcd_resume_complete, | |
47555a5c YG |
1551 | }; |
1552 | ||
/* Platform driver glue: OF and ACPI matching, PM ops, core shutdown. */
static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");