Commit | Line | Data |
---|---|---|
adaafaa3 YG |
1 | /* |
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 and | |
6 | * only version 2 as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include "phy-qcom-ufs-i.h" | |
16 | ||
17 | #define MAX_PROP_NAME 32 | |
18 | #define VDDA_PHY_MIN_UV 1000000 | |
19 | #define VDDA_PHY_MAX_UV 1000000 | |
20 | #define VDDA_PLL_MIN_UV 1800000 | |
21 | #define VDDA_PLL_MAX_UV 1800000 | |
22 | #define VDDP_REF_CLK_MIN_UV 1200000 | |
23 | #define VDDP_REF_CLK_MAX_UV 1200000 | |
24 | ||
adaafaa3 YG |
25 | int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, |
26 | struct ufs_qcom_phy_calibration *tbl_A, | |
27 | int tbl_size_A, | |
28 | struct ufs_qcom_phy_calibration *tbl_B, | |
29 | int tbl_size_B, bool is_rate_B) | |
30 | { | |
31 | int i; | |
32 | int ret = 0; | |
33 | ||
34 | if (!tbl_A) { | |
35 | dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__); | |
36 | ret = EINVAL; | |
37 | goto out; | |
38 | } | |
39 | ||
40 | for (i = 0; i < tbl_size_A; i++) | |
41 | writel_relaxed(tbl_A[i].cfg_value, | |
42 | ufs_qcom_phy->mmio + tbl_A[i].reg_offset); | |
43 | ||
44 | /* | |
45 | * In case we would like to work in rate B, we need | |
46 | * to override a registers that were configured in rate A table | |
47 | * with registers of rate B table. | |
48 | * table. | |
49 | */ | |
50 | if (is_rate_B) { | |
51 | if (!tbl_B) { | |
52 | dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL", | |
53 | __func__); | |
54 | ret = EINVAL; | |
55 | goto out; | |
56 | } | |
57 | ||
58 | for (i = 0; i < tbl_size_B; i++) | |
59 | writel_relaxed(tbl_B[i].cfg_value, | |
60 | ufs_qcom_phy->mmio + tbl_B[i].reg_offset); | |
61 | } | |
62 | ||
63 | /* flush buffered writes */ | |
64 | mb(); | |
65 | ||
66 | out: | |
67 | return ret; | |
68 | } | |
358d6c87 | 69 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate); |
adaafaa3 | 70 | |
adaafaa3 YG |
/*
 * This assumes the embedded phy structure inside generic_phy is of type
 * struct ufs_qcom_phy. In order to function properly it's crucial
 * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
 * as the first inside generic_phy.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	/* drvdata is set by the vendor-specific probe; the cast is the whole job */
	return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);
adaafaa3 YG |
82 | |
/*
 * Map the PHY register spaces described by the platform device.
 *
 * "phy_mem" is mandatory and is mapped into phy_common->mmio;
 * "dev_ref_clk_ctrl_mem" is optional and dev_ref_clk_ctrl_mmio is left
 * NULL when it is absent or fails to map.
 *
 * Return: 0 on success or a negative errno if "phy_mem" cannot be mapped.
 */
static
int ufs_qcom_phy_base_init(struct platform_device *pdev,
			   struct ufs_qcom_phy *phy_common)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err = 0;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
	/* devm_ioremap_resource() validates res (including NULL) itself */
	phy_common->mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR((void const *)phy_common->mmio)) {
		err = PTR_ERR((void const *)phy_common->mmio);
		phy_common->mmio = NULL;
		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
			__func__, err);
		return err;
	}

	/* "dev_ref_clk_ctrl_mem" is optional resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "dev_ref_clk_ctrl_mem");
	phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
	/* optional: fall back to NULL so users can simply test the pointer */
	if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
		phy_common->dev_ref_clk_ctrl_mmio = NULL;

	return 0;
}
110 | ||
15887cb8 VG |
/**
 * ufs_qcom_phy_generic_probe() - common probe path for QCOM UFS PHY drivers
 * @pdev:	platform device for the PHY
 * @common_cfg:	vendor driver's embedded common PHY structure to fill in
 * @ufs_qcom_phy_gen_ops:	generic phy_ops used to create the phy
 * @phy_spec_ops:	vendor-specific callbacks, stored for later use
 *
 * Maps register space, registers a PHY provider and creates the generic
 * PHY object.
 *
 * Return: the created phy, or NULL on any failure (callers of this helper
 * check for NULL, not an ERR_PTR).
 */
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
		struct ufs_qcom_phy *common_cfg,
		const struct phy_ops *ufs_qcom_phy_gen_ops,
		struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	/*
	 * NOTE(review): the provider is registered before devm_phy_create();
	 * of_phy_simple_xlate tolerates a not-yet-created phy, but confirm
	 * this ordering is intentional.
	 */
	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err = PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		generic_phy = NULL;
		goto out;
	}

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

out:
	return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
149 | ||
89bd296b | 150 | static int __ufs_qcom_phy_clk_get(struct device *dev, |
adaafaa3 YG |
151 | const char *name, struct clk **clk_out, bool err_print) |
152 | { | |
153 | struct clk *clk; | |
154 | int err = 0; | |
adaafaa3 YG |
155 | |
156 | clk = devm_clk_get(dev, name); | |
157 | if (IS_ERR(clk)) { | |
158 | err = PTR_ERR(clk); | |
159 | if (err_print) | |
160 | dev_err(dev, "failed to get %s err %d", name, err); | |
161 | } else { | |
162 | *clk_out = clk; | |
163 | } | |
164 | ||
165 | return err; | |
166 | } | |
167 | ||
89bd296b | 168 | static int ufs_qcom_phy_clk_get(struct device *dev, |
adaafaa3 YG |
169 | const char *name, struct clk **clk_out) |
170 | { | |
89bd296b | 171 | return __ufs_qcom_phy_clk_get(dev, name, clk_out, true); |
adaafaa3 YG |
172 | } |
173 | ||
89bd296b | 174 | int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common) |
adaafaa3 YG |
175 | { |
176 | int err; | |
177 | ||
300f9677 VG |
178 | if (of_device_is_compatible(phy_common->dev->of_node, |
179 | "qcom,msm8996-ufs-phy-qmp-14nm")) | |
180 | goto skip_txrx_clk; | |
181 | ||
89bd296b | 182 | err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk", |
adaafaa3 YG |
183 | &phy_common->tx_iface_clk); |
184 | if (err) | |
185 | goto out; | |
186 | ||
89bd296b | 187 | err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk", |
adaafaa3 YG |
188 | &phy_common->rx_iface_clk); |
189 | if (err) | |
190 | goto out; | |
191 | ||
0b10f64d | 192 | skip_txrx_clk: |
89bd296b | 193 | err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src", |
adaafaa3 YG |
194 | &phy_common->ref_clk_src); |
195 | if (err) | |
196 | goto out; | |
197 | ||
198 | /* | |
199 | * "ref_clk_parent" is optional hence don't abort init if it's not | |
200 | * found. | |
201 | */ | |
89bd296b | 202 | __ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent", |
adaafaa3 YG |
203 | &phy_common->ref_clk_parent, false); |
204 | ||
89bd296b | 205 | err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk", |
adaafaa3 YG |
206 | &phy_common->ref_clk); |
207 | ||
208 | out: | |
209 | return err; | |
210 | } | |
358d6c87 | 211 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks); |
adaafaa3 | 212 | |
3471426f BA |
213 | static int ufs_qcom_phy_init_vreg(struct device *dev, |
214 | struct ufs_qcom_phy_vreg *vreg, | |
215 | const char *name) | |
adaafaa3 YG |
216 | { |
217 | int err = 0; | |
adaafaa3 YG |
218 | |
219 | char prop_name[MAX_PROP_NAME]; | |
220 | ||
e7d5e412 | 221 | vreg->name = name; |
adaafaa3 YG |
222 | vreg->reg = devm_regulator_get(dev, name); |
223 | if (IS_ERR(vreg->reg)) { | |
224 | err = PTR_ERR(vreg->reg); | |
3471426f | 225 | dev_err(dev, "failed to get %s, %d\n", name, err); |
adaafaa3 YG |
226 | goto out; |
227 | } | |
228 | ||
229 | if (dev->of_node) { | |
230 | snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name); | |
231 | err = of_property_read_u32(dev->of_node, | |
232 | prop_name, &vreg->max_uA); | |
233 | if (err && err != -EINVAL) { | |
234 | dev_err(dev, "%s: failed to read %s\n", | |
235 | __func__, prop_name); | |
236 | goto out; | |
237 | } else if (err == -EINVAL || !vreg->max_uA) { | |
238 | if (regulator_count_voltages(vreg->reg) > 0) { | |
239 | dev_err(dev, "%s: %s is mandatory\n", | |
240 | __func__, prop_name); | |
241 | goto out; | |
242 | } | |
243 | err = 0; | |
244 | } | |
adaafaa3 YG |
245 | } |
246 | ||
247 | if (!strcmp(name, "vdda-pll")) { | |
248 | vreg->max_uV = VDDA_PLL_MAX_UV; | |
249 | vreg->min_uV = VDDA_PLL_MIN_UV; | |
250 | } else if (!strcmp(name, "vdda-phy")) { | |
251 | vreg->max_uV = VDDA_PHY_MAX_UV; | |
252 | vreg->min_uV = VDDA_PHY_MIN_UV; | |
253 | } else if (!strcmp(name, "vddp-ref-clk")) { | |
254 | vreg->max_uV = VDDP_REF_CLK_MAX_UV; | |
255 | vreg->min_uV = VDDP_REF_CLK_MIN_UV; | |
256 | } | |
257 | ||
258 | out: | |
adaafaa3 YG |
259 | return err; |
260 | } | |
261 | ||
15887cb8 VG |
262 | int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common) |
263 | { | |
264 | int err; | |
265 | ||
266 | err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll, | |
267 | "vdda-pll"); | |
268 | if (err) | |
269 | goto out; | |
270 | ||
271 | err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy, | |
272 | "vdda-phy"); | |
273 | ||
274 | if (err) | |
275 | goto out; | |
276 | ||
3471426f BA |
277 | err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk, |
278 | "vddp-ref-clk"); | |
279 | ||
15887cb8 VG |
280 | out: |
281 | return err; | |
282 | } | |
283 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators); | |
284 | ||
89bd296b | 285 | static int ufs_qcom_phy_cfg_vreg(struct device *dev, |
adaafaa3 YG |
286 | struct ufs_qcom_phy_vreg *vreg, bool on) |
287 | { | |
288 | int ret = 0; | |
289 | struct regulator *reg = vreg->reg; | |
290 | const char *name = vreg->name; | |
291 | int min_uV; | |
292 | int uA_load; | |
adaafaa3 | 293 | |
adaafaa3 YG |
294 | if (regulator_count_voltages(reg) > 0) { |
295 | min_uV = on ? vreg->min_uV : 0; | |
296 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); | |
297 | if (ret) { | |
298 | dev_err(dev, "%s: %s set voltage failed, err=%d\n", | |
299 | __func__, name, ret); | |
300 | goto out; | |
301 | } | |
302 | uA_load = on ? vreg->max_uA : 0; | |
7e476c7d | 303 | ret = regulator_set_load(reg, uA_load); |
adaafaa3 YG |
304 | if (ret >= 0) { |
305 | /* | |
7e476c7d | 306 | * regulator_set_load() returns new regulator |
adaafaa3 YG |
307 | * mode upon success. |
308 | */ | |
309 | ret = 0; | |
310 | } else { | |
311 | dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n", | |
312 | __func__, name, uA_load, ret); | |
313 | goto out; | |
314 | } | |
315 | } | |
316 | out: | |
317 | return ret; | |
318 | } | |
319 | ||
89bd296b | 320 | static int ufs_qcom_phy_enable_vreg(struct device *dev, |
adaafaa3 YG |
321 | struct ufs_qcom_phy_vreg *vreg) |
322 | { | |
adaafaa3 YG |
323 | int ret = 0; |
324 | ||
325 | if (!vreg || vreg->enabled) | |
326 | goto out; | |
327 | ||
89bd296b | 328 | ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true); |
adaafaa3 YG |
329 | if (ret) { |
330 | dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n", | |
331 | __func__, ret); | |
332 | goto out; | |
333 | } | |
334 | ||
335 | ret = regulator_enable(vreg->reg); | |
336 | if (ret) { | |
337 | dev_err(dev, "%s: enable failed, err=%d\n", | |
338 | __func__, ret); | |
339 | goto out; | |
340 | } | |
341 | ||
342 | vreg->enabled = true; | |
343 | out: | |
344 | return ret; | |
345 | } | |
346 | ||
feb3d798 | 347 | static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy) |
adaafaa3 YG |
348 | { |
349 | int ret = 0; | |
adaafaa3 YG |
350 | |
351 | if (phy->is_ref_clk_enabled) | |
352 | goto out; | |
353 | ||
354 | /* | |
355 | * reference clock is propagated in a daisy-chained manner from | |
356 | * source to phy, so ungate them at each stage. | |
357 | */ | |
358 | ret = clk_prepare_enable(phy->ref_clk_src); | |
359 | if (ret) { | |
360 | dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n", | |
361 | __func__, ret); | |
362 | goto out; | |
363 | } | |
364 | ||
365 | /* | |
366 | * "ref_clk_parent" is optional clock hence make sure that clk reference | |
367 | * is available before trying to enable the clock. | |
368 | */ | |
369 | if (phy->ref_clk_parent) { | |
370 | ret = clk_prepare_enable(phy->ref_clk_parent); | |
371 | if (ret) { | |
372 | dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n", | |
373 | __func__, ret); | |
374 | goto out_disable_src; | |
375 | } | |
376 | } | |
377 | ||
378 | ret = clk_prepare_enable(phy->ref_clk); | |
379 | if (ret) { | |
380 | dev_err(phy->dev, "%s: ref_clk enable failed %d\n", | |
381 | __func__, ret); | |
382 | goto out_disable_parent; | |
383 | } | |
384 | ||
385 | phy->is_ref_clk_enabled = true; | |
386 | goto out; | |
387 | ||
388 | out_disable_parent: | |
389 | if (phy->ref_clk_parent) | |
390 | clk_disable_unprepare(phy->ref_clk_parent); | |
391 | out_disable_src: | |
392 | clk_disable_unprepare(phy->ref_clk_src); | |
393 | out: | |
394 | return ret; | |
395 | } | |
396 | ||
89bd296b | 397 | static int ufs_qcom_phy_disable_vreg(struct device *dev, |
adaafaa3 YG |
398 | struct ufs_qcom_phy_vreg *vreg) |
399 | { | |
adaafaa3 YG |
400 | int ret = 0; |
401 | ||
96c163f1 | 402 | if (!vreg || !vreg->enabled) |
adaafaa3 YG |
403 | goto out; |
404 | ||
405 | ret = regulator_disable(vreg->reg); | |
406 | ||
407 | if (!ret) { | |
408 | /* ignore errors on applying disable config */ | |
89bd296b | 409 | ufs_qcom_phy_cfg_vreg(dev, vreg, false); |
adaafaa3 YG |
410 | vreg->enabled = false; |
411 | } else { | |
412 | dev_err(dev, "%s: %s disable failed, err=%d\n", | |
413 | __func__, vreg->name, ret); | |
414 | } | |
415 | out: | |
416 | return ret; | |
417 | } | |
418 | ||
feb3d798 | 419 | static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy) |
adaafaa3 | 420 | { |
adaafaa3 YG |
421 | if (phy->is_ref_clk_enabled) { |
422 | clk_disable_unprepare(phy->ref_clk); | |
423 | /* | |
424 | * "ref_clk_parent" is optional clock hence make sure that clk | |
425 | * reference is available before trying to disable the clock. | |
426 | */ | |
427 | if (phy->ref_clk_parent) | |
428 | clk_disable_unprepare(phy->ref_clk_parent); | |
429 | clk_disable_unprepare(phy->ref_clk_src); | |
430 | phy->is_ref_clk_enabled = false; | |
431 | } | |
432 | } | |
433 | ||
/* bit in the dev_ref_clk_ctrl register that gates the device ref clock */
#define UFS_REF_CLK_EN	(1 << 5)

/*
 * Gate the device reference clock pad on or off via the optional
 * dev_ref_clk_ctrl_mmio register.  No-op when the register was not
 * mapped or when the clock is already in the requested state.
 *
 * The udelay(1) calls keep the clock active for at least 1us around
 * hibern8 enter/exit, as required by the comments below; do not reorder.
 */
static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
{
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	if (phy->dev_ref_clk_ctrl_mmio &&
			(enable ^ phy->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= UFS_REF_CLK_EN;
		else
			temp &= ~UFS_REF_CLK_EN;

		/*
		 * If we are here to disable this clock immediately after
		 * entering into hibern8, we need to make sure that device
		 * ref_clk is active atleast 1us after the hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();
		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for atleast 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		phy->is_dev_ref_clk_enabled = enable;
	}
}
471 | ||
/* Ungate the device reference clock pad (see ufs_qcom_phy_dev_ref_clk_ctrl()). */
void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
{
	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
adaafaa3 YG |
477 | |
/* Gate the device reference clock pad (see ufs_qcom_phy_dev_ref_clk_ctrl()). */
void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
{
	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
adaafaa3 YG |
483 | |
/* Turn ON M-PHY RMMI interface clocks */
/*
 * No-op when already enabled.  On rx_iface_clk failure the already-enabled
 * tx_iface_clk is rolled back.  Returns 0 on success or a negative errno.
 */
static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
	int ret = 0;

	if (phy->is_iface_clk_enabled)
		goto out;

	ret = clk_prepare_enable(phy->tx_iface_clk);
	if (ret) {
		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
				__func__, ret);
		goto out;
	}
	ret = clk_prepare_enable(phy->rx_iface_clk);
	if (ret) {
		/* roll back the tx clock so the pair stays balanced */
		clk_disable_unprepare(phy->tx_iface_clk);
		dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
				__func__, ret);
		goto out;
	}
	phy->is_iface_clk_enabled = true;

out:
	return ret;
}
510 | ||
511 | /* Turn OFF M-PHY RMMI interface clocks */ | |
feb3d798 | 512 | void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy) |
adaafaa3 | 513 | { |
adaafaa3 YG |
514 | if (phy->is_iface_clk_enabled) { |
515 | clk_disable_unprepare(phy->tx_iface_clk); | |
516 | clk_disable_unprepare(phy->rx_iface_clk); | |
517 | phy->is_iface_clk_enabled = false; | |
518 | } | |
519 | } | |
520 | ||
052553af | 521 | static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy) |
adaafaa3 | 522 | { |
adaafaa3 YG |
523 | int ret = 0; |
524 | ||
525 | if (!ufs_qcom_phy->phy_spec_ops->start_serdes) { | |
526 | dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n", | |
527 | __func__); | |
528 | ret = -ENOTSUPP; | |
529 | } else { | |
530 | ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy); | |
531 | } | |
532 | ||
533 | return ret; | |
534 | } | |
535 | ||
536 | int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) | |
537 | { | |
538 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | |
539 | int ret = 0; | |
540 | ||
541 | if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) { | |
542 | dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n", | |
543 | __func__); | |
544 | ret = -ENOTSUPP; | |
545 | } else { | |
546 | ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy, | |
547 | tx_lanes); | |
548 | } | |
549 | ||
550 | return ret; | |
551 | } | |
65d49b3d | 552 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable); |
adaafaa3 YG |
553 | |
/* Cache the UFS host controller revision for later use by the PHY driver. */
void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
			u8 major, u16 minor, u16 step)
{
	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);

	ufs_qcom_phy->host_ctrl_rev_major = major;
	ufs_qcom_phy->host_ctrl_rev_minor = minor;
	ufs_qcom_phy->host_ctrl_rev_step = step;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
adaafaa3 | 564 | |
052553af | 565 | static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy) |
adaafaa3 | 566 | { |
adaafaa3 YG |
567 | if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) { |
568 | dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n", | |
569 | __func__); | |
570 | return -ENOTSUPP; | |
571 | } | |
572 | ||
573 | return ufs_qcom_phy->phy_spec_ops-> | |
574 | is_physical_coding_sublayer_ready(ufs_qcom_phy); | |
575 | } | |
576 | ||
/*
 * ufs_qcom_phy_power_on() - bring the PHY fully up
 *
 * Idempotent: returns 0 immediately when already powered on.  On first
 * call also starts the serdes and waits for the PCS to become ready
 * (is_started latches that one-time step).  Resources are brought up in
 * order vdda_phy -> power_control -> vdda_pll -> iface clks -> ref clks
 * -> optional vddp_ref_clk, and the goto chain unwinds them in reverse
 * on failure.
 *
 * Return: 0 on success or a negative errno.
 */
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	int err;

	if (phy_common->is_powered_on)
		return 0;

	/* one-time serdes start + PCS readiness check */
	if (!phy_common->is_started) {
		err = ufs_qcom_phy_start_serdes(phy_common);
		if (err)
			return err;

		err = ufs_qcom_phy_is_pcs_ready(phy_common);
		if (err)
			return err;

		phy_common->is_started = true;
	}

	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_iface_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	err = ufs_qcom_phy_enable_ref_clk(phy_common);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_iface_clk;
	}

	/* enable device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(dev,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	phy_common->is_powered_on = true;
	goto out;

/* error unwind: reverse of the bring-up order above */
out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(phy_common);
out_disable_iface_clk:
	ufs_qcom_phy_disable_iface_clk(phy_common);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
out_disable_phy:
	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);
adaafaa3 YG |
655 | |
/*
 * ufs_qcom_phy_power_off() - power the PHY down
 *
 * Idempotent: returns 0 immediately when already powered off.  Tears
 * resources down in the reverse order of ufs_qcom_phy_power_on().
 * Always returns 0; individual disable errors are logged by the helpers.
 */
int ufs_qcom_phy_power_off(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);

	if (!phy_common->is_powered_on)
		return 0;

	phy_common->phy_spec_ops->power_control(phy_common, false);

	if (phy_common->vddp_ref_clk.reg)
		ufs_qcom_phy_disable_vreg(phy_common->dev,
					  &phy_common->vddp_ref_clk);
	ufs_qcom_phy_disable_ref_clk(phy_common);
	ufs_qcom_phy_disable_iface_clk(phy_common);

	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
	phy_common->is_powered_on = false;

	return 0;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
59fba086 AB |
678 | |
679 | MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>"); | |
680 | MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>"); | |
681 | MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY"); | |
682 | MODULE_LICENSE("GPL v2"); |