/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
14 #include <linux/platform_device.h>
/*
 * Signed DIV_ROUND_UP: rounds the quotient away from zero, so it works
 * for negative numerators too (plain DIV_ROUND_UP assumes n >= 0).
 */
#define S_DIV_ROUND_UP(n, d) \
	(((n) < 0) ? (((n) - (d) + 1) / (d)) : (((n) + (d) - 1) / (d)))
21 static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
22 s32 min_result, bool even)
26 v = (tmax - tmin) * percent;
27 v = S_DIV_ROUND_UP(v, 100) + tmin;
28 if (even && (v & 0x1))
29 return max_t(s32, min_result, v - 1);
31 return max_t(s32, min_result, v);
34 static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
35 s32 ui, s32 coeff, s32 pcnt)
37 s32 tmax, tmin, clk_z;
41 temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
42 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
45 clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
48 clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
52 temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
53 timing->clk_zero = clk_z + 8 - temp;
56 int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
57 const unsigned long bit_rate, const unsigned long esc_rate)
62 s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
64 s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
65 s32 coeff = 1000; /* Precision, should avoid overflow */
68 if (!bit_rate || !esc_rate)
71 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
72 lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
74 tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
75 tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
76 timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
80 timing->hs_rqst = temp;
82 timing->hs_rqst = max_t(s32, 0, temp - 2);
84 /* Calculate clk_zero after clk_prepare and hs_rqst */
85 dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
87 temp = 105 * coeff + 12 * ui - 20 * coeff;
88 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
89 tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
90 timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
92 temp = 85 * coeff + 6 * ui;
93 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
94 temp = 40 * coeff + 4 * ui;
95 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
96 timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
99 temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
100 temp = 145 * coeff + 10 * ui - temp;
101 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
102 timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
104 temp = 105 * coeff + 12 * ui - 20 * coeff;
105 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
106 temp = 60 * coeff + 4 * ui;
107 tmin = DIV_ROUND_UP(temp, ui) - 2;
108 timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
111 tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
112 timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
115 temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
116 temp = 60 * coeff + 52 * ui - 24 * ui - temp;
117 tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
118 timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
121 temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
122 temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
123 temp += 8 * ui + lpx;
124 tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
126 temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
127 timing->clk_pre = temp >> 1;
129 timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
136 DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
137 timing->clk_pre, timing->clk_post, timing->clk_zero,
138 timing->clk_trail, timing->clk_prepare, timing->hs_exit,
139 timing->hs_zero, timing->hs_prepare, timing->hs_trail,
145 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
148 int phy_id = phy->id;
151 if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
154 val = dsi_phy_read(phy->base + reg);
156 if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
157 dsi_phy_write(phy->base + reg, val | bit_mask);
159 dsi_phy_write(phy->base + reg, val & (~bit_mask));
162 static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
164 struct regulator_bulk_data *s = phy->supplies;
165 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
166 struct device *dev = &phy->pdev->dev;
167 int num = phy->cfg->reg_cfg.num;
170 for (i = 0; i < num; i++)
171 s[i].supply = regs[i].name;
173 ret = devm_regulator_bulk_get(dev, num, s);
175 dev_err(dev, "%s: failed to init regulator, ret=%d\n",
183 static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
185 struct regulator_bulk_data *s = phy->supplies;
186 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
187 int num = phy->cfg->reg_cfg.num;
191 for (i = num - 1; i >= 0; i--)
192 if (regs[i].disable_load >= 0)
193 regulator_set_load(s[i].consumer, regs[i].disable_load);
195 regulator_bulk_disable(num, s);
198 static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
200 struct regulator_bulk_data *s = phy->supplies;
201 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
202 struct device *dev = &phy->pdev->dev;
203 int num = phy->cfg->reg_cfg.num;
207 for (i = 0; i < num; i++) {
208 if (regs[i].enable_load >= 0) {
209 ret = regulator_set_load(s[i].consumer,
210 regs[i].enable_load);
213 "regulator %d set op mode failed, %d\n",
220 ret = regulator_bulk_enable(num, s);
222 dev_err(dev, "regulator enable failed, %d\n", ret);
229 for (i--; i >= 0; i--)
230 regulator_set_load(s[i].consumer, regs[i].disable_load);
234 static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
236 struct device *dev = &phy->pdev->dev;
239 pm_runtime_get_sync(dev);
241 ret = clk_prepare_enable(phy->ahb_clk);
243 dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
244 pm_runtime_put_sync(dev);
250 static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
252 clk_disable_unprepare(phy->ahb_clk);
253 pm_runtime_put_sync(&phy->pdev->dev);
256 static const struct of_device_id dsi_phy_dt_match[] = {
257 #ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
258 { .compatible = "qcom,dsi-phy-28nm-hpm",
259 .data = &dsi_phy_28nm_hpm_cfgs },
260 { .compatible = "qcom,dsi-phy-28nm-lp",
261 .data = &dsi_phy_28nm_lp_cfgs },
263 #ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
264 { .compatible = "qcom,dsi-phy-20nm",
265 .data = &dsi_phy_20nm_cfgs },
267 #ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
268 { .compatible = "qcom,dsi-phy-28nm-8960",
269 .data = &dsi_phy_28nm_8960_cfgs },
274 static int dsi_phy_driver_probe(struct platform_device *pdev)
276 struct msm_dsi_phy *phy;
277 struct device *dev = &pdev->dev;
278 const struct of_device_id *match;
281 phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
285 match = of_match_node(dsi_phy_dt_match, dev->of_node);
289 phy->cfg = match->data;
292 ret = of_property_read_u32(dev->of_node,
293 "qcom,dsi-phy-index", &phy->id);
295 dev_err(dev, "%s: PHY index not specified, %d\n",
300 phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
301 "qcom,dsi-phy-regulator-ldo-mode");
303 phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
304 if (IS_ERR(phy->base)) {
305 dev_err(dev, "%s: failed to map phy base\n", __func__);
310 phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
312 if (IS_ERR(phy->reg_base)) {
313 dev_err(dev, "%s: failed to map phy regulator base\n",
319 ret = dsi_phy_regulator_init(phy);
321 dev_err(dev, "%s: failed to init regulator\n", __func__);
325 phy->ahb_clk = devm_clk_get(dev, "iface_clk");
326 if (IS_ERR(phy->ahb_clk)) {
327 dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
328 ret = PTR_ERR(phy->ahb_clk);
332 /* PLL init will call into clk_register which requires
333 * register access, so we need to enable power and ahb clock.
335 ret = dsi_phy_enable_resource(phy);
339 phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
342 "%s: pll init failed, need separate pll clk driver\n",
345 dsi_phy_disable_resource(phy);
347 platform_set_drvdata(pdev, phy);
355 static int dsi_phy_driver_remove(struct platform_device *pdev)
357 struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
359 if (phy && phy->pll) {
360 msm_dsi_pll_destroy(phy->pll);
364 platform_set_drvdata(pdev, NULL);
369 static struct platform_driver dsi_phy_platform_driver = {
370 .probe = dsi_phy_driver_probe,
371 .remove = dsi_phy_driver_remove,
373 .name = "msm_dsi_phy",
374 .of_match_table = dsi_phy_dt_match,
378 void __init msm_dsi_phy_driver_register(void)
380 platform_driver_register(&dsi_phy_platform_driver);
383 void __exit msm_dsi_phy_driver_unregister(void)
385 platform_driver_unregister(&dsi_phy_platform_driver);
388 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
389 const unsigned long bit_rate, const unsigned long esc_rate)
391 struct device *dev = &phy->pdev->dev;
394 if (!phy || !phy->cfg->ops.enable)
397 ret = dsi_phy_regulator_enable(phy);
399 dev_err(dev, "%s: regulator enable failed, %d\n",
404 ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
406 dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
407 dsi_phy_regulator_disable(phy);
414 void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
416 if (!phy || !phy->cfg->ops.disable)
419 phy->cfg->ops.disable(phy);
421 dsi_phy_regulator_disable(phy);
424 void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
425 u32 *clk_pre, u32 *clk_post)
431 *clk_pre = phy->timing.clk_pre;
433 *clk_post = phy->timing.clk_post;
436 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)