Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
f76ee892 TV |
2 | /* |
3 | * linux/drivers/video/omap2/dss/dsi.c | |
4 | * | |
5 | * Copyright (C) 2009 Nokia Corporation | |
6 | * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> | |
f76ee892 TV |
7 | */ |
8 | ||
9 | #define DSS_SUBSYS_NAME "DSI" | |
10 | ||
11 | #include <linux/kernel.h> | |
12 | #include <linux/io.h> | |
13 | #include <linux/clk.h> | |
14 | #include <linux/device.h> | |
15 | #include <linux/err.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/delay.h> | |
18 | #include <linux/mutex.h> | |
19 | #include <linux/module.h> | |
20 | #include <linux/semaphore.h> | |
21 | #include <linux/seq_file.h> | |
22 | #include <linux/platform_device.h> | |
23 | #include <linux/regulator/consumer.h> | |
24 | #include <linux/wait.h> | |
25 | #include <linux/workqueue.h> | |
26 | #include <linux/sched.h> | |
27 | #include <linux/slab.h> | |
28 | #include <linux/debugfs.h> | |
29 | #include <linux/pm_runtime.h> | |
30 | #include <linux/of.h> | |
31 | #include <linux/of_platform.h> | |
32 | #include <linux/component.h> | |
33 | ||
62d9e44e | 34 | #include <video/omapfb_dss.h> |
f76ee892 TV |
35 | #include <video/mipi_display.h> |
36 | ||
37 | #include "dss.h" | |
38 | #include "dss_features.h" | |
39 | ||
40 | #define DSI_CATCH_MISSING_TE | |
41 | ||
/*
 * A DSI register address: the sub-module it lives in (DSI_PROTO,
 * DSI_PHY or DSI_PLL) plus the byte offset within that module.
 */
struct dsi_reg { u16 module; u16 idx; };

#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx })
45 | ||
46 | /* DSI Protocol Engine */ | |
47 | ||
48 | #define DSI_PROTO 0 | |
49 | #define DSI_PROTO_SZ 0x200 | |
50 | ||
51 | #define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000) | |
52 | #define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010) | |
53 | #define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014) | |
54 | #define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018) | |
55 | #define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C) | |
56 | #define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040) | |
57 | #define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044) | |
58 | #define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048) | |
59 | #define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C) | |
60 | #define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050) | |
61 | #define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054) | |
62 | #define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058) | |
63 | #define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C) | |
64 | #define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060) | |
65 | #define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064) | |
66 | #define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068) | |
67 | #define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C) | |
68 | #define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070) | |
69 | #define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074) | |
70 | #define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078) | |
71 | #define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C) | |
72 | #define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080) | |
73 | #define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084) | |
74 | #define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088) | |
75 | #define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C) | |
76 | #define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090) | |
77 | #define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094) | |
78 | #define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20)) | |
79 | #define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20)) | |
80 | #define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20)) | |
81 | #define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20)) | |
82 | #define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20)) | |
83 | #define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20)) | |
84 | #define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20)) | |
85 | ||
86 | /* DSIPHY_SCP */ | |
87 | ||
88 | #define DSI_PHY 1 | |
89 | #define DSI_PHY_OFFSET 0x200 | |
90 | #define DSI_PHY_SZ 0x40 | |
91 | ||
92 | #define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000) | |
93 | #define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004) | |
94 | #define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008) | |
95 | #define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014) | |
96 | #define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028) | |
97 | ||
98 | /* DSI_PLL_CTRL_SCP */ | |
99 | ||
100 | #define DSI_PLL 2 | |
101 | #define DSI_PLL_OFFSET 0x300 | |
102 | #define DSI_PLL_SZ 0x20 | |
103 | ||
104 | #define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000) | |
105 | #define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004) | |
106 | #define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008) | |
107 | #define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C) | |
108 | #define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010) | |
109 | ||
110 | #define REG_GET(dsidev, idx, start, end) \ | |
111 | FLD_GET(dsi_read_reg(dsidev, idx), start, end) | |
112 | ||
113 | #define REG_FLD_MOD(dsidev, idx, val, start, end) \ | |
114 | dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end)) | |
115 | ||
116 | /* Global interrupts */ | |
117 | #define DSI_IRQ_VC0 (1 << 0) | |
118 | #define DSI_IRQ_VC1 (1 << 1) | |
119 | #define DSI_IRQ_VC2 (1 << 2) | |
120 | #define DSI_IRQ_VC3 (1 << 3) | |
121 | #define DSI_IRQ_WAKEUP (1 << 4) | |
122 | #define DSI_IRQ_RESYNC (1 << 5) | |
123 | #define DSI_IRQ_PLL_LOCK (1 << 7) | |
124 | #define DSI_IRQ_PLL_UNLOCK (1 << 8) | |
125 | #define DSI_IRQ_PLL_RECALL (1 << 9) | |
126 | #define DSI_IRQ_COMPLEXIO_ERR (1 << 10) | |
127 | #define DSI_IRQ_HS_TX_TIMEOUT (1 << 14) | |
128 | #define DSI_IRQ_LP_RX_TIMEOUT (1 << 15) | |
129 | #define DSI_IRQ_TE_TRIGGER (1 << 16) | |
130 | #define DSI_IRQ_ACK_TRIGGER (1 << 17) | |
131 | #define DSI_IRQ_SYNC_LOST (1 << 18) | |
132 | #define DSI_IRQ_LDO_POWER_GOOD (1 << 19) | |
133 | #define DSI_IRQ_TA_TIMEOUT (1 << 20) | |
134 | #define DSI_IRQ_ERROR_MASK \ | |
135 | (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \ | |
136 | DSI_IRQ_TA_TIMEOUT) | |
137 | #define DSI_IRQ_CHANNEL_MASK 0xf | |
138 | ||
139 | /* Virtual channel interrupts */ | |
140 | #define DSI_VC_IRQ_CS (1 << 0) | |
141 | #define DSI_VC_IRQ_ECC_CORR (1 << 1) | |
142 | #define DSI_VC_IRQ_PACKET_SENT (1 << 2) | |
143 | #define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3) | |
144 | #define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4) | |
145 | #define DSI_VC_IRQ_BTA (1 << 5) | |
146 | #define DSI_VC_IRQ_ECC_NO_CORR (1 << 6) | |
147 | #define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7) | |
148 | #define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8) | |
149 | #define DSI_VC_IRQ_ERROR_MASK \ | |
150 | (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \ | |
151 | DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \ | |
152 | DSI_VC_IRQ_FIFO_TX_UDF) | |
153 | ||
154 | /* ComplexIO interrupts */ | |
155 | #define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0) | |
156 | #define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1) | |
157 | #define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2) | |
158 | #define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3) | |
159 | #define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4) | |
160 | #define DSI_CIO_IRQ_ERRESC1 (1 << 5) | |
161 | #define DSI_CIO_IRQ_ERRESC2 (1 << 6) | |
162 | #define DSI_CIO_IRQ_ERRESC3 (1 << 7) | |
163 | #define DSI_CIO_IRQ_ERRESC4 (1 << 8) | |
164 | #define DSI_CIO_IRQ_ERRESC5 (1 << 9) | |
165 | #define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10) | |
166 | #define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11) | |
167 | #define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12) | |
168 | #define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13) | |
169 | #define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14) | |
170 | #define DSI_CIO_IRQ_STATEULPS1 (1 << 15) | |
171 | #define DSI_CIO_IRQ_STATEULPS2 (1 << 16) | |
172 | #define DSI_CIO_IRQ_STATEULPS3 (1 << 17) | |
173 | #define DSI_CIO_IRQ_STATEULPS4 (1 << 18) | |
174 | #define DSI_CIO_IRQ_STATEULPS5 (1 << 19) | |
175 | #define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20) | |
176 | #define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21) | |
177 | #define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22) | |
178 | #define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23) | |
179 | #define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24) | |
180 | #define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25) | |
181 | #define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26) | |
182 | #define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27) | |
183 | #define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28) | |
184 | #define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29) | |
185 | #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30) | |
186 | #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31) | |
187 | #define DSI_CIO_IRQ_ERROR_MASK \ | |
188 | (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \ | |
189 | DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \ | |
190 | DSI_CIO_IRQ_ERRSYNCESC5 | \ | |
191 | DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \ | |
192 | DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \ | |
193 | DSI_CIO_IRQ_ERRESC5 | \ | |
194 | DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \ | |
195 | DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \ | |
196 | DSI_CIO_IRQ_ERRCONTROL5 | \ | |
197 | DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \ | |
198 | DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \ | |
199 | DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \ | |
200 | DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \ | |
201 | DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5) | |
202 | ||
203 | typedef void (*omap_dsi_isr_t) (void *arg, u32 mask); | |
204 | ||
205 | static int dsi_display_init_dispc(struct platform_device *dsidev, | |
206 | struct omap_overlay_manager *mgr); | |
207 | static void dsi_display_uninit_dispc(struct platform_device *dsidev, | |
208 | struct omap_overlay_manager *mgr); | |
209 | ||
210 | static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel); | |
211 | ||
212 | /* DSI PLL HSDIV indices */ | |
213 | #define HSDIV_DISPC 0 | |
214 | #define HSDIV_DSI 1 | |
215 | ||
216 | #define DSI_MAX_NR_ISRS 2 | |
217 | #define DSI_MAX_NR_LANES 5 | |
218 | ||
/* Logical function assigned to a physical DSI lane. */
enum dsi_lane_function {
	DSI_LANE_UNUSED	= 0,
	DSI_LANE_CLK,
	DSI_LANE_DATA1,
	DSI_LANE_DATA2,
	DSI_LANE_DATA3,
	DSI_LANE_DATA4,
};

/* Per-lane configuration: what the lane carries and its polarity. */
struct dsi_lane_config {
	enum dsi_lane_function function;
	u8 polarity;
};

/* One registered interrupt service routine and the mask it fires on. */
struct dsi_isr_data {
	omap_dsi_isr_t	isr;
	void		*arg;
	u32		mask;
};

/* Encoded TX/RX FIFO sizes as written to the FIFO_VC_SIZE registers. */
enum fifo_size {
	DSI_FIFO_SIZE_0		= 0,
	DSI_FIFO_SIZE_32	= 1,
	DSI_FIFO_SIZE_64	= 2,
	DSI_FIFO_SIZE_96	= 3,
	DSI_FIFO_SIZE_128	= 4,
};

/* Who feeds a virtual channel: the L4 interconnect or the video port. */
enum dsi_vc_source {
	DSI_VC_SOURCE_L4 = 0,
	DSI_VC_SOURCE_VP,
};

/* Per-bit interrupt counters, gathered for debugfs-style reporting. */
struct dsi_irq_stats {
	unsigned long last_reset;
	unsigned irq_count;
	unsigned dsi_irqs[32];
	unsigned vc_irqs[4][32];
	unsigned cio_irqs[32];
};

/* ISR tables for the global, per-VC and ComplexIO interrupt groups. */
struct dsi_isr_tables {
	struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
};

/* Scratch context for the DSI/DISPC clock-rate calculation helpers. */
struct dsi_clk_calc_ctx {
	struct platform_device *dsidev;
	struct dss_pll *pll;

	/* inputs */

	const struct omap_dss_dsi_config *config;

	unsigned long req_pck_min, req_pck_nom, req_pck_max;

	/* outputs */

	struct dss_pll_clock_info dsi_cinfo;
	struct dispc_clock_info dispc_cinfo;

	struct omap_video_timings dispc_vm;
	struct omap_dss_dsi_videomode_timings dsi_vm;
};

/* LP (low-power) escape-mode clock rate and its divider. */
struct dsi_lp_clock_info {
	unsigned long lp_clk;
	u16 lp_clk_div;
};
289 | ||
/*
 * Driver-private state for one DSI module instance, attached to the
 * platform device via drvdata (see dsi_get_dsidrv_data()).
 */
struct dsi_data {
	struct platform_device *pdev;
	/* ioremapped bases of the three register sub-modules */
	void __iomem *proto_base;
	void __iomem *phy_base;
	void __iomem *pll_base;

	int module_id;

	int irq;

	/* checked by the IRQ handler; when false the IRQ is not ours */
	bool is_enabled;

	struct clk *dss_clk;

	/* clock settings requested by the user of the driver */
	struct dispc_clock_info user_dispc_cinfo;
	struct dss_pll_clock_info user_dsi_cinfo;

	struct dsi_lp_clock_info user_lp_cinfo;
	struct dsi_lp_clock_info current_lp_cinfo;

	struct dss_pll pll;

	bool vdds_dsi_enabled;
	struct regulator *vdds_dsi_reg;

	/* per-virtual-channel state (4 VCs per DSI module) */
	struct {
		enum dsi_vc_source source;
		struct omap_dss_device *dssdev;
		enum fifo_size tx_fifo_size;
		enum fifo_size rx_fifo_size;
		int vc_id;
	} vc[4];

	struct mutex lock;
	/* serializes bus transactions; see dsi_bus_lock()/dsi_bus_unlock() */
	struct semaphore bus_lock;

	/* protects isr_tables and the IRQ enable registers */
	spinlock_t irq_lock;
	struct dsi_isr_tables isr_tables;
	/* space for a copy used by the interrupt handler */
	struct dsi_isr_tables isr_tables_copy;

	int update_channel;
#ifdef DSI_PERF_MEASURE
	unsigned update_bytes;
#endif

	bool te_enabled;
	bool ulps_enabled;

	void (*framedone_callback)(int, void *);
	void *framedone_data;

	struct delayed_work framedone_timeout_work;

#ifdef DSI_CATCH_MISSING_TE
	/* fires if a TE trigger never arrives; see omap_dsi_irq_handler() */
	struct timer_list te_timer;
#endif

	/* cached PLL calculation results, keyed by requested rates */
	unsigned long cache_req_pck;
	unsigned long cache_clk_freq;
	struct dss_pll_clock_info cache_cinfo;

	/* accumulated DSI_IRQ_ERROR_MASK bits, guarded by errors_lock */
	u32 errors;
	spinlock_t errors_lock;
#ifdef DSI_PERF_MEASURE
	ktime_t perf_setup_time;
	ktime_t perf_start_time;
#endif
	int debug_read;
	int debug_write;

#ifdef CONFIG_FB_OMAP2_DSS_COLLECT_IRQ_STATS
	spinlock_t irq_stats_lock;
	struct dsi_irq_stats irq_stats;
#endif

	unsigned num_lanes_supported;
	unsigned line_buffer_size;

	struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
	unsigned num_lanes_used;

	unsigned scp_clk_refcount;

	/* current display configuration */
	struct dss_lcd_mgr_config mgr_config;
	struct omap_video_timings timings;
	enum omap_dss_dsi_pixel_format pix_fmt;
	enum omap_dss_dsi_mode mode;
	struct omap_dss_dsi_videomode_timings vm_timings;

	struct omap_dss_device output;
};
382 | ||
/*
 * Pairs a DSI device with a completion to signal; NOTE(review): the
 * consumers are outside this chunk — presumably packet-sent ISRs.
 */
struct dsi_packet_sent_handler_data {
	struct platform_device *dsidev;
	struct completion *completion;
};

/* Maps a DSI module's physical base address to its module id. */
struct dsi_module_id_data {
	u32 address;
	int id;
};
392 | ||
393 | static const struct of_device_id dsi_of_match[]; | |
394 | ||
395 | #ifdef DSI_PERF_MEASURE | |
396 | static bool dsi_perf; | |
397 | module_param(dsi_perf, bool, 0644); | |
398 | #endif | |
399 | ||
400 | static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev) | |
401 | { | |
402 | return dev_get_drvdata(&dsidev->dev); | |
403 | } | |
404 | ||
405 | static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev) | |
406 | { | |
407 | return to_platform_device(dssdev->dev); | |
408 | } | |
409 | ||
410 | static struct platform_device *dsi_get_dsidev_from_id(int module) | |
411 | { | |
412 | struct omap_dss_device *out; | |
413 | enum omap_dss_output_id id; | |
414 | ||
415 | switch (module) { | |
416 | case 0: | |
417 | id = OMAP_DSS_OUTPUT_DSI1; | |
418 | break; | |
419 | case 1: | |
420 | id = OMAP_DSS_OUTPUT_DSI2; | |
421 | break; | |
422 | default: | |
423 | return NULL; | |
424 | } | |
425 | ||
426 | out = omap_dss_get_output(id); | |
427 | ||
428 | return out ? to_platform_device(out->dev) : NULL; | |
429 | } | |
430 | ||
431 | static inline void dsi_write_reg(struct platform_device *dsidev, | |
432 | const struct dsi_reg idx, u32 val) | |
433 | { | |
434 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
435 | void __iomem *base; | |
436 | ||
437 | switch(idx.module) { | |
438 | case DSI_PROTO: base = dsi->proto_base; break; | |
439 | case DSI_PHY: base = dsi->phy_base; break; | |
440 | case DSI_PLL: base = dsi->pll_base; break; | |
441 | default: return; | |
442 | } | |
443 | ||
444 | __raw_writel(val, base + idx.idx); | |
445 | } | |
446 | ||
447 | static inline u32 dsi_read_reg(struct platform_device *dsidev, | |
448 | const struct dsi_reg idx) | |
449 | { | |
450 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
451 | void __iomem *base; | |
452 | ||
453 | switch(idx.module) { | |
454 | case DSI_PROTO: base = dsi->proto_base; break; | |
455 | case DSI_PHY: base = dsi->phy_base; break; | |
456 | case DSI_PLL: base = dsi->pll_base; break; | |
457 | default: return 0; | |
458 | } | |
459 | ||
460 | return __raw_readl(base + idx.idx); | |
461 | } | |
462 | ||
463 | static void dsi_bus_lock(struct omap_dss_device *dssdev) | |
464 | { | |
465 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
466 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
467 | ||
468 | down(&dsi->bus_lock); | |
469 | } | |
470 | ||
471 | static void dsi_bus_unlock(struct omap_dss_device *dssdev) | |
472 | { | |
473 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
474 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
475 | ||
476 | up(&dsi->bus_lock); | |
477 | } | |
478 | ||
479 | static bool dsi_bus_is_locked(struct platform_device *dsidev) | |
480 | { | |
481 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
482 | ||
483 | return dsi->bus_lock.count == 0; | |
484 | } | |
485 | ||
486 | static void dsi_completion_handler(void *data, u32 mask) | |
487 | { | |
488 | complete((struct completion *)data); | |
489 | } | |
490 | ||
/*
 * Poll bit 'bitnum' of register 'idx' until it reads back 'value'.
 *
 * First busy-loops 100 reads to catch bits that change immediately,
 * then polls for up to 500 ms, sleeping ~1 ms between reads so the
 * CPU is not hogged.
 *
 * Returns 'value' on success and !value on timeout, so callers can
 * compare the result against the expected value.
 */
static inline int wait_for_bit_change(struct platform_device *dsidev,
		const struct dsi_reg idx, int bitnum, int value)
{
	unsigned long timeout;
	ktime_t wait;
	int t;

	/* first busyloop to see if the bit changes right away */
	t = 100;
	while (t-- > 0) {
		if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
			return value;
	}

	/* then loop for 500ms, sleeping for 1ms in between */
	timeout = jiffies + msecs_to_jiffies(500);
	while (time_before(jiffies, timeout)) {
		if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
			return value;

		/* sleep ~1 ms (1e6 ns) before re-reading the register */
		wait = ns_to_ktime(1000 * 1000);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
	}

	return !value;
}
518 | ||
519 | u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt) | |
520 | { | |
521 | switch (fmt) { | |
522 | case OMAP_DSS_DSI_FMT_RGB888: | |
523 | case OMAP_DSS_DSI_FMT_RGB666: | |
524 | return 24; | |
525 | case OMAP_DSS_DSI_FMT_RGB666_PACKED: | |
526 | return 18; | |
527 | case OMAP_DSS_DSI_FMT_RGB565: | |
528 | return 16; | |
529 | default: | |
530 | BUG(); | |
531 | return 0; | |
532 | } | |
533 | } | |
534 | ||
#ifdef DSI_PERF_MEASURE
/* Timestamp the start of the setup phase of an update. */
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_setup_time = ktime_get();
}

/* Timestamp the start of the transfer phase of an update. */
static void dsi_perf_mark_start(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_start_time = ktime_get();
}

/*
 * Print setup/transfer timing and throughput for the last update,
 * based on the two timestamps above and dsi->update_bytes.  No-op
 * unless the dsi_perf module parameter is set.
 */
static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	ktime_t t, setup_time, trans_time;
	u32 total_bytes;
	u32 setup_us, trans_us, total_us;

	if (!dsi_perf)
		return;

	t = ktime_get();

	setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
	setup_us = (u32)ktime_to_us(setup_time);
	if (setup_us == 0)
		setup_us = 1;	/* avoid division by zero below */

	trans_time = ktime_sub(t, dsi->perf_start_time);
	trans_us = (u32)ktime_to_us(trans_time);
	if (trans_us == 0)
		trans_us = 1;	/* avoid division by zero below */

	total_us = setup_us + trans_us;

	total_bytes = dsi->update_bytes;

	printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
			"%u bytes, %u kbytes/sec\n",
			name,
			setup_us,
			trans_us,
			total_us,
			1000*1000 / total_us,
			total_bytes,
			total_bytes * 1000 / total_us);
}
#else
/* Stubs used when DSI_PERF_MEASURE is not defined. */
static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
{
}

static inline void dsi_perf_mark_start(struct platform_device *dsidev)
{
}

static inline void dsi_perf_show(struct platform_device *dsidev,
		const char *name)
{
}
#endif
598 | ||
/* non-zero: also log informational (non-error) interrupt bits */
static int verbose_irq;

/*
 * Decode and pr_debug() the bits of a DSI_IRQSTATUS word.  Statuses
 * consisting only of per-channel bits (VC0..VC3) are skipped unless
 * verbose_irq is set.
 */
static void print_irq_status(u32 status)
{
	if (status == 0)
		return;

	if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
		return;

/* expands to the bit's name if set in 'status', else an empty string */
#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		status,
		verbose_irq ? PIS(VC0) : "",
		verbose_irq ? PIS(VC1) : "",
		verbose_irq ? PIS(VC2) : "",
		verbose_irq ? PIS(VC3) : "",
		PIS(WAKEUP),
		PIS(RESYNC),
		PIS(PLL_LOCK),
		PIS(PLL_UNLOCK),
		PIS(PLL_RECALL),
		PIS(COMPLEXIO_ERR),
		PIS(HS_TX_TIMEOUT),
		PIS(LP_RX_TIMEOUT),
		PIS(TE_TRIGGER),
		PIS(ACK_TRIGGER),
		PIS(SYNC_LOST),
		PIS(LDO_POWER_GOOD),
		PIS(TA_TIMEOUT));
#undef PIS
}
632 | ||
/*
 * Decode and pr_debug() the bits of a DSI_VC_IRQSTATUS word for the
 * given virtual channel.  A bare PACKET_SENT status is skipped unless
 * verbose_irq is set.
 */
static void print_irq_status_vc(int channel, u32 status)
{
	if (status == 0)
		return;

	if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
		return;

/* expands to the bit's name if set in 'status', else an empty string */
#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
		channel,
		status,
		PIS(CS),
		PIS(ECC_CORR),
		PIS(ECC_NO_CORR),
		verbose_irq ? PIS(PACKET_SENT) : "",
		PIS(BTA),
		PIS(FIFO_TX_OVF),
		PIS(FIFO_RX_OVF),
		PIS(FIFO_TX_UDF),
		PIS(PP_BUSY_CHANGE));
#undef PIS
}
657 | ||
/*
 * Decode and pr_debug() the bits of a DSI_COMPLEXIO_IRQ_STATUS word.
 * NOTE(review): only the lane 1-3 variants of the ERR*/STATEULPS/
 * ERRCONTENTION bits are decoded here even though lane 4/5 bits are
 * defined above — confirm whether that is intentional.
 */
static void print_irq_status_cio(u32 status)
{
	if (status == 0)
		return;

/* expands to the bit's name if set in 'status', else an empty string */
#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		status,
		PIS(ERRSYNCESC1),
		PIS(ERRSYNCESC2),
		PIS(ERRSYNCESC3),
		PIS(ERRESC1),
		PIS(ERRESC2),
		PIS(ERRESC3),
		PIS(ERRCONTROL1),
		PIS(ERRCONTROL2),
		PIS(ERRCONTROL3),
		PIS(STATEULPS1),
		PIS(STATEULPS2),
		PIS(STATEULPS3),
		PIS(ERRCONTENTIONLP0_1),
		PIS(ERRCONTENTIONLP1_1),
		PIS(ERRCONTENTIONLP0_2),
		PIS(ERRCONTENTIONLP1_2),
		PIS(ERRCONTENTIONLP0_3),
		PIS(ERRCONTENTIONLP1_3),
		PIS(ULPSACTIVENOT_ALL0),
		PIS(ULPSACTIVENOT_ALL1));
#undef PIS
}
689 | ||
35b522cf | 690 | #ifdef CONFIG_FB_OMAP2_DSS_COLLECT_IRQ_STATS |
f76ee892 TV |
691 | static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus, |
692 | u32 *vcstatus, u32 ciostatus) | |
693 | { | |
694 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
695 | int i; | |
696 | ||
697 | spin_lock(&dsi->irq_stats_lock); | |
698 | ||
699 | dsi->irq_stats.irq_count++; | |
700 | dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs); | |
701 | ||
702 | for (i = 0; i < 4; ++i) | |
703 | dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]); | |
704 | ||
705 | dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs); | |
706 | ||
707 | spin_unlock(&dsi->irq_stats_lock); | |
708 | } | |
709 | #else | |
710 | #define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus) | |
711 | #endif | |
712 | ||
713 | static int debug_irq; | |
714 | ||
715 | static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus, | |
716 | u32 *vcstatus, u32 ciostatus) | |
717 | { | |
718 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
719 | int i; | |
720 | ||
721 | if (irqstatus & DSI_IRQ_ERROR_MASK) { | |
722 | DSSERR("DSI error, irqstatus %x\n", irqstatus); | |
723 | print_irq_status(irqstatus); | |
724 | spin_lock(&dsi->errors_lock); | |
725 | dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK; | |
726 | spin_unlock(&dsi->errors_lock); | |
727 | } else if (debug_irq) { | |
728 | print_irq_status(irqstatus); | |
729 | } | |
730 | ||
731 | for (i = 0; i < 4; ++i) { | |
732 | if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) { | |
733 | DSSERR("DSI VC(%d) error, vc irqstatus %x\n", | |
734 | i, vcstatus[i]); | |
735 | print_irq_status_vc(i, vcstatus[i]); | |
736 | } else if (debug_irq) { | |
737 | print_irq_status_vc(i, vcstatus[i]); | |
738 | } | |
739 | } | |
740 | ||
741 | if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) { | |
742 | DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus); | |
743 | print_irq_status_cio(ciostatus); | |
744 | } else if (debug_irq) { | |
745 | print_irq_status_cio(ciostatus); | |
746 | } | |
747 | } | |
748 | ||
749 | static void dsi_call_isrs(struct dsi_isr_data *isr_array, | |
750 | unsigned isr_array_size, u32 irqstatus) | |
751 | { | |
752 | struct dsi_isr_data *isr_data; | |
753 | int i; | |
754 | ||
755 | for (i = 0; i < isr_array_size; i++) { | |
756 | isr_data = &isr_array[i]; | |
757 | if (isr_data->isr && isr_data->mask & irqstatus) | |
758 | isr_data->isr(isr_data->arg, irqstatus); | |
759 | } | |
760 | } | |
761 | ||
762 | static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables, | |
763 | u32 irqstatus, u32 *vcstatus, u32 ciostatus) | |
764 | { | |
765 | int i; | |
766 | ||
767 | dsi_call_isrs(isr_tables->isr_table, | |
768 | ARRAY_SIZE(isr_tables->isr_table), | |
769 | irqstatus); | |
770 | ||
771 | for (i = 0; i < 4; ++i) { | |
772 | if (vcstatus[i] == 0) | |
773 | continue; | |
774 | dsi_call_isrs(isr_tables->isr_table_vc[i], | |
775 | ARRAY_SIZE(isr_tables->isr_table_vc[i]), | |
776 | vcstatus[i]); | |
777 | } | |
778 | ||
779 | if (ciostatus != 0) | |
780 | dsi_call_isrs(isr_tables->isr_table_cio, | |
781 | ARRAY_SIZE(isr_tables->isr_table_cio), | |
782 | ciostatus); | |
783 | } | |
784 | ||
/*
 * Top-level DSI interrupt handler.
 *
 * Reads and acknowledges the global, per-VC and ComplexIO status
 * registers (each ack is followed by a read-back to flush the posted
 * write), then dispatches to the registered ISRs using a snapshot of
 * the ISR tables so ISRs may unregister themselves while running.
 */
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
	struct platform_device *dsidev;
	struct dsi_data *dsi;
	u32 irqstatus, vcstatus[4], ciostatus;
	int i;

	dsidev = (struct platform_device *) arg;
	dsi = dsi_get_dsidrv_data(dsidev);

	/* the module is off; a shared-line interrupt cannot be ours */
	if (!dsi->is_enabled)
		return IRQ_NONE;

	spin_lock(&dsi->irq_lock);

	irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);

	/* IRQ is not for us */
	if (!irqstatus) {
		spin_unlock(&dsi->irq_lock);
		return IRQ_NONE;
	}

	/* ack everything except the VC bits, which are cleared per-VC below */
	dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
	/* flush posted write */
	dsi_read_reg(dsidev, DSI_IRQSTATUS);

	for (i = 0; i < 4; ++i) {
		if ((irqstatus & (1 << i)) == 0) {
			vcstatus[i] = 0;
			continue;
		}

		vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));

		dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
	}

	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
		ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);

		dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
	} else {
		ciostatus = 0;
	}

#ifdef DSI_CATCH_MISSING_TE
	/* the TE trigger arrived, so the missing-TE watchdog can stand down */
	if (irqstatus & DSI_IRQ_TE_TRIGGER)
		del_timer(&dsi->te_timer);
#endif

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
		sizeof(dsi->isr_tables));

	spin_unlock(&dsi->irq_lock);

	dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);

	dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);

	dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);

	return IRQ_HANDLED;
}
855 | ||
/* dsi->irq_lock has to be locked by the caller */
/*
 * Program an interrupt-enable register from an ISR table: the final
 * enable mask is 'default_mask' OR'ed with the mask of every
 * registered ISR.  Status bits for interrupts that become newly
 * enabled are cleared first so stale events do not fire immediately.
 */
static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
		struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 default_mask,
		const struct dsi_reg enable_reg,
		const struct dsi_reg status_reg)
{
	struct dsi_isr_data *isr_data;
	u32 mask;
	u32 old_mask;
	int i;

	mask = default_mask;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dsi_read_reg(dsidev, enable_reg);
	/* clear the irqstatus for newly enabled irqs */
	dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
	dsi_write_reg(dsidev, enable_reg, mask);

	/* flush posted writes */
	dsi_read_reg(dsidev, enable_reg);
	dsi_read_reg(dsidev, status_reg);
}
888 | ||
/* dsi->irq_lock has to be locked by the caller */
/*
 * Reprogram the global DSI_IRQENABLE register from the global ISR
 * table.  Error interrupts are always enabled; the TE trigger is also
 * enabled when the missing-TE watchdog is compiled in.
 */
static void _omap_dsi_set_irqs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
	mask |= DSI_IRQ_TE_TRIGGER;
#endif
	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
			DSI_IRQENABLE, DSI_IRQSTATUS);
}
901 | ||
902 | /* dsi->irq_lock has to be locked by the caller */ | |
903 | static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc) | |
904 | { | |
905 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
906 | ||
907 | _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc], | |
908 | ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]), | |
909 | DSI_VC_IRQ_ERROR_MASK, | |
910 | DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc)); | |
911 | } | |
912 | ||
913 | /* dsi->irq_lock has to be locked by the caller */ | |
914 | static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev) | |
915 | { | |
916 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
917 | ||
918 | _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio, | |
919 | ARRAY_SIZE(dsi->isr_tables.isr_table_cio), | |
920 | DSI_CIO_IRQ_ERROR_MASK, | |
921 | DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS); | |
922 | } | |
923 | ||
924 | static void _dsi_initialize_irq(struct platform_device *dsidev) | |
925 | { | |
926 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
927 | unsigned long flags; | |
928 | int vc; | |
929 | ||
930 | spin_lock_irqsave(&dsi->irq_lock, flags); | |
931 | ||
932 | memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables)); | |
933 | ||
934 | _omap_dsi_set_irqs(dsidev); | |
935 | for (vc = 0; vc < 4; ++vc) | |
936 | _omap_dsi_set_irqs_vc(dsidev, vc); | |
937 | _omap_dsi_set_irqs_cio(dsidev); | |
938 | ||
939 | spin_unlock_irqrestore(&dsi->irq_lock, flags); | |
940 | } | |
941 | ||
942 | static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask, | |
943 | struct dsi_isr_data *isr_array, unsigned isr_array_size) | |
944 | { | |
945 | struct dsi_isr_data *isr_data; | |
946 | int free_idx; | |
947 | int i; | |
948 | ||
949 | BUG_ON(isr == NULL); | |
950 | ||
951 | /* check for duplicate entry and find a free slot */ | |
952 | free_idx = -1; | |
953 | for (i = 0; i < isr_array_size; i++) { | |
954 | isr_data = &isr_array[i]; | |
955 | ||
956 | if (isr_data->isr == isr && isr_data->arg == arg && | |
957 | isr_data->mask == mask) { | |
958 | return -EINVAL; | |
959 | } | |
960 | ||
961 | if (isr_data->isr == NULL && free_idx == -1) | |
962 | free_idx = i; | |
963 | } | |
964 | ||
965 | if (free_idx == -1) | |
966 | return -EBUSY; | |
967 | ||
968 | isr_data = &isr_array[free_idx]; | |
969 | isr_data->isr = isr; | |
970 | isr_data->arg = arg; | |
971 | isr_data->mask = mask; | |
972 | ||
973 | return 0; | |
974 | } | |
975 | ||
976 | static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask, | |
977 | struct dsi_isr_data *isr_array, unsigned isr_array_size) | |
978 | { | |
979 | struct dsi_isr_data *isr_data; | |
980 | int i; | |
981 | ||
982 | for (i = 0; i < isr_array_size; i++) { | |
983 | isr_data = &isr_array[i]; | |
984 | if (isr_data->isr != isr || isr_data->arg != arg || | |
985 | isr_data->mask != mask) | |
986 | continue; | |
987 | ||
988 | isr_data->isr = NULL; | |
989 | isr_data->arg = NULL; | |
990 | isr_data->mask = 0; | |
991 | ||
992 | return 0; | |
993 | } | |
994 | ||
995 | return -EINVAL; | |
996 | } | |
997 | ||
998 | static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr, | |
999 | void *arg, u32 mask) | |
1000 | { | |
1001 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1002 | unsigned long flags; | |
1003 | int r; | |
1004 | ||
1005 | spin_lock_irqsave(&dsi->irq_lock, flags); | |
1006 | ||
1007 | r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table, | |
1008 | ARRAY_SIZE(dsi->isr_tables.isr_table)); | |
1009 | ||
1010 | if (r == 0) | |
1011 | _omap_dsi_set_irqs(dsidev); | |
1012 | ||
1013 | spin_unlock_irqrestore(&dsi->irq_lock, flags); | |
1014 | ||
1015 | return r; | |
1016 | } | |
1017 | ||
1018 | static int dsi_unregister_isr(struct platform_device *dsidev, | |
1019 | omap_dsi_isr_t isr, void *arg, u32 mask) | |
1020 | { | |
1021 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1022 | unsigned long flags; | |
1023 | int r; | |
1024 | ||
1025 | spin_lock_irqsave(&dsi->irq_lock, flags); | |
1026 | ||
1027 | r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table, | |
1028 | ARRAY_SIZE(dsi->isr_tables.isr_table)); | |
1029 | ||
1030 | if (r == 0) | |
1031 | _omap_dsi_set_irqs(dsidev); | |
1032 | ||
1033 | spin_unlock_irqrestore(&dsi->irq_lock, flags); | |
1034 | ||
1035 | return r; | |
1036 | } | |
1037 | ||
1038 | static int dsi_register_isr_vc(struct platform_device *dsidev, int channel, | |
1039 | omap_dsi_isr_t isr, void *arg, u32 mask) | |
1040 | { | |
1041 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1042 | unsigned long flags; | |
1043 | int r; | |
1044 | ||
1045 | spin_lock_irqsave(&dsi->irq_lock, flags); | |
1046 | ||
1047 | r = _dsi_register_isr(isr, arg, mask, | |
1048 | dsi->isr_tables.isr_table_vc[channel], | |
1049 | ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel])); | |
1050 | ||
1051 | if (r == 0) | |
1052 | _omap_dsi_set_irqs_vc(dsidev, channel); | |
1053 | ||
1054 | spin_unlock_irqrestore(&dsi->irq_lock, flags); | |
1055 | ||
1056 | return r; | |
1057 | } | |
1058 | ||
1059 | static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel, | |
1060 | omap_dsi_isr_t isr, void *arg, u32 mask) | |
1061 | { | |
1062 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1063 | unsigned long flags; | |
1064 | int r; | |
1065 | ||
1066 | spin_lock_irqsave(&dsi->irq_lock, flags); | |
1067 | ||
1068 | r = _dsi_unregister_isr(isr, arg, mask, | |
1069 | dsi->isr_tables.isr_table_vc[channel], | |
1070 | ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel])); | |
1071 | ||
1072 | if (r == 0) | |
1073 | _omap_dsi_set_irqs_vc(dsidev, channel); | |
1074 | ||
1075 | spin_unlock_irqrestore(&dsi->irq_lock, flags); | |
1076 | ||
1077 | return r; | |
1078 | } | |
1079 | ||
1080 | static int dsi_register_isr_cio(struct platform_device *dsidev, | |
1081 | omap_dsi_isr_t isr, void *arg, u32 mask) | |
1082 | { | |
1083 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1084 | unsigned long flags; | |
1085 | int r; | |
1086 | ||
1087 | spin_lock_irqsave(&dsi->irq_lock, flags); | |
1088 | ||
1089 | r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio, | |
1090 | ARRAY_SIZE(dsi->isr_tables.isr_table_cio)); | |
1091 | ||
1092 | if (r == 0) | |
1093 | _omap_dsi_set_irqs_cio(dsidev); | |
1094 | ||
1095 | spin_unlock_irqrestore(&dsi->irq_lock, flags); | |
1096 | ||
1097 | return r; | |
1098 | } | |
1099 | ||
1100 | static int dsi_unregister_isr_cio(struct platform_device *dsidev, | |
1101 | omap_dsi_isr_t isr, void *arg, u32 mask) | |
1102 | { | |
1103 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1104 | unsigned long flags; | |
1105 | int r; | |
1106 | ||
1107 | spin_lock_irqsave(&dsi->irq_lock, flags); | |
1108 | ||
1109 | r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio, | |
1110 | ARRAY_SIZE(dsi->isr_tables.isr_table_cio)); | |
1111 | ||
1112 | if (r == 0) | |
1113 | _omap_dsi_set_irqs_cio(dsidev); | |
1114 | ||
1115 | spin_unlock_irqrestore(&dsi->irq_lock, flags); | |
1116 | ||
1117 | return r; | |
1118 | } | |
1119 | ||
1120 | static u32 dsi_get_errors(struct platform_device *dsidev) | |
1121 | { | |
1122 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1123 | unsigned long flags; | |
1124 | u32 e; | |
1125 | spin_lock_irqsave(&dsi->errors_lock, flags); | |
1126 | e = dsi->errors; | |
1127 | dsi->errors = 0; | |
1128 | spin_unlock_irqrestore(&dsi->errors_lock, flags); | |
1129 | return e; | |
1130 | } | |
1131 | ||
1132 | static int dsi_runtime_get(struct platform_device *dsidev) | |
1133 | { | |
1134 | int r; | |
1135 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1136 | ||
1137 | DSSDBG("dsi_runtime_get\n"); | |
1138 | ||
1139 | r = pm_runtime_get_sync(&dsi->pdev->dev); | |
1140 | WARN_ON(r < 0); | |
1141 | return r < 0 ? r : 0; | |
1142 | } | |
1143 | ||
1144 | static void dsi_runtime_put(struct platform_device *dsidev) | |
1145 | { | |
1146 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1147 | int r; | |
1148 | ||
1149 | DSSDBG("dsi_runtime_put\n"); | |
1150 | ||
1151 | r = pm_runtime_put_sync(&dsi->pdev->dev); | |
1152 | WARN_ON(r < 0 && r != -ENOSYS); | |
1153 | } | |
1154 | ||
1155 | static int dsi_regulator_init(struct platform_device *dsidev) | |
1156 | { | |
1157 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1158 | struct regulator *vdds_dsi; | |
f76ee892 TV |
1159 | |
1160 | if (dsi->vdds_dsi_reg != NULL) | |
1161 | return 0; | |
1162 | ||
1163 | vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdd"); | |
1164 | ||
1165 | if (IS_ERR(vdds_dsi)) { | |
1166 | if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) | |
1167 | DSSERR("can't get DSI VDD regulator\n"); | |
1168 | return PTR_ERR(vdds_dsi); | |
1169 | } | |
1170 | ||
f76ee892 TV |
1171 | dsi->vdds_dsi_reg = vdds_dsi; |
1172 | ||
1173 | return 0; | |
1174 | } | |
1175 | ||
/* Read back and pr_debug() the DSI PLL / ComplexIO / PHY reset-done
 * status bits. Debug aid only; has no side effects beyond register
 * reads. */
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
	u32 l;
	int b0, b1, b2;

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

	/* NOTE(review): b0..b2 appear to be per-lane reset-done bit positions
	 * in DSIPHY_CFG5, which move when the TXCLKESC ordering is reversed —
	 * confirm against the TRM. */
	if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
		b0 = 28;
		b1 = 27;
		b2 = 26;
	} else {
		b0 = 24;
		b1 = 25;
		b2 = 26;
	}

	/* helper: read register DSI_<fld> and extract bits [start:end] */
#define DSI_FLD_GET(fld, start, end)\
	FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)

	pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
		DSI_FLD_GET(PLL_STATUS, 0, 0),
		DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
		DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
		DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
		DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
		DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
		DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
		DSI_FLD_GET(DSIPHY_CFG5, 31, 31));

#undef DSI_FLD_GET
}
1211 | ||
1212 | static inline int dsi_if_enable(struct platform_device *dsidev, bool enable) | |
1213 | { | |
1214 | DSSDBG("dsi_if_enable(%d)\n", enable); | |
1215 | ||
1216 | enable = enable ? 1 : 0; | |
1217 | REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */ | |
1218 | ||
1219 | if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) { | |
1220 | DSSERR("Failed to set dsi_if_enable to %d\n", enable); | |
1221 | return -EIO; | |
1222 | } | |
1223 | ||
1224 | return 0; | |
1225 | } | |
1226 | ||
1227 | static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) | |
1228 | { | |
1229 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1230 | ||
1231 | return dsi->pll.cinfo.clkout[HSDIV_DISPC]; | |
1232 | } | |
1233 | ||
1234 | static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev) | |
1235 | { | |
1236 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1237 | ||
1238 | return dsi->pll.cinfo.clkout[HSDIV_DSI]; | |
1239 | } | |
1240 | ||
1241 | static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev) | |
1242 | { | |
1243 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1244 | ||
1245 | return dsi->pll.cinfo.clkdco / 16; | |
1246 | } | |
1247 | ||
1248 | static unsigned long dsi_fclk_rate(struct platform_device *dsidev) | |
1249 | { | |
1250 | unsigned long r; | |
1251 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1252 | ||
1253 | if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { | |
1254 | /* DSI FCLK source is DSS_CLK_FCK */ | |
1255 | r = clk_get_rate(dsi->dss_clk); | |
1256 | } else { | |
1257 | /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ | |
1258 | r = dsi_get_pll_hsdiv_dsi_rate(dsidev); | |
1259 | } | |
1260 | ||
1261 | return r; | |
1262 | } | |
1263 | ||
1264 | static int dsi_lp_clock_calc(unsigned long dsi_fclk, | |
1265 | unsigned long lp_clk_min, unsigned long lp_clk_max, | |
1266 | struct dsi_lp_clock_info *lp_cinfo) | |
1267 | { | |
1268 | unsigned lp_clk_div; | |
1269 | unsigned long lp_clk; | |
1270 | ||
1271 | lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2); | |
1272 | lp_clk = dsi_fclk / 2 / lp_clk_div; | |
1273 | ||
1274 | if (lp_clk < lp_clk_min || lp_clk > lp_clk_max) | |
1275 | return -EINVAL; | |
1276 | ||
1277 | lp_cinfo->lp_clk_div = lp_clk_div; | |
1278 | lp_cinfo->lp_clk = lp_clk; | |
1279 | ||
1280 | return 0; | |
1281 | } | |
1282 | ||
/*
 * Program the LP clock divider chosen by the user (dsi->user_lp_cinfo)
 * into DSI_CLK_CTRL and record the resulting rate in
 * dsi->current_lp_cinfo. Returns 0 on success, -EINVAL for an
 * out-of-range divider.
 */
static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long dsi_fclk;
	unsigned lp_clk_div;
	unsigned long lp_clk;
	/* hardware-dependent upper bound for the LP divider field */
	unsigned lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);


	lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;

	if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
		return -EINVAL;

	dsi_fclk = dsi_fclk_rate(dsidev);

	/* LP clock runs at fclk / 2 / divider */
	lp_clk = dsi_fclk / 2 / lp_clk_div;

	DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
	dsi->current_lp_cinfo.lp_clk = lp_clk;
	dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;

	/* LP_CLK_DIVISOR */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);

	/* LP_RX_SYNCHRO_ENABLE */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);

	return 0;
}
1313 | ||
1314 | static void dsi_enable_scp_clk(struct platform_device *dsidev) | |
1315 | { | |
1316 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1317 | ||
1318 | if (dsi->scp_clk_refcount++ == 0) | |
1319 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */ | |
1320 | } | |
1321 | ||
1322 | static void dsi_disable_scp_clk(struct platform_device *dsidev) | |
1323 | { | |
1324 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1325 | ||
1326 | WARN_ON(dsi->scp_clk_refcount == 0); | |
1327 | if (--dsi->scp_clk_refcount == 0) | |
1328 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */ | |
1329 | } | |
1330 | ||
/* Values written to DSI_CLK_CTRL PLL_PWR_CMD (bits 31:30) and read back
 * from PLL_PWR_STATUS (bits 29:28). */
enum dsi_pll_power_state {
	DSI_PLL_POWER_OFF	= 0x0,	/* PLL and HSDIVs off */
	DSI_PLL_POWER_ON_HSCLK	= 0x1,	/* PLL on, HS clock output only */
	DSI_PLL_POWER_ON_ALL	= 0x2,	/* PLL and all outputs on */
	DSI_PLL_POWER_ON_DIV	= 0x3,	/* HSDIV outputs only */
};
1337 | ||
/*
 * Request a DSI PLL power state and busy-wait (up to ~1 ms, polling
 * every microsecond) until PLL_PWR_STATUS reflects it.
 *
 * Returns 0 on success, -ENODEV on timeout.
 */
static int dsi_pll_power(struct platform_device *dsidev,
		enum dsi_pll_power_state state)
{
	int t = 0;

	/* DSI-PLL power command 0x3 is not working */
	if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
			state == DSI_PLL_POWER_ON_DIV)
		state = DSI_PLL_POWER_ON_ALL;

	/* PLL_PWR_CMD */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);

	/* PLL_PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
		if (++t > 1000) {
			DSSERR("Failed to set DSI PLL power mode to %d\n",
					state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}
1363 | ||
1364 | ||
1365 | static void dsi_pll_calc_dsi_fck(struct dss_pll_clock_info *cinfo) | |
1366 | { | |
1367 | unsigned long max_dsi_fck; | |
1368 | ||
1369 | max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK); | |
1370 | ||
1371 | cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck); | |
1372 | cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI]; | |
1373 | } | |
1374 | ||
/*
 * Power up the DSI PLL: runtime-resume the module, enable the SCP
 * clock, enable the VDDS_DSI regulator, release the PLL from reset and
 * switch the PLL to POWER_ON_ALL.
 *
 * The dispc_pck_free_enable() toggling around the reset wait is a
 * hardware workaround — the sequence of these steps must not be
 * reordered. Returns 0 on success or a negative error code with all
 * acquired resources released.
 */
static int dsi_pll_enable(struct dss_pll *pll)
{
	struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
	struct platform_device *dsidev = dsi->pdev;
	int r = 0;

	DSSDBG("PLL init\n");

	r = dsi_regulator_init(dsidev);
	if (r)
		return r;

	r = dsi_runtime_get(dsidev);
	if (r)
		return r;

	/*
	 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
	 */
	dsi_enable_scp_clk(dsidev);

	if (!dsi->vdds_dsi_enabled) {
		r = regulator_enable(dsi->vdds_dsi_reg);
		if (r)
			goto err0;
		dsi->vdds_dsi_enabled = true;
	}

	/* XXX PLL does not come out of reset without this... */
	dispc_pck_free_enable(1);

	/* wait for PLL reset-done (DSI_PLL_STATUS bit 0) */
	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
		DSSERR("PLL not coming out of reset.\n");
		r = -ENODEV;
		dispc_pck_free_enable(0);
		goto err1;
	}

	/* XXX ... but if left on, we get problems when planes do not
	 * fill the whole display. No idea about this */
	dispc_pck_free_enable(0);

	r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL);

	if (r)
		goto err1;

	DSSDBG("PLL init done\n");

	return 0;
err1:
	/* undo the regulator enable done above, if we did it */
	if (dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
err0:
	dsi_disable_scp_clk(dsidev);
	dsi_runtime_put(dsidev);
	return r;
}
1435 | ||
/*
 * Power down the DSI PLL and release the resources taken by
 * dsi_pll_enable(). When @disconnect_lanes is true the VDDS_DSI
 * regulator is also disabled; the PLL is powered off before the
 * regulator so the teardown order mirrors the enable path.
 */
static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
	if (disconnect_lanes) {
		WARN_ON(!dsi->vdds_dsi_enabled);
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}

	dsi_disable_scp_clk(dsidev);
	dsi_runtime_put(dsidev);

	DSSDBG("PLL uninit done\n");
}
1452 | ||
1453 | static void dsi_pll_disable(struct dss_pll *pll) | |
1454 | { | |
1455 | struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); | |
1456 | struct platform_device *dsidev = dsi->pdev; | |
1457 | ||
1458 | dsi_pll_uninit(dsidev, true); | |
1459 | } | |
1460 | ||
/*
 * Dump the clock tree of one DSI module (PLL input, DCO, HSDIV outputs,
 * fclk source, DDR/TxByteClkHS/LP rates) into a debugfs seq_file.
 * Silently returns if the module cannot be runtime-resumed.
 */
static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
	enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
	int dsi_module = dsi->module_id;
	struct dss_pll *pll = &dsi->pll;

	dispc_clk_src = dss_get_dispc_clk_source();
	dsi_clk_src = dss_get_dsi_clk_source(dsi_module);

	if (dsi_runtime_get(dsidev))
		return;

	seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);

	seq_printf(s, "dsi pll clkin\t%lu\n", clk_get_rate(pll->clkin));

	seq_printf(s, "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n);

	seq_printf(s, "CLKIN4DDR\t%-16lum %u\n",
			cinfo->clkdco, cinfo->m);

	/* HSDIV output feeding DISPC; "(off)" when DISPC uses plain FCK */
	seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
			dss_feat_get_clk_source_name(dsi_module == 0 ?
				OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
				OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
			cinfo->clkout[HSDIV_DISPC],
			cinfo->mX[HSDIV_DISPC],
			dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	/* HSDIV output feeding the DSI fclk; "(off)" when DSI uses FCK */
	seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
			dss_feat_get_clk_source_name(dsi_module == 0 ?
				OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
				OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
			cinfo->clkout[HSDIV_DSI],
			cinfo->mX[HSDIV_DSI],
			dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s, "- DSI%d -\n", dsi_module + 1);

	seq_printf(s, "dsi fclk source = %s (%s)\n",
			dss_get_generic_clk_source_name(dsi_clk_src),
			dss_feat_get_clk_source_name(dsi_clk_src));

	seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));

	seq_printf(s, "DDR_CLK\t\t%lu\n",
			cinfo->clkdco / 4);

	seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));

	seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);

	dsi_runtime_put(dsidev);
}
1520 | ||
1521 | void dsi_dump_clocks(struct seq_file *s) | |
1522 | { | |
1523 | struct platform_device *dsidev; | |
1524 | int i; | |
1525 | ||
1526 | for (i = 0; i < MAX_NUM_DSI; i++) { | |
1527 | dsidev = dsi_get_dsidev_from_id(i); | |
1528 | if (dsidev) | |
1529 | dsi_dump_dsidev_clocks(dsidev, s); | |
1530 | } | |
1531 | } | |
1532 | ||
35b522cf | 1533 | #ifdef CONFIG_FB_OMAP2_DSS_COLLECT_IRQ_STATS |
f76ee892 TV |
/*
 * Dump (and reset) the collected interrupt statistics of one DSI module
 * into a debugfs seq_file. The stats are snapshotted under
 * irq_stats_lock and cleared, so each read shows counts since the
 * previous read.
 */
static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	struct dsi_irq_stats stats;

	spin_lock_irqsave(&dsi->irq_stats_lock, flags);

	/* snapshot and reset the counters atomically */
	stats = dsi->irq_stats;
	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
	dsi->irq_stats.last_reset = jiffies;

	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);

	seq_printf(s, "period %u ms\n",
			jiffies_to_msecs(jiffies - stats.last_reset));

	seq_printf(s, "irqs %d\n", stats.irq_count);
	/* print one top-level irq counter, indexed by its mask's bit number */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);

	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

	/* print one VC irq counter for each of the four virtual channels */
#define PIS(x) \
	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);

	seq_printf(s, "-- VC interrupts --\n");
	PIS(CS);
	PIS(ECC_CORR);
	PIS(PACKET_SENT);
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS

	/* print one ComplexIO irq counter */
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, \
			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);

	seq_printf(s, "-- CIO interrupts --\n");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS
}
1622 | ||
/* debugfs hook: irq stats of DSI module 0 */
static void dsi1_dump_irqs(struct seq_file *s)
{
	dsi_dump_dsidev_irqs(dsi_get_dsidev_from_id(0), s);
}
1629 | ||
/* debugfs hook: irq stats of DSI module 1 */
static void dsi2_dump_irqs(struct seq_file *s)
{
	dsi_dump_dsidev_irqs(dsi_get_dsidev_from_id(1), s);
}
1636 | #endif | |
1637 | ||
/*
 * Dump every DSI protocol-engine, virtual-channel, PHY and PLL register
 * of one module into a debugfs seq_file. Runtime-resumes the module
 * and enables the SCP clock for the duration (the PHY/PLL registers are
 * reached over the SCP interface).
 */
static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
		struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))

	if (dsi_runtime_get(dsidev))
		return;
	dsi_enable_scp_clk(dsidev);

	/* protocol engine registers */
	DUMPREG(DSI_REVISION);
	DUMPREG(DSI_SYSCONFIG);
	DUMPREG(DSI_SYSSTATUS);
	DUMPREG(DSI_IRQSTATUS);
	DUMPREG(DSI_IRQENABLE);
	DUMPREG(DSI_CTRL);
	DUMPREG(DSI_COMPLEXIO_CFG1);
	DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
	DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
	DUMPREG(DSI_CLK_CTRL);
	DUMPREG(DSI_TIMING1);
	DUMPREG(DSI_TIMING2);
	DUMPREG(DSI_VM_TIMING1);
	DUMPREG(DSI_VM_TIMING2);
	DUMPREG(DSI_VM_TIMING3);
	DUMPREG(DSI_CLK_TIMING);
	DUMPREG(DSI_TX_FIFO_VC_SIZE);
	DUMPREG(DSI_RX_FIFO_VC_SIZE);
	DUMPREG(DSI_COMPLEXIO_CFG2);
	DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
	DUMPREG(DSI_VM_TIMING4);
	DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
	DUMPREG(DSI_VM_TIMING5);
	DUMPREG(DSI_VM_TIMING6);
	DUMPREG(DSI_VM_TIMING7);
	DUMPREG(DSI_STOPCLK_TIMING);

	/* per-virtual-channel registers, VC0..VC3 */
	DUMPREG(DSI_VC_CTRL(0));
	DUMPREG(DSI_VC_TE(0));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
	DUMPREG(DSI_VC_IRQSTATUS(0));
	DUMPREG(DSI_VC_IRQENABLE(0));

	DUMPREG(DSI_VC_CTRL(1));
	DUMPREG(DSI_VC_TE(1));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
	DUMPREG(DSI_VC_IRQSTATUS(1));
	DUMPREG(DSI_VC_IRQENABLE(1));

	DUMPREG(DSI_VC_CTRL(2));
	DUMPREG(DSI_VC_TE(2));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
	DUMPREG(DSI_VC_IRQSTATUS(2));
	DUMPREG(DSI_VC_IRQENABLE(2));

	DUMPREG(DSI_VC_CTRL(3));
	DUMPREG(DSI_VC_TE(3));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
	DUMPREG(DSI_VC_IRQSTATUS(3));
	DUMPREG(DSI_VC_IRQENABLE(3));

	/* PHY registers (SCP interface) */
	DUMPREG(DSI_DSIPHY_CFG0);
	DUMPREG(DSI_DSIPHY_CFG1);
	DUMPREG(DSI_DSIPHY_CFG2);
	DUMPREG(DSI_DSIPHY_CFG5);

	/* PLL registers (SCP interface) */
	DUMPREG(DSI_PLL_CONTROL);
	DUMPREG(DSI_PLL_STATUS);
	DUMPREG(DSI_PLL_GO);
	DUMPREG(DSI_PLL_CONFIGURATION1);
	DUMPREG(DSI_PLL_CONFIGURATION2);

	dsi_disable_scp_clk(dsidev);
	dsi_runtime_put(dsidev);
#undef DUMPREG
}
1721 | ||
/* debugfs hook: register dump of DSI module 0 */
static void dsi1_dump_regs(struct seq_file *s)
{
	dsi_dump_dsidev_regs(dsi_get_dsidev_from_id(0), s);
}
1728 | ||
/* debugfs hook: register dump of DSI module 1 */
static void dsi2_dump_regs(struct seq_file *s)
{
	dsi_dump_dsidev_regs(dsi_get_dsidev_from_id(1), s);
}
1735 | ||
/* Values written to DSI_COMPLEXIO_CFG1 PWR_CMD (bits 28:27) and read
 * back from PWR_STATUS (bits 26:25). */
enum dsi_cio_power_state {
	DSI_COMPLEXIO_POWER_OFF		= 0x0,	/* ComplexIO off */
	DSI_COMPLEXIO_POWER_ON		= 0x1,	/* ComplexIO on */
	DSI_COMPLEXIO_POWER_ULPS	= 0x2,	/* ultra-low-power state */
};
1741 | ||
1742 | static int dsi_cio_power(struct platform_device *dsidev, | |
1743 | enum dsi_cio_power_state state) | |
1744 | { | |
1745 | int t = 0; | |
1746 | ||
1747 | /* PWR_CMD */ | |
1748 | REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27); | |
1749 | ||
1750 | /* PWR_STATUS */ | |
1751 | while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1), | |
1752 | 26, 25) != state) { | |
1753 | if (++t > 1000) { | |
1754 | DSSERR("failed to set complexio power state to " | |
1755 | "%d\n", state); | |
1756 | return -ENODEV; | |
1757 | } | |
1758 | udelay(1); | |
1759 | } | |
1760 | ||
1761 | return 0; | |
1762 | } | |
1763 | ||
1764 | static unsigned dsi_get_line_buf_size(struct platform_device *dsidev) | |
1765 | { | |
1766 | int val; | |
1767 | ||
1768 | /* line buffer on OMAP3 is 1024 x 24bits */ | |
1769 | /* XXX: for some reason using full buffer size causes | |
1770 | * considerable TX slowdown with update sizes that fill the | |
1771 | * whole buffer */ | |
1772 | if (!dss_has_feature(FEAT_DSI_GNQ)) | |
1773 | return 1023 * 3; | |
1774 | ||
1775 | val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */ | |
1776 | ||
1777 | switch (val) { | |
1778 | case 1: | |
1779 | return 512 * 3; /* 512x24 bits */ | |
1780 | case 2: | |
1781 | return 682 * 3; /* 682x24 bits */ | |
1782 | case 3: | |
1783 | return 853 * 3; /* 853x24 bits */ | |
1784 | case 4: | |
1785 | return 1024 * 3; /* 1024x24 bits */ | |
1786 | case 5: | |
1787 | return 1194 * 3; /* 1194x24 bits */ | |
1788 | case 6: | |
1789 | return 1365 * 3; /* 1365x24 bits */ | |
1790 | case 7: | |
1791 | return 1920 * 3; /* 1920x24 bits */ | |
1792 | default: | |
1793 | BUG(); | |
1794 | return 0; | |
1795 | } | |
1796 | } | |
1797 | ||
/*
 * Program DSI_COMPLEXIO_CFG1 with the lane-to-function mapping from
 * dsi->lanes. Each lane occupies a 4-bit slot: bits [off+2:off] hold
 * the lane number + 1 (0 = unused) and bit [off+3] the polarity.
 *
 * Returns 0 on success, -EINVAL if a required function has no lane
 * assigned.
 */
static int dsi_set_lane_config(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	/* register bit offset of each function slot: CLK, DATA1..DATA4 */
	static const u8 offsets[] = { 0, 4, 8, 12, 16 };
	static const enum dsi_lane_function functions[] = {
		DSI_LANE_CLK,
		DSI_LANE_DATA1,
		DSI_LANE_DATA2,
		DSI_LANE_DATA3,
		DSI_LANE_DATA4,
	};
	u32 r;
	int i;

	r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);

	for (i = 0; i < dsi->num_lanes_used; ++i) {
		unsigned offset = offsets[i];
		unsigned polarity, lane_number;
		unsigned t;

		/* find which physical lane carries this function */
		for (t = 0; t < dsi->num_lanes_supported; ++t)
			if (dsi->lanes[t].function == functions[i])
				break;

		if (t == dsi->num_lanes_supported)
			return -EINVAL;

		lane_number = t;
		polarity = dsi->lanes[t].polarity;

		/* lane number is stored 1-based; 0 means "unused" */
		r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
		r = FLD_MOD(r, polarity, offset + 3, offset + 3);
	}

	/* clear the unused lanes */
	for (; i < dsi->num_lanes_supported; ++i) {
		unsigned offset = offsets[i];

		r = FLD_MOD(r, 0, offset + 2, offset);
		r = FLD_MOD(r, 0, offset + 3, offset + 3);
	}

	dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);

	return 0;
}
1845 | ||
1846 | static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns) | |
1847 | { | |
1848 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1849 | ||
1850 | /* convert time in ns to ddr ticks, rounding up */ | |
1851 | unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4; | |
1852 | return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; | |
1853 | } | |
1854 | ||
1855 | static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr) | |
1856 | { | |
1857 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
1858 | ||
1859 | unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4; | |
1860 | return ddr * 1000 * 1000 / (ddr_clk / 1000); | |
1861 | } | |
1862 | ||
/*
 * Compute and program the DSI PHY timing registers DSIPHY_CFG0..CFG2.
 *
 * All values are expressed in DDR clock ticks, derived from fixed
 * nanosecond targets chosen to fall between the D-PHY min/max limits
 * quoted in the per-parameter comments below.  The "+N" terms add the
 * UI-dependent part of each limit (1 DDR tick = 2 UI).
 */
static void dsi_cio_timings(struct platform_device *dsidev)
{
	u32 r;
	u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
	u32 tlpx_half, tclk_trail, tclk_zero;
	u32 tclk_prepare;

	/* calculate timings */

	/* 1 * DDR_CLK = 2 * UI */

	/* min 40ns + 4*UI	max 85ns + 6*UI */
	ths_prepare = ns2ddr(dsidev, 70) + 2;

	/* min 145ns + 10*UI */
	ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;

	/* min max(8*UI, 60ns+4*UI) */
	ths_trail = ns2ddr(dsidev, 60) + 5;

	/* min 100ns */
	ths_exit = ns2ddr(dsidev, 145);

	/* tlpx min 50n */
	tlpx_half = ns2ddr(dsidev, 25);

	/* min 60ns */
	tclk_trail = ns2ddr(dsidev, 60) + 2;

	/* min 38ns, max 95ns */
	tclk_prepare = ns2ddr(dsidev, 65);

	/* min tclk-prepare + tclk-zero = 300ns */
	tclk_zero = ns2ddr(dsidev, 260);

	DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
		ths_prepare, ddr2ns(dsidev, ths_prepare),
		ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
	DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
			ths_trail, ddr2ns(dsidev, ths_trail),
			ths_exit, ddr2ns(dsidev, ths_exit));

	DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
			"tclk_zero %u (%uns)\n",
			tlpx_half, ddr2ns(dsidev, tlpx_half),
			tclk_trail, ddr2ns(dsidev, tclk_trail),
			tclk_zero, ddr2ns(dsidev, tclk_zero));
	DSSDBG("tclk_prepare %u (%uns)\n",
			tclk_prepare, ddr2ns(dsidev, tclk_prepare));

	/* program timings */

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	r = FLD_MOD(r, ths_prepare, 31, 24);
	r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
	r = FLD_MOD(r, ths_trail, 15, 8);
	r = FLD_MOD(r, ths_exit, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	r = FLD_MOD(r, tlpx_half, 20, 16);
	r = FLD_MOD(r, tclk_trail, 15, 8);
	r = FLD_MOD(r, tclk_zero, 7, 0);

	/* On HW with the DCC feature, bypass DCC and halve the PHY input
	 * clock instead; the bit meanings are per the TRM field names. */
	if (dss_has_feature(FEAT_DSI_PHY_DCC)) {
		r = FLD_MOD(r, 0, 21, 21);	/* DCCEN = disable */
		r = FLD_MOD(r, 1, 22, 22);	/* CLKINP_DIVBY2EN = enable */
		r = FLD_MOD(r, 1, 23, 23);	/* CLKINP_SEL = enable */
	}

	dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
	r = FLD_MOD(r, tclk_prepare, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
}
1939 | ||
/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
/*
 * Force the DSI lane outputs to fixed levels via the SCP lane-override
 * mechanism (used e.g. for the manual ULPS exit sequence): lanes set in
 * @mask_p get their positive line driven, lanes in @mask_n their
 * negative line.  The per-lane polarity config decides which physical
 * pin is "positive".
 */
static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
		unsigned mask_p, unsigned mask_n)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;
	u32 l;
	/* override field is narrower on 3-lane HW: top bit 22 vs 26 */
	u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;

	l = 0;

	/* each lane occupies two adjacent bits (DY then DX); a swapped
	 * polarity flips which of the two bits maps to the P line */
	for (i = 0; i < dsi->num_lanes_supported; ++i) {
		unsigned p = dsi->lanes[i].polarity;

		if (mask_p & (1 << i))
			l |= 1 << (i * 2 + (p ? 0 : 1));

		if (mask_n & (1 << i))
			l |= 1 << (i * 2 + (p ? 1 : 0));
	}

	/*
	 * Bits in REGLPTXSCPDAT4TO0DXDY:
	 * 17: DY0 18: DX0
	 * 19: DY1 20: DX1
	 * 21: DY2 22: DX2
	 * 23: DY3 24: DX3
	 * 25: DY4 26: DX4
	 */

	/* Set the lane override configuration */

	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);

	/* Enable lane override */

	/* ENLPTXSCPDAT */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
}
1980 | ||
/*
 * Undo dsi_cio_enable_lane_override(): first switch the override off,
 * then clear the override data field back to its reset value.
 */
static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
{
	/* Disable lane override */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
	/* Reset the lane override configuration */
	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
}
1989 | ||
/*
 * Poll DSI_DSIPHY_CFG5 until the TXCLKESC reset-done bit is set for
 * every lane that is in use, or give up after 100000 polls.
 *
 * The bit position per lane differs between HW generations
 * (FEAT_DSI_REVERSE_TXCLKESC selects the older, reversed layout).
 *
 * Returns 0 on success, -EIO if any in-use lane never leaves reset
 * (each stuck lane is reported via DSSERR).
 */
static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int t, i;
	bool in_use[DSI_MAX_NR_LANES];
	static const u8 offsets_old[] = { 28, 27, 26 };
	static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
	const u8 *offsets;

	if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC))
		offsets = offsets_old;
	else
		offsets = offsets_new;

	/* only wait on lanes that are actually configured */
	for (i = 0; i < dsi->num_lanes_supported; ++i)
		in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;

	t = 100000;
	while (true) {
		u32 l;
		int ok;

		l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

		/* count lanes that are either unused or out of reset */
		ok = 0;
		for (i = 0; i < dsi->num_lanes_supported; ++i) {
			if (!in_use[i] || (l & (1 << offsets[i])))
				ok++;
		}

		if (ok == dsi->num_lanes_supported)
			break;

		if (--t == 0) {
			/* timed out: name each lane still stuck in reset */
			for (i = 0; i < dsi->num_lanes_supported; ++i) {
				if (!in_use[i] || (l & (1 << offsets[i])))
					continue;

				DSSERR("CIO TXCLKESC%d domain not coming " \
						"out of reset\n", i);
			}
			return -EIO;
		}
	}

	return 0;
}
2037 | ||
2038 | /* return bitmask of enabled lanes, lane0 being the lsb */ | |
2039 | static unsigned dsi_get_lane_mask(struct platform_device *dsidev) | |
2040 | { | |
2041 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2042 | unsigned mask = 0; | |
2043 | int i; | |
2044 | ||
2045 | for (i = 0; i < dsi->num_lanes_supported; ++i) { | |
2046 | if (dsi->lanes[i].function != DSI_LANE_UNUSED) | |
2047 | mask |= 1 << i; | |
2048 | } | |
2049 | ||
2050 | return mask; | |
2051 | } | |
2052 | ||
2053 | static int dsi_cio_init(struct platform_device *dsidev) | |
2054 | { | |
2055 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2056 | int r; | |
2057 | u32 l; | |
2058 | ||
2059 | DSSDBG("DSI CIO init starts"); | |
2060 | ||
2061 | r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev)); | |
2062 | if (r) | |
2063 | return r; | |
2064 | ||
2065 | dsi_enable_scp_clk(dsidev); | |
2066 | ||
2067 | /* A dummy read using the SCP interface to any DSIPHY register is | |
2068 | * required after DSIPHY reset to complete the reset of the DSI complex | |
2069 | * I/O. */ | |
2070 | dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); | |
2071 | ||
2072 | if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) { | |
2073 | DSSERR("CIO SCP Clock domain not coming out of reset.\n"); | |
2074 | r = -EIO; | |
2075 | goto err_scp_clk_dom; | |
2076 | } | |
2077 | ||
2078 | r = dsi_set_lane_config(dsidev); | |
2079 | if (r) | |
2080 | goto err_scp_clk_dom; | |
2081 | ||
2082 | /* set TX STOP MODE timer to maximum for this operation */ | |
2083 | l = dsi_read_reg(dsidev, DSI_TIMING1); | |
2084 | l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ | |
2085 | l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */ | |
2086 | l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */ | |
2087 | l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ | |
2088 | dsi_write_reg(dsidev, DSI_TIMING1, l); | |
2089 | ||
2090 | if (dsi->ulps_enabled) { | |
2091 | unsigned mask_p; | |
2092 | int i; | |
2093 | ||
2094 | DSSDBG("manual ulps exit\n"); | |
2095 | ||
2096 | /* ULPS is exited by Mark-1 state for 1ms, followed by | |
2097 | * stop state. DSS HW cannot do this via the normal | |
2098 | * ULPS exit sequence, as after reset the DSS HW thinks | |
2099 | * that we are not in ULPS mode, and refuses to send the | |
2100 | * sequence. So we need to send the ULPS exit sequence | |
2101 | * manually by setting positive lines high and negative lines | |
2102 | * low for 1ms. | |
2103 | */ | |
2104 | ||
2105 | mask_p = 0; | |
2106 | ||
2107 | for (i = 0; i < dsi->num_lanes_supported; ++i) { | |
2108 | if (dsi->lanes[i].function == DSI_LANE_UNUSED) | |
2109 | continue; | |
2110 | mask_p |= 1 << i; | |
2111 | } | |
2112 | ||
2113 | dsi_cio_enable_lane_override(dsidev, mask_p, 0); | |
2114 | } | |
2115 | ||
2116 | r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON); | |
2117 | if (r) | |
2118 | goto err_cio_pwr; | |
2119 | ||
2120 | if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) { | |
2121 | DSSERR("CIO PWR clock domain not coming out of reset.\n"); | |
2122 | r = -ENODEV; | |
2123 | goto err_cio_pwr_dom; | |
2124 | } | |
2125 | ||
2126 | dsi_if_enable(dsidev, true); | |
2127 | dsi_if_enable(dsidev, false); | |
2128 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */ | |
2129 | ||
2130 | r = dsi_cio_wait_tx_clk_esc_reset(dsidev); | |
2131 | if (r) | |
2132 | goto err_tx_clk_esc_rst; | |
2133 | ||
2134 | if (dsi->ulps_enabled) { | |
2135 | /* Keep Mark-1 state for 1ms (as per DSI spec) */ | |
2136 | ktime_t wait = ns_to_ktime(1000 * 1000); | |
2137 | set_current_state(TASK_UNINTERRUPTIBLE); | |
2138 | schedule_hrtimeout(&wait, HRTIMER_MODE_REL); | |
2139 | ||
2140 | /* Disable the override. The lanes should be set to Mark-11 | |
2141 | * state by the HW */ | |
2142 | dsi_cio_disable_lane_override(dsidev); | |
2143 | } | |
2144 | ||
2145 | /* FORCE_TX_STOP_MODE_IO */ | |
2146 | REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15); | |
2147 | ||
2148 | dsi_cio_timings(dsidev); | |
2149 | ||
2150 | if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { | |
2151 | /* DDR_CLK_ALWAYS_ON */ | |
2152 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, | |
2153 | dsi->vm_timings.ddr_clk_always_on, 13, 13); | |
2154 | } | |
2155 | ||
2156 | dsi->ulps_enabled = false; | |
2157 | ||
2158 | DSSDBG("CIO init done\n"); | |
2159 | ||
2160 | return 0; | |
2161 | ||
2162 | err_tx_clk_esc_rst: | |
2163 | REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */ | |
2164 | err_cio_pwr_dom: | |
2165 | dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); | |
2166 | err_cio_pwr: | |
2167 | if (dsi->ulps_enabled) | |
2168 | dsi_cio_disable_lane_override(dsidev); | |
2169 | err_scp_clk_dom: | |
2170 | dsi_disable_scp_clk(dsidev); | |
2171 | dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev)); | |
2172 | return r; | |
2173 | } | |
2174 | ||
/*
 * Tear down the complex I/O: stop the free-running DDR clock, power the
 * CIO off, and release the SCP clock and pads (reverse of dsi_cio_init).
 */
static void dsi_cio_uninit(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* DDR_CLK_ALWAYS_ON */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);

	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
	dsi_disable_scp_clk(dsidev);
	dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
}
2186 | ||
/*
 * Partition the TX FIFO between the four virtual channels.  Each size
 * is in FIFO units; the four sizes must sum to at most 4.  Each VC's
 * byte in DSI_TX_FIFO_VC_SIZE holds its start offset (bits 2:0) and
 * its size (bits 7:4).  BUG()s on an over-committed configuration.
 */
static void dsi_config_tx_fifo(struct platform_device *dsidev,
		enum fifo_size size1, enum fifo_size size2,
		enum fifo_size size3, enum fifo_size size4)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 r = 0;
	int add = 0;	/* running start offset of the next VC's region */
	int i;

	dsi->vc[0].tx_fifo_size = size1;
	dsi->vc[1].tx_fifo_size = size2;
	dsi->vc[2].tx_fifo_size = size3;
	dsi->vc[3].tx_fifo_size = size4;

	for (i = 0; i < 4; i++) {
		u8 v;
		int size = dsi->vc[i].tx_fifo_size;

		if (add + size > 4) {
			DSSERR("Illegal FIFO configuration\n");
			BUG();
			return;
		}

		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
		/*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
	}

	dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
}
2219 | ||
2220 | static void dsi_config_rx_fifo(struct platform_device *dsidev, | |
2221 | enum fifo_size size1, enum fifo_size size2, | |
2222 | enum fifo_size size3, enum fifo_size size4) | |
2223 | { | |
2224 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2225 | u32 r = 0; | |
2226 | int add = 0; | |
2227 | int i; | |
2228 | ||
2229 | dsi->vc[0].rx_fifo_size = size1; | |
2230 | dsi->vc[1].rx_fifo_size = size2; | |
2231 | dsi->vc[2].rx_fifo_size = size3; | |
2232 | dsi->vc[3].rx_fifo_size = size4; | |
2233 | ||
2234 | for (i = 0; i < 4; i++) { | |
2235 | u8 v; | |
2236 | int size = dsi->vc[i].rx_fifo_size; | |
2237 | ||
2238 | if (add + size > 4) { | |
2239 | DSSERR("Illegal FIFO configuration\n"); | |
2240 | BUG(); | |
2241 | return; | |
2242 | } | |
2243 | ||
2244 | v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); | |
2245 | r |= v << (8 * i); | |
2246 | /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */ | |
2247 | add += size; | |
2248 | } | |
2249 | ||
2250 | dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r); | |
2251 | } | |
2252 | ||
2253 | static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev) | |
2254 | { | |
2255 | u32 r; | |
2256 | ||
2257 | r = dsi_read_reg(dsidev, DSI_TIMING1); | |
2258 | r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ | |
2259 | dsi_write_reg(dsidev, DSI_TIMING1, r); | |
2260 | ||
2261 | if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) { | |
2262 | DSSERR("TX_STOP bit not going down\n"); | |
2263 | return -EIO; | |
2264 | } | |
2265 | ||
2266 | return 0; | |
2267 | } | |
2268 | ||
2269 | static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel) | |
2270 | { | |
2271 | return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0); | |
2272 | } | |
2273 | ||
2274 | static void dsi_packet_sent_handler_vp(void *data, u32 mask) | |
2275 | { | |
2276 | struct dsi_packet_sent_handler_data *vp_data = | |
2277 | (struct dsi_packet_sent_handler_data *) data; | |
2278 | struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev); | |
2279 | const int channel = dsi->update_channel; | |
2280 | u8 bit = dsi->te_enabled ? 30 : 31; | |
2281 | ||
2282 | if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0) | |
2283 | complete(vp_data->completion); | |
2284 | } | |
2285 | ||
/*
 * Wait for an in-flight video-port frame transfer on @channel to finish,
 * using the PACKET_SENT interrupt and a 10ms timeout.
 * Returns 0 on success or -EIO on timeout.
 */
static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	DECLARE_COMPLETION_ONSTACK(completion);
	struct dsi_packet_sent_handler_data vp_data = {
		.dsidev = dsidev,
		.completion = &completion
	};
	int r = 0;
	u8 bit;

	/* same bit selection as dsi_packet_sent_handler_vp() */
	bit = dsi->te_enabled ? 30 : 31;

	r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
	if (r)
		goto err0;

	/* Wait for completion only if TE_EN/TE_START is still set */
	if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
		if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(10)) == 0) {
			DSSERR("Failed to complete previous frame transfer\n");
			r = -EIO;
			goto err1;
		}
	}

	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);

	return 0;
err1:
	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
	return r;
}
2324 | ||
2325 | static void dsi_packet_sent_handler_l4(void *data, u32 mask) | |
2326 | { | |
2327 | struct dsi_packet_sent_handler_data *l4_data = | |
2328 | (struct dsi_packet_sent_handler_data *) data; | |
2329 | struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev); | |
2330 | const int channel = dsi->update_channel; | |
2331 | ||
2332 | if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0) | |
2333 | complete(l4_data->completion); | |
2334 | } | |
2335 | ||
/*
 * Wait for a pending L4-sourced transmission on @channel to drain from
 * the TX FIFO, using the PACKET_SENT interrupt and a 10ms timeout.
 * Returns 0 on success or -EIO on timeout.
 */
static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct dsi_packet_sent_handler_data l4_data = {
		.dsidev = dsidev,
		.completion = &completion
	};
	int r = 0;

	r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
	if (r)
		goto err0;

	/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
		if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(10)) == 0) {
			DSSERR("Failed to complete previous l4 transfer\n");
			r = -EIO;
			goto err1;
		}
	}

	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);

	return 0;
err1:
	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
	return r;
}
2370 | ||
2371 | static int dsi_sync_vc(struct platform_device *dsidev, int channel) | |
2372 | { | |
2373 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2374 | ||
2375 | WARN_ON(!dsi_bus_is_locked(dsidev)); | |
2376 | ||
2377 | WARN_ON(in_interrupt()); | |
2378 | ||
2379 | if (!dsi_vc_is_enabled(dsidev, channel)) | |
2380 | return 0; | |
2381 | ||
2382 | switch (dsi->vc[channel].source) { | |
2383 | case DSI_VC_SOURCE_VP: | |
2384 | return dsi_sync_vc_vp(dsidev, channel); | |
2385 | case DSI_VC_SOURCE_L4: | |
2386 | return dsi_sync_vc_l4(dsidev, channel); | |
2387 | default: | |
2388 | BUG(); | |
2389 | return -EINVAL; | |
2390 | } | |
2391 | } | |
2392 | ||
2393 | static int dsi_vc_enable(struct platform_device *dsidev, int channel, | |
2394 | bool enable) | |
2395 | { | |
2396 | DSSDBG("dsi_vc_enable channel %d, enable %d\n", | |
2397 | channel, enable); | |
2398 | ||
2399 | enable = enable ? 1 : 0; | |
2400 | ||
2401 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0); | |
2402 | ||
2403 | if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), | |
2404 | 0, enable) != enable) { | |
2405 | DSSERR("Failed to set dsi_vc_enable to %d\n", enable); | |
2406 | return -EIO; | |
2407 | } | |
2408 | ||
2409 | return 0; | |
2410 | } | |
2411 | ||
2412 | static void dsi_vc_initial_config(struct platform_device *dsidev, int channel) | |
2413 | { | |
2414 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2415 | u32 r; | |
2416 | ||
2417 | DSSDBG("Initial config of virtual channel %d", channel); | |
2418 | ||
2419 | r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel)); | |
2420 | ||
2421 | if (FLD_GET(r, 15, 15)) /* VC_BUSY */ | |
2422 | DSSERR("VC(%d) busy when trying to configure it!\n", | |
2423 | channel); | |
2424 | ||
2425 | r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */ | |
2426 | r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */ | |
2427 | r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */ | |
2428 | r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */ | |
2429 | r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */ | |
2430 | r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */ | |
2431 | r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */ | |
2432 | if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH)) | |
2433 | r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */ | |
2434 | ||
2435 | r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ | |
2436 | r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ | |
2437 | ||
2438 | dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r); | |
2439 | ||
2440 | dsi->vc[channel].source = DSI_VC_SOURCE_L4; | |
2441 | } | |
2442 | ||
2443 | static int dsi_vc_config_source(struct platform_device *dsidev, int channel, | |
2444 | enum dsi_vc_source source) | |
2445 | { | |
2446 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2447 | ||
2448 | if (dsi->vc[channel].source == source) | |
2449 | return 0; | |
2450 | ||
2451 | DSSDBG("Source config of virtual channel %d", channel); | |
2452 | ||
2453 | dsi_sync_vc(dsidev, channel); | |
2454 | ||
2455 | dsi_vc_enable(dsidev, channel, 0); | |
2456 | ||
2457 | /* VC_BUSY */ | |
2458 | if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) { | |
2459 | DSSERR("vc(%d) busy when trying to config for VP\n", channel); | |
2460 | return -EIO; | |
2461 | } | |
2462 | ||
2463 | /* SOURCE, 0 = L4, 1 = video port */ | |
2464 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1); | |
2465 | ||
2466 | /* DCS_CMD_ENABLE */ | |
2467 | if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) { | |
2468 | bool enable = source == DSI_VC_SOURCE_VP; | |
2469 | REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30); | |
2470 | } | |
2471 | ||
2472 | dsi_vc_enable(dsidev, channel, 1); | |
2473 | ||
2474 | dsi->vc[channel].source = source; | |
2475 | ||
2476 | return 0; | |
2477 | } | |
2478 | ||
/*
 * Switch @channel between high-speed and low-power transmission mode
 * (MODE_SPEED, DSI_VC_CTRL bit 9).  Both the VC and the interface must
 * be disabled while the bit is changed, hence the disable/enable
 * bracketing below.
 */
static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
		bool enable)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);

	WARN_ON(!dsi_bus_is_locked(dsidev));

	dsi_vc_enable(dsidev, channel, 0);
	dsi_if_enable(dsidev, 0);

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);

	dsi_vc_enable(dsidev, channel, 1);
	dsi_if_enable(dsidev, 1);

	dsi_force_tx_stop_mode_io(dsidev);

	/* start the DDR clock by sending a NULL packet */
	if (dsi->vm_timings.ddr_clk_always_on && enable)
		dsi_vc_send_null(dssdev, channel);
}
2503 | ||
2504 | static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel) | |
2505 | { | |
2506 | while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { | |
2507 | u32 val; | |
2508 | val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); | |
2509 | DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", | |
2510 | (val >> 0) & 0xff, | |
2511 | (val >> 8) & 0xff, | |
2512 | (val >> 16) & 0xff, | |
2513 | (val >> 24) & 0xff); | |
2514 | } | |
2515 | } | |
2516 | ||
/*
 * Decode and log every error flag carried by a MIPI DSI "acknowledge
 * with error" response.  @err is the 16-bit error field; each bit maps
 * to one condition from the DSI spec's error report table.
 */
static void dsi_show_rx_ack_with_err(u16 err)
{
	DSSERR("\tACK with ERROR (%#x):\n", err);
	if (err & (1 << 0))
		DSSERR("\t\tSoT Error\n");
	if (err & (1 << 1))
		DSSERR("\t\tSoT Sync Error\n");
	if (err & (1 << 2))
		DSSERR("\t\tEoT Sync Error\n");
	if (err & (1 << 3))
		DSSERR("\t\tEscape Mode Entry Command Error\n");
	if (err & (1 << 4))
		DSSERR("\t\tLP Transmit Sync Error\n");
	if (err & (1 << 5))
		DSSERR("\t\tHS Receive Timeout Error\n");
	if (err & (1 << 6))
		DSSERR("\t\tFalse Control Error\n");
	if (err & (1 << 7))
		DSSERR("\t\t(reserved7)\n");
	if (err & (1 << 8))
		DSSERR("\t\tECC Error, single-bit (corrected)\n");
	if (err & (1 << 9))
		DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
	if (err & (1 << 10))
		DSSERR("\t\tChecksum Error\n");
	if (err & (1 << 11))
		DSSERR("\t\tData type not recognized\n");
	if (err & (1 << 12))
		DSSERR("\t\tInvalid VC ID\n");
	if (err & (1 << 13))
		DSSERR("\t\tInvalid Transmission Length\n");
	if (err & (1 << 14))
		DSSERR("\t\t(reserved14)\n");
	if (err & (1 << 15))
		DSSERR("\t\tDSI Protocol Violation\n");
}
2553 | ||
/*
 * Drain and log everything waiting in @channel's RX FIFO.  Each header
 * word is classified by its data type (bits 5:0) and dumped at error
 * level; long-read payloads are drained via dsi_vc_flush_long_data().
 * NOTE: despite the u16 return type, this always returns 0.
 */
static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
		int channel)
{
	/* RX_FIFO_NOT_EMPTY */
	while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		u32 val;
		u8 dt;
		val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
		DSSERR("\trawval %#08x\n", val);
		dt = FLD_GET(val, 5, 0);	/* data type of the response */
		if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
			u16 err = FLD_GET(val, 23, 8);
			dsi_show_rx_ack_with_err(err);
		} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
			DSSERR("\tDCS short response, 1 byte: %#x\n",
					FLD_GET(val, 23, 8));
		} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
			DSSERR("\tDCS short response, 2 byte: %#x\n",
					FLD_GET(val, 23, 8));
		} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
			DSSERR("\tDCS long response, len %d\n",
					FLD_GET(val, 23, 8));
			dsi_vc_flush_long_data(dsidev, channel);
		} else {
			DSSERR("\tunknown datatype 0x%02x\n", dt);
		}
	}
	return 0;
}
2583 | ||
/*
 * Trigger a Bus TurnAround on @channel (BTA_EN, DSI_VC_CTRL bit 6).
 * Any stale data in the RX FIFO is dumped first so the peripheral's
 * response is not mixed with old bytes.  Always returns 0.
 */
static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->debug_write || dsi->debug_read)
		DSSDBG("dsi_vc_send_bta %d\n", channel);

	WARN_ON(!dsi_bus_is_locked(dsidev));

	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
		dsi_vc_flush_receive_data(dsidev, channel);
	}

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */

	/* flush posted write */
	dsi_read_reg(dsidev, DSI_VC_CTRL(channel));

	return 0;
}
2606 | ||
/*
 * Send a BTA on @channel and wait (up to 500ms) for its completion
 * interrupt, then check for DSI errors raised meanwhile.
 * Returns 0 on success or -EIO on timeout/error; the two ISRs are
 * always unregistered via the layered error labels.
 */
static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int r = 0;
	u32 err;

	/* completion fires on the VC's BTA interrupt... */
	r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
			&completion, DSI_VC_IRQ_BTA);
	if (r)
		goto err0;

	/* ...and also on any global DSI error, so we never hang the full
	 * timeout on a failed turnaround */
	r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
			DSI_IRQ_ERROR_MASK);
	if (r)
		goto err1;

	r = dsi_vc_send_bta(dsidev, channel);
	if (r)
		goto err2;

	if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(500)) == 0) {
		DSSERR("Failed to receive BTA\n");
		r = -EIO;
		goto err2;
	}

	err = dsi_get_errors(dsidev);
	if (err) {
		DSSERR("Error while sending BTA: %x\n", err);
		r = -EIO;
		goto err2;
	}
err2:
	dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
			DSI_IRQ_ERROR_MASK);
err1:
	dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
			&completion, DSI_VC_IRQ_BTA);
err0:
	return r;
}
2650 | ||
2651 | static inline void dsi_vc_write_long_header(struct platform_device *dsidev, | |
2652 | int channel, u8 data_type, u16 len, u8 ecc) | |
2653 | { | |
2654 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2655 | u32 val; | |
2656 | u8 data_id; | |
2657 | ||
2658 | WARN_ON(!dsi_bus_is_locked(dsidev)); | |
2659 | ||
2660 | data_id = data_type | dsi->vc[channel].vc_id << 6; | |
2661 | ||
2662 | val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | | |
2663 | FLD_VAL(ecc, 31, 24); | |
2664 | ||
2665 | dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val); | |
2666 | } | |
2667 | ||
2668 | static inline void dsi_vc_write_long_payload(struct platform_device *dsidev, | |
2669 | int channel, u8 b1, u8 b2, u8 b3, u8 b4) | |
2670 | { | |
2671 | u32 val; | |
2672 | ||
2673 | val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0; | |
2674 | ||
2675 | /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", | |
2676 | b1, b2, b3, b4, val); */ | |
2677 | ||
2678 | dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val); | |
2679 | } | |
2680 | ||
/*
 * Transmit a long packet of @data_type with @len payload bytes on
 * @channel through the L4 path: header word first, then the payload in
 * 32-bit chunks, with a final partial word for len % 4 leftover bytes.
 * Returns 0 on success or -EINVAL if the packet exceeds the VC's TX
 * FIFO (fifo units * 32 words * 4 bytes, minus the 4-byte header).
 */
static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
		u8 data_type, u8 *data, u16 len, u8 ecc)
{
	/*u32 val; */
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;
	u8 *p;
	int r = 0;
	u8 b1, b2, b3, b4;

	if (dsi->debug_write)
		DSSDBG("dsi_vc_send_long, %d bytes\n", len);

	/* len + header */
	if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
		DSSERR("unable to send long packet: packet too long.\n");
		return -EINVAL;
	}

	dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);

	dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);

	p = data;
	/* full 4-byte words first */
	for (i = 0; i < len >> 2; i++) {
		if (dsi->debug_write)
			DSSDBG("\tsending full packet %d\n", i);

		b1 = *p++;
		b2 = *p++;
		b3 = *p++;
		b4 = *p++;

		dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
	}

	/* then the 1-3 remaining bytes, zero-padded to a full word */
	i = len % 4;
	if (i) {
		b1 = 0; b2 = 0; b3 = 0;

		if (dsi->debug_write)
			DSSDBG("\tsending remainder bytes %d\n", i);

		switch (i) {
		case 3:
			b1 = *p++;
			b2 = *p++;
			b3 = *p++;
			break;
		case 2:
			b1 = *p++;
			b2 = *p++;
			break;
		case 1:
			b1 = *p++;
			break;
		}

		dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
	}

	return r;
}
2744 | ||
/*
 * Transmit a short packet on @channel: data identifier in bits 7:0,
 * the 16-bit payload in bits 23:8 and ECC in bits 31:24 of the short
 * packet header register.  Returns 0 on success or -EINVAL when the
 * header FIFO is full (DSI_VC_CTRL bit 16).
 */
static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
		u8 data_type, u16 data, u8 ecc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 r;
	u8 data_id;

	WARN_ON(!dsi_bus_is_locked(dsidev));

	if (dsi->debug_write)
		DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
				channel,
				data_type, data & 0xff, (data >> 8) & 0xff);

	dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);

	if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
		DSSERR("ERROR FIFO FULL, aborting transfer\n");
		return -EINVAL;
	}

	/* DI byte: data type in bits 5:0, virtual channel id in bits 7:6 */
	data_id = data_type | dsi->vc[channel].vc_id << 6;

	r = (data_id << 0) | (data << 8) | (ecc << 24);

	dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);

	return 0;
}
2774 | ||
2775 | static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel) | |
2776 | { | |
2777 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
2778 | ||
2779 | return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL, | |
2780 | 0, 0); | |
2781 | } | |
2782 | ||
/*
 * Dispatch a DCS or generic write of @len bytes to the matching MIPI
 * packet type: 0-2 bytes go as short packets (0 bytes is only legal
 * for generic writes, hence the BUG_ON), anything longer as a long
 * packet.  Returns the underlying send function's result.
 */
static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
		int channel, u8 *data, int len, enum dss_dsi_content_type type)
{
	int r;

	if (len == 0) {
		/* DCS has no 0-parameter write; only generic supports it */
		BUG_ON(type == DSS_DSI_CONTENT_DCS);
		r = dsi_vc_send_short(dsidev, channel,
				MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
	} else if (len == 1) {
		r = dsi_vc_send_short(dsidev, channel,
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
				MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
	} else if (len == 2) {
		r = dsi_vc_send_short(dsidev, channel,
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
				MIPI_DSI_DCS_SHORT_WRITE_PARAM,
				data[0] | (data[1] << 8), 0);
	} else {
		r = dsi_vc_send_long(dsidev, channel,
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_LONG_WRITE :
				MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
	}

	return r;
}
2812 | ||
2813 | static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel, | |
2814 | u8 *data, int len) | |
2815 | { | |
2816 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
2817 | ||
2818 | return dsi_vc_write_nosync_common(dsidev, channel, data, len, | |
2819 | DSS_DSI_CONTENT_DCS); | |
2820 | } | |
2821 | ||
2822 | static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel, | |
2823 | u8 *data, int len) | |
2824 | { | |
2825 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
2826 | ||
2827 | return dsi_vc_write_nosync_common(dsidev, channel, data, len, | |
2828 | DSS_DSI_CONTENT_GENERIC); | |
2829 | } | |
2830 | ||
2831 | static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel, | |
2832 | u8 *data, int len, enum dss_dsi_content_type type) | |
2833 | { | |
2834 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
2835 | int r; | |
2836 | ||
2837 | r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type); | |
2838 | if (r) | |
2839 | goto err; | |
2840 | ||
2841 | r = dsi_vc_send_bta_sync(dssdev, channel); | |
2842 | if (r) | |
2843 | goto err; | |
2844 | ||
2845 | /* RX_FIFO_NOT_EMPTY */ | |
2846 | if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { | |
2847 | DSSERR("rx fifo not empty after write, dumping data:\n"); | |
2848 | dsi_vc_flush_receive_data(dsidev, channel); | |
2849 | r = -EIO; | |
2850 | goto err; | |
2851 | } | |
2852 | ||
2853 | return 0; | |
2854 | err: | |
2855 | DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n", | |
2856 | channel, data[0], len); | |
2857 | return r; | |
2858 | } | |
2859 | ||
2860 | static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data, | |
2861 | int len) | |
2862 | { | |
2863 | return dsi_vc_write_common(dssdev, channel, data, len, | |
2864 | DSS_DSI_CONTENT_DCS); | |
2865 | } | |
2866 | ||
2867 | static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data, | |
2868 | int len) | |
2869 | { | |
2870 | return dsi_vc_write_common(dssdev, channel, data, len, | |
2871 | DSS_DSI_CONTENT_GENERIC); | |
2872 | } | |
2873 | ||
2874 | static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev, | |
2875 | int channel, u8 dcs_cmd) | |
2876 | { | |
2877 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2878 | int r; | |
2879 | ||
2880 | if (dsi->debug_read) | |
2881 | DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n", | |
2882 | channel, dcs_cmd); | |
2883 | ||
2884 | r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0); | |
2885 | if (r) { | |
2886 | DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)" | |
2887 | " failed\n", channel, dcs_cmd); | |
2888 | return r; | |
2889 | } | |
2890 | ||
2891 | return 0; | |
2892 | } | |
2893 | ||
2894 | static int dsi_vc_generic_send_read_request(struct platform_device *dsidev, | |
2895 | int channel, u8 *reqdata, int reqlen) | |
2896 | { | |
2897 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
2898 | u16 data; | |
2899 | u8 data_type; | |
2900 | int r; | |
2901 | ||
2902 | if (dsi->debug_read) | |
2903 | DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n", | |
2904 | channel, reqlen); | |
2905 | ||
2906 | if (reqlen == 0) { | |
2907 | data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM; | |
2908 | data = 0; | |
2909 | } else if (reqlen == 1) { | |
2910 | data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM; | |
2911 | data = reqdata[0]; | |
2912 | } else if (reqlen == 2) { | |
2913 | data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM; | |
2914 | data = reqdata[0] | (reqdata[1] << 8); | |
2915 | } else { | |
2916 | BUG(); | |
2917 | return -EINVAL; | |
2918 | } | |
2919 | ||
2920 | r = dsi_vc_send_short(dsidev, channel, data_type, data, 0); | |
2921 | if (r) { | |
2922 | DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)" | |
2923 | " failed\n", channel, reqlen); | |
2924 | return r; | |
2925 | } | |
2926 | ||
2927 | return 0; | |
2928 | } | |
2929 | ||
/*
 * Drain one read-response packet from the VC RX FIFO into @buf.
 *
 * The first FIFO word is the packet header; its datatype field selects
 * the branch: ACK-with-error trigger, 1- or 2-byte short response, or a
 * long response whose payload is read out word by word.
 *
 * Returns the number of payload bytes stored in @buf (1, 2, or the long
 * packet length), or a negative error code if the FIFO is empty, the
 * peripheral reported an error, the response does not fit in @buflen, or
 * the datatype is unrecognized.
 */
static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
		u8 *buf, int buflen, enum dss_dsi_content_type type)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 val;
	u8 dt;
	int r;

	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
		DSSERR("RX fifo empty when trying to read.\n");
		r = -EIO;
		goto err;
	}

	/* header word; the datatype lives in bits 5:0 */
	val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
	if (dsi->debug_read)
		DSSDBG("\theader: %08x\n", val);
	dt = FLD_GET(val, 5, 0);
	if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
		/* error flags from the peripheral are in bits 23:8 */
		u16 err = FLD_GET(val, 23, 8);
		dsi_show_rx_ack_with_err(err);
		r = -EIO;
		goto err;

	} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
			MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
			MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) {
		/* single payload byte is embedded in the header */
		u8 data = FLD_GET(val, 15, 8);
		if (dsi->debug_read)
			DSSDBG("\t%s short response, 1 byte: %02x\n",
				type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
				"DCS", data);

		if (buflen < 1) {
			r = -EIO;
			goto err;
		}

		buf[0] = data;

		return 1;
	} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
			MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
			MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) {
		/* two payload bytes embedded in the header, LSB first */
		u16 data = FLD_GET(val, 23, 8);
		if (dsi->debug_read)
			DSSDBG("\t%s short response, 2 byte: %04x\n",
				type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
				"DCS", data);

		if (buflen < 2) {
			r = -EIO;
			goto err;
		}

		buf[0] = data & 0xff;
		buf[1] = (data >> 8) & 0xff;

		return 2;
	} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
			MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
			MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) {
		int w;
		/* long packet word count is in the header, bits 23:8 */
		int len = FLD_GET(val, 23, 8);
		if (dsi->debug_read)
			DSSDBG("\t%s long response, len %d\n",
				type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
				"DCS", len);

		if (len > buflen) {
			r = -EIO;
			goto err;
		}

		/* two byte checksum ends the packet, not included in len */
		for (w = 0; w < len + 2;) {
			int b;
			/* each FIFO read yields 4 payload bytes */
			val = dsi_read_reg(dsidev,
				DSI_VC_SHORT_PACKET_HEADER(channel));
			if (dsi->debug_read)
				DSSDBG("\t\t%02x %02x %02x %02x\n",
						(val >> 0) & 0xff,
						(val >> 8) & 0xff,
						(val >> 16) & 0xff,
						(val >> 24) & 0xff);

			for (b = 0; b < 4; ++b) {
				if (w < len)
					buf[w] = (val >> (b * 8)) & 0xff;
				/* we discard the 2 byte checksum */
				++w;
			}
		}

		return len;
	} else {
		DSSERR("\tunknown datatype 0x%02x\n", dt);
		r = -EIO;
		goto err;
	}

err:
	DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
		type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");

	return r;
}
3038 | ||
3039 | static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, | |
3040 | u8 *buf, int buflen) | |
3041 | { | |
3042 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
3043 | int r; | |
3044 | ||
3045 | r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd); | |
3046 | if (r) | |
3047 | goto err; | |
3048 | ||
3049 | r = dsi_vc_send_bta_sync(dssdev, channel); | |
3050 | if (r) | |
3051 | goto err; | |
3052 | ||
3053 | r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen, | |
3054 | DSS_DSI_CONTENT_DCS); | |
3055 | if (r < 0) | |
3056 | goto err; | |
3057 | ||
3058 | if (r != buflen) { | |
3059 | r = -EIO; | |
3060 | goto err; | |
3061 | } | |
3062 | ||
3063 | return 0; | |
3064 | err: | |
3065 | DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd); | |
3066 | return r; | |
3067 | } | |
3068 | ||
3069 | static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel, | |
3070 | u8 *reqdata, int reqlen, u8 *buf, int buflen) | |
3071 | { | |
3072 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
3073 | int r; | |
3074 | ||
3075 | r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen); | |
3076 | if (r) | |
3077 | return r; | |
3078 | ||
3079 | r = dsi_vc_send_bta_sync(dssdev, channel); | |
3080 | if (r) | |
3081 | return r; | |
3082 | ||
3083 | r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen, | |
3084 | DSS_DSI_CONTENT_GENERIC); | |
3085 | if (r < 0) | |
3086 | return r; | |
3087 | ||
3088 | if (r != buflen) { | |
3089 | r = -EIO; | |
3090 | return r; | |
3091 | } | |
3092 | ||
3093 | return 0; | |
3094 | } | |
3095 | ||
3096 | static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel, | |
3097 | u16 len) | |
3098 | { | |
3099 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
3100 | ||
3101 | return dsi_vc_send_short(dsidev, channel, | |
3102 | MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0); | |
3103 | } | |
3104 | ||
/*
 * Put the DSI lanes into ULPS (ultra low power state).
 *
 * Sequence: make sure the DDR clock is no longer forced on, sync and
 * disable all four VCs, verify the complexio is idle, then assert the
 * per-lane ULPS request signals and wait (via a CIO interrupt) for all
 * lanes to acknowledge. Finally the complexio power state is set to
 * ULPS and the interface is disabled. Returns 0 on success or a
 * negative error code (the lanes may be left mid-sequence on timeout).
 */
static int dsi_enter_ulps(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int r, i;
	unsigned mask;

	DSSDBG("Entering ULPS");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	WARN_ON(dsi->ulps_enabled);

	/* already in ULPS: nothing to do */
	if (dsi->ulps_enabled)
		return 0;

	/* DDR_CLK_ALWAYS_ON */
	if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
		/* briefly disable the interface while clearing the bit so
		 * the clock lane is allowed to stop */
		dsi_if_enable(dsidev, 0);
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
		dsi_if_enable(dsidev, 1);
	}

	/* wait for all VCs to finish any ongoing transfers */
	dsi_sync_vc(dsidev, 0);
	dsi_sync_vc(dsidev, 1);
	dsi_sync_vc(dsidev, 2);
	dsi_sync_vc(dsidev, 3);

	dsi_force_tx_stop_mode_io(dsidev);

	dsi_vc_enable(dsidev, 0, false);
	dsi_vc_enable(dsidev, 1, false);
	dsi_vc_enable(dsidev, 2, false);
	dsi_vc_enable(dsidev, 3, false);

	if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
		DSSERR("HS busy when enabling ULPS\n");
		return -EIO;
	}

	if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
		DSSERR("LP busy when enabling ULPS\n");
		return -EIO;
	}

	/* arm a completion that fires once every lane reports ULPS active */
	r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
	if (r)
		return r;

	mask = 0;

	/* build a bitmask of the lanes actually in use */
	for (i = 0; i < dsi->num_lanes_supported; ++i) {
		if (dsi->lanes[i].function == DSI_LANE_UNUSED)
			continue;
		mask |= 1 << i;
	}
	/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
	/* LANEx_ULPS_SIG2 */
	REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);

	/* flush posted write and wait for SCP interface to finish the write */
	dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);

	if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(1000)) == 0) {
		DSSERR("ULPS enable timeout\n");
		r = -EIO;
		goto err;
	}

	dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);

	/* Reset LANEx_ULPS_SIG2 */
	REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);

	/* flush posted write and wait for SCP interface to finish the write */
	dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);

	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);

	dsi_if_enable(dsidev, false);

	dsi->ulps_enabled = true;

	return 0;

err:
	dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
	return r;
}
3198 | ||
3199 | static void dsi_set_lp_rx_timeout(struct platform_device *dsidev, | |
3200 | unsigned ticks, bool x4, bool x16) | |
3201 | { | |
3202 | unsigned long fck; | |
3203 | unsigned long total_ticks; | |
3204 | u32 r; | |
3205 | ||
3206 | BUG_ON(ticks > 0x1fff); | |
3207 | ||
3208 | /* ticks in DSI_FCK */ | |
3209 | fck = dsi_fclk_rate(dsidev); | |
3210 | ||
3211 | r = dsi_read_reg(dsidev, DSI_TIMING2); | |
3212 | r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ | |
3213 | r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ | |
3214 | r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ | |
3215 | r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ | |
3216 | dsi_write_reg(dsidev, DSI_TIMING2, r); | |
3217 | ||
3218 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); | |
3219 | ||
3220 | DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n", | |
3221 | total_ticks, | |
3222 | ticks, x4 ? " x4" : "", x16 ? " x16" : "", | |
3223 | (total_ticks * 1000) / (fck / 1000 / 1000)); | |
3224 | } | |
3225 | ||
3226 | static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks, | |
3227 | bool x8, bool x16) | |
3228 | { | |
3229 | unsigned long fck; | |
3230 | unsigned long total_ticks; | |
3231 | u32 r; | |
3232 | ||
3233 | BUG_ON(ticks > 0x1fff); | |
3234 | ||
3235 | /* ticks in DSI_FCK */ | |
3236 | fck = dsi_fclk_rate(dsidev); | |
3237 | ||
3238 | r = dsi_read_reg(dsidev, DSI_TIMING1); | |
3239 | r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ | |
3240 | r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */ | |
3241 | r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */ | |
3242 | r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ | |
3243 | dsi_write_reg(dsidev, DSI_TIMING1, r); | |
3244 | ||
3245 | total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1); | |
3246 | ||
3247 | DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n", | |
3248 | total_ticks, | |
3249 | ticks, x8 ? " x8" : "", x16 ? " x16" : "", | |
3250 | (total_ticks * 1000) / (fck / 1000 / 1000)); | |
3251 | } | |
3252 | ||
3253 | static void dsi_set_stop_state_counter(struct platform_device *dsidev, | |
3254 | unsigned ticks, bool x4, bool x16) | |
3255 | { | |
3256 | unsigned long fck; | |
3257 | unsigned long total_ticks; | |
3258 | u32 r; | |
3259 | ||
3260 | BUG_ON(ticks > 0x1fff); | |
3261 | ||
3262 | /* ticks in DSI_FCK */ | |
3263 | fck = dsi_fclk_rate(dsidev); | |
3264 | ||
3265 | r = dsi_read_reg(dsidev, DSI_TIMING1); | |
3266 | r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ | |
3267 | r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */ | |
3268 | r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */ | |
3269 | r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ | |
3270 | dsi_write_reg(dsidev, DSI_TIMING1, r); | |
3271 | ||
3272 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); | |
3273 | ||
3274 | DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n", | |
3275 | total_ticks, | |
3276 | ticks, x4 ? " x4" : "", x16 ? " x16" : "", | |
3277 | (total_ticks * 1000) / (fck / 1000 / 1000)); | |
3278 | } | |
3279 | ||
3280 | static void dsi_set_hs_tx_timeout(struct platform_device *dsidev, | |
3281 | unsigned ticks, bool x4, bool x16) | |
3282 | { | |
3283 | unsigned long fck; | |
3284 | unsigned long total_ticks; | |
3285 | u32 r; | |
3286 | ||
3287 | BUG_ON(ticks > 0x1fff); | |
3288 | ||
3289 | /* ticks in TxByteClkHS */ | |
3290 | fck = dsi_get_txbyteclkhs(dsidev); | |
3291 | ||
3292 | r = dsi_read_reg(dsidev, DSI_TIMING2); | |
3293 | r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ | |
3294 | r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */ | |
3295 | r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */ | |
3296 | r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ | |
3297 | dsi_write_reg(dsidev, DSI_TIMING2, r); | |
3298 | ||
3299 | total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); | |
3300 | ||
3301 | DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n", | |
3302 | total_ticks, | |
3303 | ticks, x4 ? " x4" : "", x16 ? " x16" : "", | |
3304 | (total_ticks * 1000) / (fck / 1000 / 1000)); | |
3305 | } | |
3306 | ||
3307 | static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev) | |
3308 | { | |
3309 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
3310 | int num_line_buffers; | |
3311 | ||
3312 | if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { | |
3313 | int bpp = dsi_get_pixel_size(dsi->pix_fmt); | |
3314 | struct omap_video_timings *timings = &dsi->timings; | |
3315 | /* | |
3316 | * Don't use line buffers if width is greater than the video | |
3317 | * port's line buffer size | |
3318 | */ | |
3319 | if (dsi->line_buffer_size <= timings->x_res * bpp / 8) | |
3320 | num_line_buffers = 0; | |
3321 | else | |
3322 | num_line_buffers = 2; | |
3323 | } else { | |
3324 | /* Use maximum number of line buffers in command mode */ | |
3325 | num_line_buffers = 2; | |
3326 | } | |
3327 | ||
3328 | /* LINE_BUFFER */ | |
3329 | REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12); | |
3330 | } | |
3331 | ||
3332 | static void dsi_config_vp_sync_events(struct platform_device *dsidev) | |
3333 | { | |
3334 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
3335 | bool sync_end; | |
3336 | u32 r; | |
3337 | ||
3338 | if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE) | |
3339 | sync_end = true; | |
3340 | else | |
3341 | sync_end = false; | |
3342 | ||
3343 | r = dsi_read_reg(dsidev, DSI_CTRL); | |
3344 | r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */ | |
3345 | r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */ | |
3346 | r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */ | |
3347 | r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */ | |
3348 | r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */ | |
3349 | r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */ | |
3350 | r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */ | |
3351 | dsi_write_reg(dsidev, DSI_CTRL, r); | |
3352 | } | |
3353 | ||
3354 | static void dsi_config_blanking_modes(struct platform_device *dsidev) | |
3355 | { | |
3356 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
3357 | int blanking_mode = dsi->vm_timings.blanking_mode; | |
3358 | int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode; | |
3359 | int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode; | |
3360 | int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode; | |
3361 | u32 r; | |
3362 | ||
3363 | /* | |
3364 | * 0 = TX FIFO packets sent or LPS in corresponding blanking periods | |
3365 | * 1 = Long blanking packets are sent in corresponding blanking periods | |
3366 | */ | |
3367 | r = dsi_read_reg(dsidev, DSI_CTRL); | |
3368 | r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */ | |
3369 | r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */ | |
3370 | r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */ | |
3371 | r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */ | |
3372 | dsi_write_reg(dsidev, DSI_CTRL, r); | |
3373 | } | |
3374 | ||
/*
 * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
 * results in maximum transition time for data and clock lanes to enter and
 * exit HS mode. Hence, this is the scenario where the least amount of command
 * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
 * clock cycles that can be used to interleave command mode data in HS so that
 * all scenarios are satisfied.
 */
static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
		int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
{
	int enter_lat = enter_hs > 2 ? enter_hs : 2;
	int transition;

	/*
	 * With DDR_CLK_ALWAYS_ON only the data lanes transition; otherwise
	 * both data and clock lanes transition, and the worse of the two
	 * Scenario 3 paths bounds the transition time.
	 */
	if (ddr_alwon) {
		transition = enter_hs + exit_hs + enter_lat + 1;
	} else {
		int data_path = ddr_pre + enter_hs + exit_hs + enter_lat + 1;
		int clk_path = ddr_pre + enter_hs + exiths_clk + ddr_post +
				ddr_pre + enter_hs + 1;
		transition = data_path > clk_path ? data_path : clk_path;
	}

	/* whatever remains of the blanking interval can carry commands */
	return blank > transition ? blank - transition : 0;
}
3406 | ||
/*
 * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
 * results in maximum transition time for data lanes to enter and exit LP mode.
 * Hence, this is the scenario where the least amount of command mode data can
 * be interleaved. We program the minimum amount of bytes that can be
 * interleaved in LP so that all scenarios are satisfied.
 */
static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
		int lp_clk_div, int tdsi_fclk)
{
	const int thsbyte_clk = 16;	/* Period of TXBYTECLKHS clock, in CLKIN4DDR */
	int trans_lp;	/* time required for a LP transition, in TXBYTECLKHS */
	int tlp_avail;	/* time left for interleaving commands, in CLKIN4DDR */
	int ttxclkesc;	/* period of LP transmit escape clock, in CLKIN4DDR */
	int lp_inter;	/* cmd mode data that can be interleaved, in bytes */

	/* maximum LP transition time according to Scenario 1 */
	trans_lp = exit_hs + (enter_hs > 2 ? enter_hs : 2) + 1;

	/* CLKIN4DDR = 16 * TXBYTECLKHS */
	tlp_avail = thsbyte_clk * (blank - trans_lp);

	ttxclkesc = tdsi_fclk * lp_clk_div;

	lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
			26) / 16;

	/* never report a negative byte budget */
	return lp_inter > 0 ? lp_inter : 0;
}
3436 | ||
/*
 * Compute and program how much command mode traffic may be interleaved
 * into the video mode blanking periods (HSA, HFP, HBP and the BLLP):
 * in HS (TXBYTECLKHS cycles, DSI_VM_TIMING4/6) and in LP (bytes,
 * DSI_VM_TIMING5/6). All inputs are read back from the already-programmed
 * timing and PHY registers; interleaving is only computed for periods
 * whose blanking-mode bit is 0 (TX FIFO packets / LPS), since periods
 * filled with long blanking packets cannot carry commands.
 */
static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int blanking_mode;
	int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
	int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
	int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
	int tclk_trail, ths_exit, exiths_clk;
	bool ddr_alwon;
	struct omap_video_timings *timings = &dsi->timings;
	int bpp = dsi_get_pixel_size(dsi->pix_fmt);
	int ndl = dsi->num_lanes_used - 1;
	int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
	int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
	int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
	int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
	int bl_interleave_hs = 0, bl_interleave_lp = 0;
	u32 r;

	/* per-period blanking modes, as programmed into DSI_CTRL */
	r = dsi_read_reg(dsidev, DSI_CTRL);
	blanking_mode = FLD_GET(r, 20, 20);
	hfp_blanking_mode = FLD_GET(r, 21, 21);
	hbp_blanking_mode = FLD_GET(r, 22, 22);
	hsa_blanking_mode = FLD_GET(r, 23, 23);

	/* horizontal blanking durations from DSI_VM_TIMING1 */
	r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
	hbp = FLD_GET(r, 11, 0);
	hfp = FLD_GET(r, 23, 12);
	hsa = FLD_GET(r, 31, 24);

	r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
	ddr_clk_post = FLD_GET(r, 7, 0);
	ddr_clk_pre = FLD_GET(r, 15, 8);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
	exit_hs_mode_lat = FLD_GET(r, 15, 0);
	enter_hs_mode_lat = FLD_GET(r, 31, 16);

	r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
	lp_clk_div = FLD_GET(r, 12, 0);
	ddr_alwon = FLD_GET(r, 13, 13);

	/* PHY timings needed for the clock-lane HS exit estimate */
	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	ths_exit = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	tclk_trail = FLD_GET(r, 15, 8);

	exiths_clk = ths_exit + tclk_trail;

	/* line payload in bytes; +6 covers packet header/footer overhead */
	width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
	bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);

	if (!hsa_blanking_mode) {
		hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!hfp_blanking_mode) {
		hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!hbp_blanking_mode) {
		hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);

		hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!blanking_mode) {
		bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);

		bl_interleave_lp = dsi_compute_interleave_lp(bllp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
		hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
		bl_interleave_hs);

	DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
		hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
		bl_interleave_lp);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
	r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
	r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
	r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING4, r);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
	r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
	r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
	r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING5, r);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
	/*
	 * NOTE(review): the two field ranges below (31:15 and 16:0) overlap
	 * at bits 16:15, so the second FLD_MOD clobbers part of the first —
	 * confirm the BL_HS_INTERLEAVING / BL_LP_INTERLEAVING field widths
	 * against the OMAP TRM.
	 */
	r = FLD_MOD(r, bl_interleave_hs, 31, 15);
	r = FLD_MOD(r, bl_interleave_lp, 16, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
}
3553 | ||
/*
 * One-time protocol engine setup: FIFO allocation, timeout counters,
 * static DSI_CTRL bits (video port bus width from the pixel format,
 * ECC/checksum checking, EOT generation), video-mode-only configuration,
 * and the initial per-VC setup for all four virtual channels.
 * Returns 0, or -EINVAL for an unsupported pixel size.
 */
static int dsi_proto_config(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 r;
	int buswidth = 0;

	/* give each of the four VCs a 32-unit slice of the TX and RX FIFOs */
	dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32);

	dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32);

	/* XXX what values for the timeouts? */
	dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
	dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
	dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
	dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);

	/* VP_DATA_BUS_WIDTH encoding: 0 = 16 bpp, 1 = 18 bpp, 2 = 24 bpp */
	switch (dsi_get_pixel_size(dsi->pix_fmt)) {
	case 16:
		buswidth = 0;
		break;
	case 18:
		buswidth = 1;
		break;
	case 24:
		buswidth = 2;
		break;
	default:
		BUG();
		return -EINVAL;
	}

	r = dsi_read_reg(dsidev, DSI_CTRL);
	r = FLD_MOD(r, 1, 1, 1);	/* CS_RX_EN */
	r = FLD_MOD(r, 1, 2, 2);	/* ECC_RX_EN */
	r = FLD_MOD(r, 1, 3, 3);	/* TX_FIFO_ARBITRATION */
	r = FLD_MOD(r, 1, 4, 4);	/* VP_CLK_RATIO, always 1, see errata*/
	r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
	r = FLD_MOD(r, 0, 8, 8);	/* VP_CLK_POL */
	r = FLD_MOD(r, 1, 14, 14);	/* TRIGGER_RESET_MODE */
	r = FLD_MOD(r, 1, 19, 19);	/* EOT_ENABLE */
	if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
		r = FLD_MOD(r, 1, 24, 24);	/* DCS_CMD_ENABLE */
		/* DCS_CMD_CODE, 1=start, 0=continue */
		r = FLD_MOD(r, 0, 25, 25);
	}

	dsi_write_reg(dsidev, DSI_CTRL, r);

	dsi_config_vp_num_line_buffers(dsidev);

	/* these settings only apply when the video port is in use */
	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_config_vp_sync_events(dsidev);
		dsi_config_blanking_modes(dsidev);
		dsi_config_cmd_mode_interleaving(dsidev);
	}

	dsi_vc_initial_config(dsidev, 0);
	dsi_vc_initial_config(dsidev, 1);
	dsi_vc_initial_config(dsidev, 2);
	dsi_vc_initial_config(dsidev, 3);

	return 0;
}
3623 | ||
/*
 * Derive protocol timing values from the DSIPHY configuration registers
 * and program them into the protocol engine:
 *  - DDR clock pre/post counts (DSI_CLK_TIMING)
 *  - HS-mode enter/exit latencies (DSI_VM_TIMING7)
 *  - in video mode, horizontal/vertical blanking and the total line
 *    length TL (DSI_VM_TIMING1..3)
 * The ths_*/tclk_* values are read back from DSI_DSIPHY_CFG0..2 as
 * previously programmed by the CIO init code.
 */
static void dsi_proto_timings(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
	unsigned tclk_pre, tclk_post;
	unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
	unsigned ths_trail, ths_exit;
	unsigned ddr_clk_pre, ddr_clk_post;
	unsigned enter_hs_mode_lat, exit_hs_mode_lat;
	unsigned ths_eot;
	int ndl = dsi->num_lanes_used - 1;	/* data lanes only (clock lane excluded) */
	u32 r;

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	ths_prepare = FLD_GET(r, 31, 24);
	ths_prepare_ths_zero = FLD_GET(r, 23, 16);
	/* the register field holds prepare+zero combined; split zero out */
	ths_zero = ths_prepare_ths_zero - ths_prepare;
	ths_trail = FLD_GET(r, 15, 8);
	ths_exit = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	tlpx = FLD_GET(r, 20, 16) * 2;
	tclk_trail = FLD_GET(r, 15, 8);
	tclk_zero = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
	tclk_prepare = FLD_GET(r, 7, 0);

	/* min 8*UI */
	tclk_pre = 20;
	/* min 60ns + 52*UI */
	tclk_post = ns2ddr(dsidev, 60) + 26;

	/* per-lane end-of-transmission overhead, in byteclks */
	ths_eot = DIV_ROUND_UP(4, ndl);

	/* the /4 converts DDR-clock counts to byteclk counts */
	ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
			4);
	ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;

	/* both register fields are 8 bits wide and must be non-zero */
	BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
	BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);

	r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
	r = FLD_MOD(r, ddr_clk_pre, 15, 8);
	r = FLD_MOD(r, ddr_clk_post, 7, 0);
	dsi_write_reg(dsidev, DSI_CLK_TIMING, r);

	DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
			ddr_clk_pre,
			ddr_clk_post);

	enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
		DIV_ROUND_UP(ths_prepare, 4) +
		DIV_ROUND_UP(ths_zero + 3, 4);

	exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;

	r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
		FLD_VAL(exit_hs_mode_lat, 15, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING7, r);

	DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
			enter_hs_mode_lat, exit_hs_mode_lat);

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		/* TODO: Implement a video mode check_timings function */
		int hsa = dsi->vm_timings.hsa;
		int hfp = dsi->vm_timings.hfp;
		int hbp = dsi->vm_timings.hbp;
		int vsa = dsi->vm_timings.vsa;
		int vfp = dsi->vm_timings.vfp;
		int vbp = dsi->vm_timings.vbp;
		int window_sync = dsi->vm_timings.window_sync;
		bool hsync_end;
		struct omap_video_timings *timings = &dsi->timings;
		int bpp = dsi_get_pixel_size(dsi->pix_fmt);
		int tl, t_he, width_bytes;

		/* HSA and the HSE overhead are only transmitted in
		 * sync-pulse mode */
		hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
		t_he = hsync_end ?
			((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;

		width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);

		/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
		tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
			DIV_ROUND_UP(width_bytes + 6, ndl) + hbp;

		DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
			hfp, hsync_end ? hsa : 0, tl);
		DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
			vsa, timings->y_res);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
		r = FLD_MOD(r, hbp, 11, 0);	/* HBP */
		r = FLD_MOD(r, hfp, 23, 12);	/* HFP */
		r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24);	/* HSA */
		dsi_write_reg(dsidev, DSI_VM_TIMING1, r);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
		r = FLD_MOD(r, vbp, 7, 0);	/* VBP */
		r = FLD_MOD(r, vfp, 15, 8);	/* VFP */
		r = FLD_MOD(r, vsa, 23, 16);	/* VSA */
		r = FLD_MOD(r, window_sync, 27, 24);	/* WINDOW_SYNC */
		dsi_write_reg(dsidev, DSI_VM_TIMING2, r);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
		r = FLD_MOD(r, timings->y_res, 14, 0);	/* VACT */
		r = FLD_MOD(r, tl, 31, 16);	/* TL */
		dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
	}
}
3736 | ||
3737 | static int dsi_configure_pins(struct omap_dss_device *dssdev, | |
3738 | const struct omap_dsi_pin_config *pin_cfg) | |
3739 | { | |
3740 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
3741 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
3742 | int num_pins; | |
3743 | const int *pins; | |
3744 | struct dsi_lane_config lanes[DSI_MAX_NR_LANES]; | |
3745 | int num_lanes; | |
3746 | int i; | |
3747 | ||
3748 | static const enum dsi_lane_function functions[] = { | |
3749 | DSI_LANE_CLK, | |
3750 | DSI_LANE_DATA1, | |
3751 | DSI_LANE_DATA2, | |
3752 | DSI_LANE_DATA3, | |
3753 | DSI_LANE_DATA4, | |
3754 | }; | |
3755 | ||
3756 | num_pins = pin_cfg->num_pins; | |
3757 | pins = pin_cfg->pins; | |
3758 | ||
3759 | if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2 | |
3760 | || num_pins % 2 != 0) | |
3761 | return -EINVAL; | |
3762 | ||
3763 | for (i = 0; i < DSI_MAX_NR_LANES; ++i) | |
3764 | lanes[i].function = DSI_LANE_UNUSED; | |
3765 | ||
3766 | num_lanes = 0; | |
3767 | ||
3768 | for (i = 0; i < num_pins; i += 2) { | |
3769 | u8 lane, pol; | |
3770 | int dx, dy; | |
3771 | ||
3772 | dx = pins[i]; | |
3773 | dy = pins[i + 1]; | |
3774 | ||
3775 | if (dx < 0 || dx >= dsi->num_lanes_supported * 2) | |
3776 | return -EINVAL; | |
3777 | ||
3778 | if (dy < 0 || dy >= dsi->num_lanes_supported * 2) | |
3779 | return -EINVAL; | |
3780 | ||
3781 | if (dx & 1) { | |
3782 | if (dy != dx - 1) | |
3783 | return -EINVAL; | |
3784 | pol = 1; | |
3785 | } else { | |
3786 | if (dy != dx + 1) | |
3787 | return -EINVAL; | |
3788 | pol = 0; | |
3789 | } | |
3790 | ||
3791 | lane = dx / 2; | |
3792 | ||
3793 | lanes[lane].function = functions[i / 2]; | |
3794 | lanes[lane].polarity = pol; | |
3795 | num_lanes++; | |
3796 | } | |
3797 | ||
3798 | memcpy(dsi->lanes, lanes, sizeof(dsi->lanes)); | |
3799 | dsi->num_lanes_used = num_lanes; | |
3800 | ||
3801 | return 0; | |
3802 | } | |
3803 | ||
/*
 * Start pushing pixels out of the DSI link. Sets up DISPC for this
 * output, and in video mode additionally switches the virtual channel
 * to video mode with the packed-pixel data type matching the current
 * pixel format, then enables the overlay manager.
 * Returns 0 on success or a negative error code; on failure everything
 * brought up here is torn down again.
 */
static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_overlay_manager *mgr = dsi->output.manager;
	int bpp = dsi_get_pixel_size(dsi->pix_fmt);
	struct omap_dss_device *out = &dsi->output;
	u8 data_type;
	u16 word_count;
	int r;

	if (out->manager == NULL) {
		DSSERR("failed to enable display: no output/manager\n");
		return -ENODEV;
	}

	r = dsi_display_init_dispc(dsidev, mgr);
	if (r)
		goto err_init_dispc;

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		/* map the configured pixel format to the MIPI DSI
		 * pixel-stream data type for the long packet header */
		switch (dsi->pix_fmt) {
		case OMAP_DSS_DSI_FMT_RGB888:
			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
			break;
		case OMAP_DSS_DSI_FMT_RGB666:
			data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
			break;
		case OMAP_DSS_DSI_FMT_RGB666_PACKED:
			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
			break;
		case OMAP_DSS_DSI_FMT_RGB565:
			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
			break;
		default:
			r = -EINVAL;
			goto err_pix_fmt;
		}

		/* quiesce the interface/VC before reprogramming the mode */
		dsi_if_enable(dsidev, false);
		dsi_vc_enable(dsidev, channel, false);

		/* MODE, 1 = video mode */
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);

		/* payload bytes per line */
		word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);

		dsi_vc_write_long_header(dsidev, channel, data_type,
				word_count, 0);

		dsi_vc_enable(dsidev, channel, true);
		dsi_if_enable(dsidev, true);
	}

	r = dss_mgr_enable(mgr);
	if (r)
		goto err_mgr_enable;

	return 0;

err_mgr_enable:
	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_if_enable(dsidev, false);
		dsi_vc_enable(dsidev, channel, false);
	}
err_pix_fmt:
	dsi_display_uninit_dispc(dsidev, mgr);
err_init_dispc:
	return r;
}
3874 | ||
/*
 * Stop the video output previously started by dsi_enable_video_output():
 * in video mode, put the virtual channel back into command mode (with
 * the interface quiesced around the register change), then disable the
 * overlay manager and undo the DISPC setup.
 */
static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_overlay_manager *mgr = dsi->output.manager;

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_if_enable(dsidev, false);
		dsi_vc_enable(dsidev, channel, false);

		/* MODE, 0 = command mode */
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);

		dsi_vc_enable(dsidev, channel, true);
		dsi_if_enable(dsidev, true);
	}

	dss_mgr_disable(mgr);

	dsi_display_uninit_dispc(dsidev, mgr);
}
3896 | ||
/*
 * Kick one command-mode frame transfer through DISPC: compute the
 * DCS long-write packet sizing from the frame dimensions and line
 * buffer, program the TE register, arm a 250 ms framedone watchdog,
 * and start the manager update. If TE is enabled, the transfer is
 * triggered by the panel's tearing-effect event (requested via BTA)
 * instead of starting immediately.
 */
static void dsi_update_screen_dispc(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_overlay_manager *mgr = dsi->output.manager;
	unsigned bytespp;
	unsigned bytespl;
	unsigned bytespf;
	unsigned total_len;
	unsigned packet_payload;
	unsigned packet_len;
	u32 l;
	int r;
	const unsigned channel = dsi->update_channel;
	const unsigned line_buf_size = dsi->line_buffer_size;
	u16 w = dsi->timings.x_res;
	u16 h = dsi->timings.y_res;

	DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);

	/* pixels come from the video port, not the L4 slave port */
	dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);

	bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
	bytespl = w * bytespp;
	bytespf = bytespl * h;

	/* NOTE: packet_payload has to be equal to N * bytespl, where N is
	 * number of lines in a packet. See errata about VP_CLK_RATIO */

	/* NOTE(review): assumes line_buf_size >= bytespl whenever the
	 * frame doesn't fit the buffer; otherwise packet_payload would
	 * be 0 and the division below would trap — confirm against the
	 * supported resolutions */
	if (bytespf < line_buf_size)
		packet_payload = bytespf;
	else
		packet_payload = (line_buf_size) / bytespl * bytespl;

	packet_len = packet_payload + 1;	/* 1 byte for DCS cmd */
	total_len = (bytespf / packet_payload) * packet_len;

	/* partial trailing packet, plus its DCS command byte */
	if (bytespf % packet_payload)
		total_len += (bytespf % packet_payload) + 1;

	l = FLD_VAL(total_len, 23, 0);	/* TE_SIZE */
	dsi_write_reg(dsidev, DSI_VC_TE(channel), l);

	dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
		packet_len, 0);

	/* either wait for the panel TE, or start right away */
	if (dsi->te_enabled)
		l = FLD_MOD(l, 1, 30, 30);	/* TE_EN */
	else
		l = FLD_MOD(l, 1, 31, 31);	/* TE_START */
	dsi_write_reg(dsidev, DSI_VC_TE(channel), l);

	/* We put SIDLEMODE to no-idle for the duration of the transfer,
	 * because DSS interrupts are not capable of waking up the CPU and the
	 * framedone interrupt could be delayed for quite a long time. I think
	 * the same goes for any DSS interrupts, but for some reason I have not
	 * seen the problem anywhere else than here.
	 */
	dispc_disable_sidle();

	dsi_perf_mark_start(dsidev);

	/* watchdog: dsi_framedone_timeout_work_callback() fires if the
	 * FRAMEDONE interrupt never arrives */
	r = schedule_delayed_work(&dsi->framedone_timeout_work,
		msecs_to_jiffies(250));
	BUG_ON(r == 0);

	dss_mgr_set_timings(mgr, &dsi->timings);

	dss_mgr_start_update(mgr);

	if (dsi->te_enabled) {
		/* disable LP_RX_TO, so that we can receive TE. Time to wait
		 * for TE is longer than the timer allows */
		REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */

		/* ask the panel to send the TE trigger */
		dsi_vc_send_bta(dsidev, channel);

#ifdef DSI_CATCH_MISSING_TE
		mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
#endif
	}
}
3978 | ||
#ifdef DSI_CATCH_MISSING_TE
/* Debug watchdog armed in dsi_update_screen_dispc(): fires when no
 * tearing-effect event arrived within 250 ms of requesting one.
 * Diagnostic only — it logs and does not try to recover. */
static void dsi_te_timeout(struct timer_list *unused)
{
	DSSERR("TE not received for 250ms!\n");
}
#endif
3985 | ||
/*
 * Common tail for a finished (or timed-out) command-mode frame
 * transfer: restore the SIDLE and LP_RX_TO settings changed in
 * dsi_update_screen_dispc(), then report the result to the caller of
 * dsi_update() via the stored callback.
 * @error: 0 on success, negative errno on timeout/failure.
 */
static void dsi_handle_framedone(struct platform_device *dsidev, int error)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* SIDLEMODE back to smart-idle */
	dispc_enable_sidle();

	if (dsi->te_enabled) {
		/* enable LP_RX_TO again after the TE */
		REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
	}

	dsi->framedone_callback(error, dsi->framedone_data);

	if (!error)
		dsi_perf_show(dsidev, "DISPC");
}
4003 | ||
/*
 * Delayed-work handler armed by dsi_update_screen_dispc(): runs only
 * when FRAMEDONE did not arrive within 250 ms, and completes the
 * update with -ETIMEDOUT.
 */
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
{
	struct dsi_data *dsi = container_of(work, struct dsi_data,
			framedone_timeout_work.work);
	/* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
	 * 250ms which would conflict with this timeout work. What should be
	 * done is first cancel the transfer on the HW, and then cancel the
	 * possibly scheduled framedone work. However, cancelling the transfer
	 * on the HW is buggy, and would probably require resetting the whole
	 * DSI */

	DSSERR("Framedone not received for 250ms!\n");

	dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
}
4019 | ||
/*
 * DISPC FRAMEDONE handler for command-mode updates: cancel the
 * timeout watchdog and complete the update successfully.
 */
static void dsi_framedone_irq_callback(void *data)
{
	struct platform_device *dsidev = (struct platform_device *) data;
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
	 * turns itself off. However, DSI still has the pixels in its buffers,
	 * and is sending the data.
	 */

	cancel_delayed_work(&dsi->framedone_timeout_work);

	dsi_handle_framedone(dsidev, 0);
}
4034 | ||
4035 | static int dsi_update(struct omap_dss_device *dssdev, int channel, | |
4036 | void (*callback)(int, void *), void *data) | |
4037 | { | |
4038 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
4039 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
4040 | u16 dw, dh; | |
4041 | ||
4042 | dsi_perf_mark_setup(dsidev); | |
4043 | ||
4044 | dsi->update_channel = channel; | |
4045 | ||
4046 | dsi->framedone_callback = callback; | |
4047 | dsi->framedone_data = data; | |
4048 | ||
4049 | dw = dsi->timings.x_res; | |
4050 | dh = dsi->timings.y_res; | |
4051 | ||
4052 | #ifdef DSI_PERF_MEASURE | |
4053 | dsi->update_bytes = dw * dh * | |
4054 | dsi_get_pixel_size(dsi->pix_fmt) / 8; | |
4055 | #endif | |
4056 | dsi_update_screen_dispc(dsidev); | |
4057 | ||
4058 | return 0; | |
4059 | } | |
4060 | ||
4061 | /* Display funcs */ | |
4062 | ||
/*
 * Compute the DISPC clock configuration from the DSI PLL HSDIV output
 * and the user-requested dividers, and cache it in mgr_config (applied
 * later by dsi_display_init_dispc()).
 * Returns 0 on success or a negative error from the rate calculation.
 */
static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dispc_clock_info dispc_cinfo;
	int r;
	unsigned long fck;

	fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);

	dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
	dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;

	r = dispc_calc_clock_rates(fck, &dispc_cinfo);
	if (r) {
		DSSERR("Failed to calc dispc clocks\n");
		return r;
	}

	dsi->mgr_config.clock_info = dispc_cinfo;

	return 0;
}
4085 | ||
/*
 * Prepare the overlay manager for DSI output: select this module's
 * PLL HSDIV as the LCD clock source, register the FRAMEDONE handler
 * (command mode only), normalize the timing polarity fields, and push
 * clocks and LCD configuration to the manager.
 * Returns 0 on success; on failure the clock source is restored and
 * the FRAMEDONE handler unregistered.
 */
static int dsi_display_init_dispc(struct platform_device *dsidev,
		struct omap_overlay_manager *mgr)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	dss_select_lcd_clk_source(mgr->id, dsi->module_id == 0 ?
			OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
			OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);

	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
		r = dss_mgr_register_framedone_handler(mgr,
				dsi_framedone_irq_callback, dsidev);
		if (r) {
			DSSERR("can't register FRAMEDONE handler\n");
			goto err;
		}

		/* command mode: DISPC stalls and handshakes with the DSI
		 * FIFO instead of free-running */
		dsi->mgr_config.stallmode = true;
		dsi->mgr_config.fifohandcheck = true;
	} else {
		dsi->mgr_config.stallmode = false;
		dsi->mgr_config.fifohandcheck = false;
	}

	/*
	 * override interlace, logic level and edge related parameters in
	 * omap_video_timings with default values
	 */
	dsi->timings.interlace = false;
	dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
	dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
	dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
	dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
	dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;

	dss_mgr_set_timings(mgr, &dsi->timings);

	r = dsi_configure_dispc_clocks(dsidev);
	if (r)
		goto err1;

	dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
	dsi->mgr_config.video_port_width =
			dsi_get_pixel_size(dsi->pix_fmt);
	dsi->mgr_config.lcden_sig_polarity = 0;

	dss_mgr_set_lcd_config(mgr, &dsi->mgr_config);

	return 0;
err1:
	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
		dss_mgr_unregister_framedone_handler(mgr,
				dsi_framedone_irq_callback, dsidev);
err:
	dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
	return r;
}
4144 | ||
/*
 * Undo dsi_display_init_dispc(): drop the FRAMEDONE handler (command
 * mode only) and switch the LCD clock back to the free-running FCK.
 */
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
		struct omap_overlay_manager *mgr)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
		dss_mgr_unregister_framedone_handler(mgr,
				dsi_framedone_irq_callback, dsidev);

	dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
}
4156 | ||
4157 | static int dsi_configure_dsi_clocks(struct platform_device *dsidev) | |
4158 | { | |
4159 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
4160 | struct dss_pll_clock_info cinfo; | |
4161 | int r; | |
4162 | ||
4163 | cinfo = dsi->user_dsi_cinfo; | |
4164 | ||
4165 | r = dss_pll_set_config(&dsi->pll, &cinfo); | |
4166 | if (r) { | |
4167 | DSSERR("Failed to set dsi clocks\n"); | |
4168 | return r; | |
4169 | } | |
4170 | ||
4171 | return 0; | |
4172 | } | |
4173 | ||
4174 | static int dsi_display_init_dsi(struct platform_device *dsidev) | |
4175 | { | |
4176 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
4177 | int r; | |
4178 | ||
4179 | r = dss_pll_enable(&dsi->pll); | |
4180 | if (r) | |
4181 | goto err0; | |
4182 | ||
4183 | r = dsi_configure_dsi_clocks(dsidev); | |
4184 | if (r) | |
4185 | goto err1; | |
4186 | ||
4187 | dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ? | |
4188 | OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : | |
4189 | OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI); | |
4190 | ||
4191 | DSSDBG("PLL OK\n"); | |
4192 | ||
4193 | r = dsi_cio_init(dsidev); | |
4194 | if (r) | |
4195 | goto err2; | |
4196 | ||
4197 | _dsi_print_reset_status(dsidev); | |
4198 | ||
4199 | dsi_proto_timings(dsidev); | |
4200 | dsi_set_lp_clk_divisor(dsidev); | |
4201 | ||
4202 | if (1) | |
4203 | _dsi_print_reset_status(dsidev); | |
4204 | ||
4205 | r = dsi_proto_config(dsidev); | |
4206 | if (r) | |
4207 | goto err3; | |
4208 | ||
4209 | /* enable interface */ | |
4210 | dsi_vc_enable(dsidev, 0, 1); | |
4211 | dsi_vc_enable(dsidev, 1, 1); | |
4212 | dsi_vc_enable(dsidev, 2, 1); | |
4213 | dsi_vc_enable(dsidev, 3, 1); | |
4214 | dsi_if_enable(dsidev, 1); | |
4215 | dsi_force_tx_stop_mode_io(dsidev); | |
4216 | ||
4217 | return 0; | |
4218 | err3: | |
4219 | dsi_cio_uninit(dsidev); | |
4220 | err2: | |
4221 | dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); | |
4222 | err1: | |
4223 | dss_pll_disable(&dsi->pll); | |
4224 | err0: | |
4225 | return r; | |
4226 | } | |
4227 | ||
/*
 * Tear down the DSI link: optionally enter ULPS first (only if not
 * already in ULPS), disable the interface and all virtual channels,
 * restore the functional clock source, and shut down the complex I/O
 * and the PLL.
 * @disconnect_lanes: passed through to dsi_pll_uninit() to also power
 *                    down the lane drivers.
 * @enter_ulps:       request ultra-low-power state before shutdown.
 */
static void dsi_display_uninit_dsi(struct platform_device *dsidev,
		bool disconnect_lanes, bool enter_ulps)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (enter_ulps && !dsi->ulps_enabled)
		dsi_enter_ulps(dsidev);

	/* disable interface */
	dsi_if_enable(dsidev, 0);
	dsi_vc_enable(dsidev, 0, 0);
	dsi_vc_enable(dsidev, 1, 0);
	dsi_vc_enable(dsidev, 2, 0);
	dsi_vc_enable(dsidev, 3, 0);

	dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
	dsi_cio_uninit(dsidev);
	dsi_pll_uninit(dsidev, disconnect_lanes);
}
4247 | ||
/*
 * Power up and initialize the DSI display path. Caller must hold the
 * DSI bus lock (checked by WARN_ON). Takes the runtime-PM reference,
 * sets up the interrupt bookkeeping and brings up the DSI link.
 * Returns 0 on success or a negative error code, with the runtime-PM
 * reference released on failure.
 */
static int dsi_display_enable(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;

	DSSDBG("dsi_display_enable\n");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	mutex_lock(&dsi->lock);

	r = dsi_runtime_get(dsidev);
	if (r)
		goto err_get_dsi;

	_dsi_initialize_irq(dsidev);

	r = dsi_display_init_dsi(dsidev);
	if (r)
		goto err_init_dsi;

	mutex_unlock(&dsi->lock);

	return 0;

err_init_dsi:
	dsi_runtime_put(dsidev);
err_get_dsi:
	mutex_unlock(&dsi->lock);
	DSSDBG("dsi_display_enable FAILED\n");
	return r;
}
4281 | ||
/*
 * Counterpart of dsi_display_enable(): wait for any in-flight packets
 * on all four virtual channels, shut the DSI link down and drop the
 * runtime-PM reference. Caller must hold the DSI bus lock.
 * @disconnect_lanes, @enter_ulps: forwarded to dsi_display_uninit_dsi().
 */
static void dsi_display_disable(struct omap_dss_device *dssdev,
		bool disconnect_lanes, bool enter_ulps)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("dsi_display_disable\n");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	mutex_lock(&dsi->lock);

	dsi_sync_vc(dsidev, 0);
	dsi_sync_vc(dsidev, 1);
	dsi_sync_vc(dsidev, 2);
	dsi_sync_vc(dsidev, 3);

	dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);

	dsi_runtime_put(dsidev);

	mutex_unlock(&dsi->lock);
}
4305 | ||
4306 | static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable) | |
4307 | { | |
4308 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
4309 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
4310 | ||
4311 | dsi->te_enabled = enable; | |
4312 | return 0; | |
4313 | } | |
4314 | ||
#ifdef PRINT_VERBOSE_VM_TIMINGS
/*
 * Debug helper: dump a DSI video-mode timing set both in byteclk
 * counts and converted to nanoseconds.
 */
static void print_dsi_vm(const char *str,
		const struct omap_dss_dsi_videomode_timings *t)
{
	unsigned long byteclk = t->hsclk / 4;
	int bl, wc, pps, tot;

	wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
	pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
	bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
	tot = bl + pps;

/* convert a byteclk count to nanoseconds for the printout */
#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))

	pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
			"%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
			str,
			byteclk,
			t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
			bl, pps, tot,
			TO_DSI_T(t->hss),
			TO_DSI_T(t->hsa),
			TO_DSI_T(t->hse),
			TO_DSI_T(t->hbp),
			TO_DSI_T(pps),
			TO_DSI_T(t->hfp),

			TO_DSI_T(bl),
			TO_DSI_T(pps),

			TO_DSI_T(tot));
#undef TO_DSI_T
}
4348 | ||
/*
 * Debug helper: dump DISPC timings both in pixel-clock counts and
 * converted to nanoseconds.
 */
static void print_dispc_vm(const char *str, const struct omap_video_timings *t)
{
	unsigned long pck = t->pixelclock;
	int hact, bl, tot;

	hact = t->x_res;
	bl = t->hsw + t->hbp + t->hfp;
	tot = hact + bl;

/* convert a pixel-clock count to nanoseconds for the printout */
#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))

	pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
			"%u/%u/%u/%u = %u + %u = %u\n",
			str,
			pck,
			t->hsw, t->hbp, hact, t->hfp,
			bl, hact, tot,
			TO_DISPC_T(t->hsw),
			TO_DISPC_T(t->hbp),
			TO_DISPC_T(hact),
			TO_DISPC_T(t->hfp),
			TO_DISPC_T(bl),
			TO_DISPC_T(hact),
			TO_DISPC_T(tot));
#undef TO_DISPC_T
}
4375 | ||
/* note: this is not quite accurate */
/*
 * Debug helper: approximate the DISPC timings that would correspond to
 * the given DSI video-mode timings (byteclk domain scaled to the
 * equivalent pixel clock) and dump them via print_dispc_vm().
 */
static void print_dsi_dispc_vm(const char *str,
		const struct omap_dss_dsi_videomode_timings *t)
{
	struct omap_video_timings vm = { 0 };
	unsigned long byteclk = t->hsclk / 4;
	unsigned long pck;
	u64 dsi_tput;
	int dsi_hact, dsi_htot;

	/* link throughput in bits/s, and the pixel clock it can carry */
	dsi_tput = (u64)byteclk * t->ndl * 8;
	pck = (u32)div64_u64(dsi_tput, t->bitspp);
	dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
	dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;

	vm.pixelclock = pck;
	vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
	vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
	vm.hfp = div64_u64((u64)t->hfp * pck, byteclk);
	vm.x_res = t->hact;

	print_dispc_vm(str, &vm);
}
#endif /* PRINT_VERBOSE_VM_TIMINGS */
4400 | ||
4401 | static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck, | |
4402 | unsigned long pck, void *data) | |
4403 | { | |
4404 | struct dsi_clk_calc_ctx *ctx = data; | |
4405 | struct omap_video_timings *t = &ctx->dispc_vm; | |
4406 | ||
4407 | ctx->dispc_cinfo.lck_div = lckd; | |
4408 | ctx->dispc_cinfo.pck_div = pckd; | |
4409 | ctx->dispc_cinfo.lck = lck; | |
4410 | ctx->dispc_cinfo.pck = pck; | |
4411 | ||
4412 | *t = *ctx->config->timings; | |
4413 | t->pixelclock = pck; | |
4414 | t->x_res = ctx->config->timings->x_res; | |
4415 | t->y_res = ctx->config->timings->y_res; | |
4416 | t->hsw = t->hfp = t->hbp = t->vsw = 1; | |
4417 | t->vfp = t->vbp = 0; | |
4418 | ||
4419 | return true; | |
4420 | } | |
4421 | ||
/*
 * dss_pll_hsdiv_calc() callback: record the candidate HSDIV_DISPC
 * divider/rate and descend into the DISPC divider search.
 */
static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
		void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;

	ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
	ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;

	return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
			dsi_cm_calc_dispc_cb, ctx);
}
4433 | ||
/*
 * dss_pll_calc() callback: record the candidate PLL n/m/fint/clkdco
 * and descend into the HSDIV search.
 */
static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
		unsigned long clkdco, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;

	ctx->dsi_cinfo.n = n;
	ctx->dsi_cinfo.m = m;
	ctx->dsi_cinfo.fint = fint;
	ctx->dsi_cinfo.clkdco = clkdco;

	return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
			dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
			dsi_cm_calc_hsdiv_cb, ctx);
}
4448 | ||
/*
 * Top-level clock search for command mode: estimate the required
 * txbyteclk from the panel pixel clock (with a rough 1.5x margin for
 * the per-packet LP transitions), fill in the calc context, and walk
 * the PLL -> HSDIV -> DISPC divider space via the *_cb callbacks.
 * Returns true if a workable configuration was found (left in *ctx).
 */
static bool dsi_cm_calc(struct dsi_data *dsi,
		const struct omap_dss_dsi_config *cfg,
		struct dsi_clk_calc_ctx *ctx)
{
	unsigned long clkin;
	int bitspp, ndl;
	unsigned long pll_min, pll_max;
	unsigned long pck, txbyteclk;

	clkin = clk_get_rate(dsi->pll.clkin);
	bitspp = dsi_get_pixel_size(cfg->pixel_format);
	ndl = dsi->num_lanes_used - 1;

	/*
	 * Here we should calculate minimum txbyteclk to be able to send the
	 * frame in time, and also to handle TE. That's not very simple, though,
	 * especially as we go to LP between each pixel packet due to HW
	 * "feature". So let's just estimate very roughly and multiply by 1.5.
	 */
	pck = cfg->timings->pixelclock;
	pck = pck * 3 / 2;
	txbyteclk = pck * bitspp / 8 / ndl;

	memset(ctx, 0, sizeof(*ctx));
	ctx->dsidev = dsi->pdev;
	ctx->pll = &dsi->pll;
	ctx->config = cfg;
	ctx->req_pck_min = pck;
	ctx->req_pck_nom = pck;
	ctx->req_pck_max = pck * 3 / 2;

	/* PLL runs at 4x the HS bit clock (and 4x txbyteclk * 4) */
	pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
	pll_max = cfg->hs_clk_max * 4;

	return dss_pll_calc(ctx->pll, clkin,
			pll_min, pll_max,
			dsi_cm_calc_pll_cb, ctx);
}
4487 | ||
4488 | static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx) | |
4489 | { | |
4490 | struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev); | |
4491 | const struct omap_dss_dsi_config *cfg = ctx->config; | |
4492 | int bitspp = dsi_get_pixel_size(cfg->pixel_format); | |
4493 | int ndl = dsi->num_lanes_used - 1; | |
4494 | unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4; | |
4495 | unsigned long byteclk = hsclk / 4; | |
4496 | ||
4497 | unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max; | |
4498 | int xres; | |
4499 | int panel_htot, panel_hbl; /* pixels */ | |
4500 | int dispc_htot, dispc_hbl; /* pixels */ | |
4501 | int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */ | |
4502 | int hfp, hsa, hbp; | |
4503 | const struct omap_video_timings *req_vm; | |
4504 | struct omap_video_timings *dispc_vm; | |
4505 | struct omap_dss_dsi_videomode_timings *dsi_vm; | |
4506 | u64 dsi_tput, dispc_tput; | |
4507 | ||
4508 | dsi_tput = (u64)byteclk * ndl * 8; | |
4509 | ||
4510 | req_vm = cfg->timings; | |
4511 | req_pck_min = ctx->req_pck_min; | |
4512 | req_pck_max = ctx->req_pck_max; | |
4513 | req_pck_nom = ctx->req_pck_nom; | |
4514 | ||
4515 | dispc_pck = ctx->dispc_cinfo.pck; | |
4516 | dispc_tput = (u64)dispc_pck * bitspp; | |
4517 | ||
4518 | xres = req_vm->x_res; | |
4519 | ||
4520 | panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw; | |
4521 | panel_htot = xres + panel_hbl; | |
4522 | ||
4523 | dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl); | |
4524 | ||
4525 | /* | |
4526 | * When there are no line buffers, DISPC and DSI must have the | |
4527 | * same tput. Otherwise DISPC tput needs to be higher than DSI's. | |
4528 | */ | |
4529 | if (dsi->line_buffer_size < xres * bitspp / 8) { | |
4530 | if (dispc_tput != dsi_tput) | |
4531 | return false; | |
4532 | } else { | |
4533 | if (dispc_tput < dsi_tput) | |
4534 | return false; | |
4535 | } | |
4536 | ||
4537 | /* DSI tput must be over the min requirement */ | |
4538 | if (dsi_tput < (u64)bitspp * req_pck_min) | |
4539 | return false; | |
4540 | ||
4541 | /* When non-burst mode, DSI tput must be below max requirement. */ | |
4542 | if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) { | |
4543 | if (dsi_tput > (u64)bitspp * req_pck_max) | |
4544 | return false; | |
4545 | } | |
4546 | ||
4547 | hss = DIV_ROUND_UP(4, ndl); | |
4548 | ||
4549 | if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) { | |
4550 | if (ndl == 3 && req_vm->hsw == 0) | |
4551 | hse = 1; | |
4552 | else | |
4553 | hse = DIV_ROUND_UP(4, ndl); | |
4554 | } else { | |
4555 | hse = 0; | |
4556 | } | |
4557 | ||
4558 | /* DSI htot to match the panel's nominal pck */ | |
4559 | dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom); | |
4560 | ||
4561 | /* fail if there would be no time for blanking */ | |
4562 | if (dsi_htot < hss + hse + dsi_hact) | |
4563 | return false; | |
4564 | ||
4565 | /* total DSI blanking needed to achieve panel's TL */ | |
4566 | dsi_hbl = dsi_htot - dsi_hact; | |
4567 | ||
4568 | /* DISPC htot to match the DSI TL */ | |
4569 | dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk); | |
4570 | ||
4571 | /* verify that the DSI and DISPC TLs are the same */ | |
4572 | if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk) | |
4573 | return false; | |
4574 | ||
4575 | dispc_hbl = dispc_htot - xres; | |
4576 | ||
4577 | /* setup DSI videomode */ | |
4578 | ||
4579 | dsi_vm = &ctx->dsi_vm; | |
4580 | memset(dsi_vm, 0, sizeof(*dsi_vm)); | |
4581 | ||
4582 | dsi_vm->hsclk = hsclk; | |
4583 | ||
4584 | dsi_vm->ndl = ndl; | |
4585 | dsi_vm->bitspp = bitspp; | |
4586 | ||
4587 | if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) { | |
4588 | hsa = 0; | |
4589 | } else if (ndl == 3 && req_vm->hsw == 0) { | |
4590 | hsa = 0; | |
4591 | } else { | |
4592 | hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom); | |
4593 | hsa = max(hsa - hse, 1); | |
4594 | } | |
4595 | ||
4596 | hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom); | |
4597 | hbp = max(hbp, 1); | |
4598 | ||
4599 | hfp = dsi_hbl - (hss + hsa + hse + hbp); | |
4600 | if (hfp < 1) { | |
4601 | int t; | |
4602 | /* we need to take cycles from hbp */ | |
4603 | ||
4604 | t = 1 - hfp; | |
4605 | hbp = max(hbp - t, 1); | |
4606 | hfp = dsi_hbl - (hss + hsa + hse + hbp); | |
4607 | ||
4608 | if (hfp < 1 && hsa > 0) { | |
4609 | /* we need to take cycles from hsa */ | |
4610 | t = 1 - hfp; | |
4611 | hsa = max(hsa - t, 1); | |
4612 | hfp = dsi_hbl - (hss + hsa + hse + hbp); | |
4613 | } | |
4614 | } | |
4615 | ||
4616 | if (hfp < 1) | |
4617 | return false; | |
4618 | ||
4619 | dsi_vm->hss = hss; | |
4620 | dsi_vm->hsa = hsa; | |
4621 | dsi_vm->hse = hse; | |
4622 | dsi_vm->hbp = hbp; | |
4623 | dsi_vm->hact = xres; | |
4624 | dsi_vm->hfp = hfp; | |
4625 | ||
4626 | dsi_vm->vsa = req_vm->vsw; | |
4627 | dsi_vm->vbp = req_vm->vbp; | |
4628 | dsi_vm->vact = req_vm->y_res; | |
4629 | dsi_vm->vfp = req_vm->vfp; | |
4630 | ||
4631 | dsi_vm->trans_mode = cfg->trans_mode; | |
4632 | ||
4633 | dsi_vm->blanking_mode = 0; | |
4634 | dsi_vm->hsa_blanking_mode = 1; | |
4635 | dsi_vm->hfp_blanking_mode = 1; | |
4636 | dsi_vm->hbp_blanking_mode = 1; | |
4637 | ||
4638 | dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on; | |
4639 | dsi_vm->window_sync = 4; | |
4640 | ||
4641 | /* setup DISPC videomode */ | |
4642 | ||
4643 | dispc_vm = &ctx->dispc_vm; | |
4644 | *dispc_vm = *req_vm; | |
4645 | dispc_vm->pixelclock = dispc_pck; | |
4646 | ||
4647 | if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) { | |
4648 | hsa = div64_u64((u64)req_vm->hsw * dispc_pck, | |
4649 | req_pck_nom); | |
4650 | hsa = max(hsa, 1); | |
4651 | } else { | |
4652 | hsa = 1; | |
4653 | } | |
4654 | ||
4655 | hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom); | |
4656 | hbp = max(hbp, 1); | |
4657 | ||
4658 | hfp = dispc_hbl - hsa - hbp; | |
4659 | if (hfp < 1) { | |
4660 | int t; | |
4661 | /* we need to take cycles from hbp */ | |
4662 | ||
4663 | t = 1 - hfp; | |
4664 | hbp = max(hbp - t, 1); | |
4665 | hfp = dispc_hbl - hsa - hbp; | |
4666 | ||
4667 | if (hfp < 1) { | |
4668 | /* we need to take cycles from hsa */ | |
4669 | t = 1 - hfp; | |
4670 | hsa = max(hsa - t, 1); | |
4671 | hfp = dispc_hbl - hsa - hbp; | |
4672 | } | |
4673 | } | |
4674 | ||
4675 | if (hfp < 1) | |
4676 | return false; | |
4677 | ||
4678 | dispc_vm->hfp = hfp; | |
4679 | dispc_vm->hsw = hsa; | |
4680 | dispc_vm->hbp = hbp; | |
4681 | ||
4682 | return true; | |
4683 | } | |
4684 | ||
4685 | ||
4686 | static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck, | |
4687 | unsigned long pck, void *data) | |
4688 | { | |
4689 | struct dsi_clk_calc_ctx *ctx = data; | |
4690 | ||
4691 | ctx->dispc_cinfo.lck_div = lckd; | |
4692 | ctx->dispc_cinfo.pck_div = pckd; | |
4693 | ctx->dispc_cinfo.lck = lck; | |
4694 | ctx->dispc_cinfo.pck = pck; | |
4695 | ||
4696 | if (dsi_vm_calc_blanking(ctx) == false) | |
4697 | return false; | |
4698 | ||
4699 | #ifdef PRINT_VERBOSE_VM_TIMINGS | |
4700 | print_dispc_vm("dispc", &ctx->dispc_vm); | |
4701 | print_dsi_vm("dsi ", &ctx->dsi_vm); | |
4702 | print_dispc_vm("req ", ctx->config->timings); | |
4703 | print_dsi_dispc_vm("act ", &ctx->dsi_vm); | |
4704 | #endif | |
4705 | ||
4706 | return true; | |
4707 | } | |
4708 | ||
4709 | static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc, | |
4710 | void *data) | |
4711 | { | |
4712 | struct dsi_clk_calc_ctx *ctx = data; | |
4713 | unsigned long pck_max; | |
4714 | ||
4715 | ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; | |
4716 | ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; | |
4717 | ||
4718 | /* | |
4719 | * In burst mode we can let the dispc pck be arbitrarily high, but it | |
4720 | * limits our scaling abilities. So for now, don't aim too high. | |
4721 | */ | |
4722 | ||
4723 | if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE) | |
4724 | pck_max = ctx->req_pck_max + 10000000; | |
4725 | else | |
4726 | pck_max = ctx->req_pck_max; | |
4727 | ||
4728 | return dispc_div_calc(dispc, ctx->req_pck_min, pck_max, | |
4729 | dsi_vm_calc_dispc_cb, ctx); | |
4730 | } | |
4731 | ||
4732 | static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint, | |
4733 | unsigned long clkdco, void *data) | |
4734 | { | |
4735 | struct dsi_clk_calc_ctx *ctx = data; | |
4736 | ||
4737 | ctx->dsi_cinfo.n = n; | |
4738 | ctx->dsi_cinfo.m = m; | |
4739 | ctx->dsi_cinfo.fint = fint; | |
4740 | ctx->dsi_cinfo.clkdco = clkdco; | |
4741 | ||
4742 | return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, | |
4743 | dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), | |
4744 | dsi_vm_calc_hsdiv_cb, ctx); | |
4745 | } | |
4746 | ||
4747 | static bool dsi_vm_calc(struct dsi_data *dsi, | |
4748 | const struct omap_dss_dsi_config *cfg, | |
4749 | struct dsi_clk_calc_ctx *ctx) | |
4750 | { | |
4751 | const struct omap_video_timings *t = cfg->timings; | |
4752 | unsigned long clkin; | |
4753 | unsigned long pll_min; | |
4754 | unsigned long pll_max; | |
4755 | int ndl = dsi->num_lanes_used - 1; | |
4756 | int bitspp = dsi_get_pixel_size(cfg->pixel_format); | |
4757 | unsigned long byteclk_min; | |
4758 | ||
4759 | clkin = clk_get_rate(dsi->pll.clkin); | |
4760 | ||
4761 | memset(ctx, 0, sizeof(*ctx)); | |
4762 | ctx->dsidev = dsi->pdev; | |
4763 | ctx->pll = &dsi->pll; | |
4764 | ctx->config = cfg; | |
4765 | ||
4766 | /* these limits should come from the panel driver */ | |
4767 | ctx->req_pck_min = t->pixelclock - 1000; | |
4768 | ctx->req_pck_nom = t->pixelclock; | |
4769 | ctx->req_pck_max = t->pixelclock + 1000; | |
4770 | ||
4771 | byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8); | |
4772 | pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4); | |
4773 | ||
4774 | if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) { | |
4775 | pll_max = cfg->hs_clk_max * 4; | |
4776 | } else { | |
4777 | unsigned long byteclk_max; | |
4778 | byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp, | |
4779 | ndl * 8); | |
4780 | ||
4781 | pll_max = byteclk_max * 4 * 4; | |
4782 | } | |
4783 | ||
4784 | return dss_pll_calc(ctx->pll, clkin, | |
4785 | pll_min, pll_max, | |
4786 | dsi_vm_calc_pll_cb, ctx); | |
4787 | } | |
4788 | ||
4789 | static int dsi_set_config(struct omap_dss_device *dssdev, | |
4790 | const struct omap_dss_dsi_config *config) | |
4791 | { | |
4792 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
4793 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
4794 | struct dsi_clk_calc_ctx ctx; | |
4795 | bool ok; | |
4796 | int r; | |
4797 | ||
4798 | mutex_lock(&dsi->lock); | |
4799 | ||
4800 | dsi->pix_fmt = config->pixel_format; | |
4801 | dsi->mode = config->mode; | |
4802 | ||
4803 | if (config->mode == OMAP_DSS_DSI_VIDEO_MODE) | |
4804 | ok = dsi_vm_calc(dsi, config, &ctx); | |
4805 | else | |
4806 | ok = dsi_cm_calc(dsi, config, &ctx); | |
4807 | ||
4808 | if (!ok) { | |
4809 | DSSERR("failed to find suitable DSI clock settings\n"); | |
4810 | r = -EINVAL; | |
4811 | goto err; | |
4812 | } | |
4813 | ||
4814 | dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo); | |
4815 | ||
4816 | r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI], | |
4817 | config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo); | |
4818 | if (r) { | |
4819 | DSSERR("failed to find suitable DSI LP clock settings\n"); | |
4820 | goto err; | |
4821 | } | |
4822 | ||
4823 | dsi->user_dsi_cinfo = ctx.dsi_cinfo; | |
4824 | dsi->user_dispc_cinfo = ctx.dispc_cinfo; | |
4825 | ||
4826 | dsi->timings = ctx.dispc_vm; | |
4827 | dsi->vm_timings = ctx.dsi_vm; | |
4828 | ||
4829 | mutex_unlock(&dsi->lock); | |
4830 | ||
4831 | return 0; | |
4832 | err: | |
4833 | mutex_unlock(&dsi->lock); | |
4834 | ||
4835 | return r; | |
4836 | } | |
4837 | ||
4838 | /* | |
4839 | * Return a hardcoded channel for the DSI output. This should work for | |
4840 | * current use cases, but this can be later expanded to either resolve | |
4841 | * the channel in some more dynamic manner, or get the channel as a user | |
4842 | * parameter. | |
4843 | */ | |
4844 | static enum omap_channel dsi_get_channel(int module_id) | |
4845 | { | |
4846 | switch (omapdss_get_version()) { | |
4847 | case OMAPDSS_VER_OMAP24xx: | |
4848 | case OMAPDSS_VER_AM43xx: | |
4849 | DSSWARN("DSI not supported\n"); | |
4850 | return OMAP_DSS_CHANNEL_LCD; | |
4851 | ||
4852 | case OMAPDSS_VER_OMAP34xx_ES1: | |
4853 | case OMAPDSS_VER_OMAP34xx_ES3: | |
4854 | case OMAPDSS_VER_OMAP3630: | |
4855 | case OMAPDSS_VER_AM35xx: | |
4856 | return OMAP_DSS_CHANNEL_LCD; | |
4857 | ||
4858 | case OMAPDSS_VER_OMAP4430_ES1: | |
4859 | case OMAPDSS_VER_OMAP4430_ES2: | |
4860 | case OMAPDSS_VER_OMAP4: | |
4861 | switch (module_id) { | |
4862 | case 0: | |
4863 | return OMAP_DSS_CHANNEL_LCD; | |
4864 | case 1: | |
4865 | return OMAP_DSS_CHANNEL_LCD2; | |
4866 | default: | |
4867 | DSSWARN("unsupported module id\n"); | |
4868 | return OMAP_DSS_CHANNEL_LCD; | |
4869 | } | |
4870 | ||
4871 | case OMAPDSS_VER_OMAP5: | |
4872 | switch (module_id) { | |
4873 | case 0: | |
4874 | return OMAP_DSS_CHANNEL_LCD; | |
4875 | case 1: | |
4876 | return OMAP_DSS_CHANNEL_LCD3; | |
4877 | default: | |
4878 | DSSWARN("unsupported module id\n"); | |
4879 | return OMAP_DSS_CHANNEL_LCD; | |
4880 | } | |
4881 | ||
4882 | default: | |
4883 | DSSWARN("unsupported DSS version\n"); | |
4884 | return OMAP_DSS_CHANNEL_LCD; | |
4885 | } | |
4886 | } | |
4887 | ||
4888 | static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel) | |
4889 | { | |
4890 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
4891 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
4892 | int i; | |
4893 | ||
4894 | for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { | |
4895 | if (!dsi->vc[i].dssdev) { | |
4896 | dsi->vc[i].dssdev = dssdev; | |
4897 | *channel = i; | |
4898 | return 0; | |
4899 | } | |
4900 | } | |
4901 | ||
4902 | DSSERR("cannot get VC for display %s", dssdev->name); | |
4903 | return -ENOSPC; | |
4904 | } | |
4905 | ||
/*
 * Assign the DSI-protocol VC ID used on the wire for an already
 * allocated virtual channel. Both @channel and @vc_id must be 0..3,
 * and @channel must have been requested by @dssdev via dsi_request_vc().
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (vc_id < 0 || vc_id > 3) {
		DSSERR("VC ID out of range\n");
		return -EINVAL;
	}

	if (channel < 0 || channel > 3) {
		DSSERR("Virtual Channel out of range\n");
		return -EINVAL;
	}

	/* only the owner of the channel may change its VC ID */
	if (dsi->vc[channel].dssdev != dssdev) {
		DSSERR("Virtual Channel not allocated to display %s\n",
			dssdev->name);
		return -EINVAL;
	}

	dsi->vc[channel].vc_id = vc_id;

	return 0;
}
4931 | ||
4932 | static void dsi_release_vc(struct omap_dss_device *dssdev, int channel) | |
4933 | { | |
4934 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
4935 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
4936 | ||
4937 | if ((channel >= 0 && channel <= 3) && | |
4938 | dsi->vc[channel].dssdev == dssdev) { | |
4939 | dsi->vc[channel].dssdev = NULL; | |
4940 | dsi->vc[channel].vc_id = 0; | |
4941 | } | |
4942 | } | |
4943 | ||
4944 | ||
4945 | static int dsi_get_clocks(struct platform_device *dsidev) | |
4946 | { | |
4947 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
4948 | struct clk *clk; | |
4949 | ||
4950 | clk = devm_clk_get(&dsidev->dev, "fck"); | |
4951 | if (IS_ERR(clk)) { | |
4952 | DSSERR("can't get fck\n"); | |
4953 | return PTR_ERR(clk); | |
4954 | } | |
4955 | ||
4956 | dsi->dss_clk = clk; | |
4957 | ||
4958 | return 0; | |
4959 | } | |
4960 | ||
4961 | static int dsi_connect(struct omap_dss_device *dssdev, | |
4962 | struct omap_dss_device *dst) | |
4963 | { | |
4964 | struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); | |
4965 | struct omap_overlay_manager *mgr; | |
4966 | int r; | |
4967 | ||
4968 | r = dsi_regulator_init(dsidev); | |
4969 | if (r) | |
4970 | return r; | |
4971 | ||
4972 | mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel); | |
4973 | if (!mgr) | |
4974 | return -ENODEV; | |
4975 | ||
4976 | r = dss_mgr_connect(mgr, dssdev); | |
4977 | if (r) | |
4978 | return r; | |
4979 | ||
4980 | r = omapdss_output_set_device(dssdev, dst); | |
4981 | if (r) { | |
4982 | DSSERR("failed to connect output to new device: %s\n", | |
4983 | dssdev->name); | |
4984 | dss_mgr_disconnect(mgr, dssdev); | |
4985 | return r; | |
4986 | } | |
4987 | ||
4988 | return 0; | |
4989 | } | |
4990 | ||
4991 | static void dsi_disconnect(struct omap_dss_device *dssdev, | |
4992 | struct omap_dss_device *dst) | |
4993 | { | |
4994 | WARN_ON(dst != dssdev->dst); | |
4995 | ||
4996 | if (dst != dssdev->dst) | |
4997 | return; | |
4998 | ||
4999 | omapdss_output_unset_device(dssdev); | |
5000 | ||
5001 | if (dssdev->manager) | |
5002 | dss_mgr_disconnect(dssdev->manager, dssdev); | |
5003 | } | |
5004 | ||
/*
 * DSI output operations exposed to panel drivers via
 * struct omap_dss_device. Covers connection management, bus locking,
 * display enable/disable, virtual channel handling and DCS/generic
 * command transfer.
 */
static const struct omapdss_dsi_ops dsi_ops = {
	.connect = dsi_connect,
	.disconnect = dsi_disconnect,

	.bus_lock = dsi_bus_lock,
	.bus_unlock = dsi_bus_unlock,

	.enable = dsi_display_enable,
	.disable = dsi_display_disable,

	.enable_hs = dsi_vc_enable_hs,

	.configure_pins = dsi_configure_pins,
	.set_config = dsi_set_config,

	.enable_video_output = dsi_enable_video_output,
	.disable_video_output = dsi_disable_video_output,

	.update = dsi_update,

	.enable_te = dsi_enable_te,

	.request_vc = dsi_request_vc,
	.set_vc_id = dsi_set_vc_id,
	.release_vc = dsi_release_vc,

	.dcs_write = dsi_vc_dcs_write,
	.dcs_write_nosync = dsi_vc_dcs_write_nosync,
	.dcs_read = dsi_vc_dcs_read,

	.gen_write = dsi_vc_generic_write,
	.gen_write_nosync = dsi_vc_generic_write_nosync,
	.gen_read = dsi_vc_generic_read,

	.bta_sync = dsi_vc_send_bta_sync,

	.set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
};
5043 | ||
5044 | static void dsi_init_output(struct platform_device *dsidev) | |
5045 | { | |
5046 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
5047 | struct omap_dss_device *out = &dsi->output; | |
5048 | ||
5049 | out->dev = &dsidev->dev; | |
5050 | out->id = dsi->module_id == 0 ? | |
5051 | OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; | |
5052 | ||
5053 | out->output_type = OMAP_DISPLAY_TYPE_DSI; | |
5054 | out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1"; | |
5055 | out->dispc_channel = dsi_get_channel(dsi->module_id); | |
5056 | out->ops.dsi = &dsi_ops; | |
5057 | out->owner = THIS_MODULE; | |
5058 | ||
5059 | omapdss_register_output(out); | |
5060 | } | |
5061 | ||
5062 | static void dsi_uninit_output(struct platform_device *dsidev) | |
5063 | { | |
5064 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
5065 | struct omap_dss_device *out = &dsi->output; | |
5066 | ||
5067 | omapdss_unregister_output(out); | |
5068 | } | |
5069 | ||
5070 | static int dsi_probe_of(struct platform_device *pdev) | |
5071 | { | |
5072 | struct device_node *node = pdev->dev.of_node; | |
5073 | struct dsi_data *dsi = dsi_get_dsidrv_data(pdev); | |
5074 | struct property *prop; | |
5075 | u32 lane_arr[10]; | |
5076 | int len, num_pins; | |
5077 | int r, i; | |
5078 | struct device_node *ep; | |
5079 | struct omap_dsi_pin_config pin_cfg; | |
5080 | ||
5081 | ep = omapdss_of_get_first_endpoint(node); | |
5082 | if (!ep) | |
5083 | return 0; | |
5084 | ||
5085 | prop = of_find_property(ep, "lanes", &len); | |
5086 | if (prop == NULL) { | |
5087 | dev_err(&pdev->dev, "failed to find lane data\n"); | |
5088 | r = -EINVAL; | |
5089 | goto err; | |
5090 | } | |
5091 | ||
5092 | num_pins = len / sizeof(u32); | |
5093 | ||
5094 | if (num_pins < 4 || num_pins % 2 != 0 || | |
5095 | num_pins > dsi->num_lanes_supported * 2) { | |
5096 | dev_err(&pdev->dev, "bad number of lanes\n"); | |
5097 | r = -EINVAL; | |
5098 | goto err; | |
5099 | } | |
5100 | ||
5101 | r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins); | |
5102 | if (r) { | |
5103 | dev_err(&pdev->dev, "failed to read lane data\n"); | |
5104 | goto err; | |
5105 | } | |
5106 | ||
5107 | pin_cfg.num_pins = num_pins; | |
5108 | for (i = 0; i < num_pins; ++i) | |
5109 | pin_cfg.pins[i] = (int)lane_arr[i]; | |
5110 | ||
5111 | r = dsi_configure_pins(&dsi->output, &pin_cfg); | |
5112 | if (r) { | |
5113 | dev_err(&pdev->dev, "failed to configure pins"); | |
5114 | goto err; | |
5115 | } | |
5116 | ||
5117 | of_node_put(ep); | |
5118 | ||
5119 | return 0; | |
5120 | ||
5121 | err: | |
5122 | of_node_put(ep); | |
5123 | return r; | |
5124 | } | |
5125 | ||
/* DSI PLL control operations; register layout is DSS type-A. */
static const struct dss_pll_ops dsi_pll_ops = {
	.enable = dsi_pll_enable,
	.disable = dsi_pll_disable,
	.set_config = dss_pll_write_config_type_a,
};
5131 | ||
/*
 * DSI PLL hardware parameters for OMAP3-class SoCs: divider field
 * widths/positions and the supported fint/clkdco frequency ranges.
 */
static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
	.n_max = (1 << 7) - 1,
	.m_max = (1 << 11) - 1,
	.mX_max = (1 << 4) - 1,
	.fint_min = 750000,
	.fint_max = 2100000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 7,
	.n_lsb = 1,
	.m_msb = 18,
	.m_lsb = 8,

	.mX_msb[0] = 22,
	.mX_lsb[0] = 19,
	.mX_msb[1] = 26,
	.mX_lsb[1] = 23,

	.has_stopmode = true,
	.has_freqsel = true,
	.has_selfreqdco = false,
	.has_refsel = false,
};
5156 | ||
/*
 * DSI PLL hardware parameters for OMAP4: wider N/M/mX fields than OMAP3
 * and no FREQSEL support.
 */
static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
	.n_max = (1 << 8) - 1,
	.m_max = (1 << 12) - 1,
	.mX_max = (1 << 5) - 1,
	.fint_min = 500000,
	.fint_max = 2500000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 8,
	.n_lsb = 1,
	.m_msb = 20,
	.m_lsb = 9,

	.mX_msb[0] = 25,
	.mX_lsb[0] = 21,
	.mX_msb[1] = 30,
	.mX_lsb[1] = 26,

	.has_stopmode = true,
	.has_freqsel = false,
	.has_selfreqdco = false,
	.has_refsel = false,
};
5181 | ||
/*
 * DSI PLL hardware parameters for OMAP5: same field layout as OMAP4 but
 * a much wider fint range, plus SELFREQDCO and REFSEL support.
 */
static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
	.n_max = (1 << 8) - 1,
	.m_max = (1 << 12) - 1,
	.mX_max = (1 << 5) - 1,
	.fint_min = 150000,
	.fint_max = 52000000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 8,
	.n_lsb = 1,
	.m_msb = 20,
	.m_lsb = 9,

	.mX_msb[0] = 25,
	.mX_lsb[0] = 21,
	.mX_msb[1] = 30,
	.mX_lsb[1] = 26,

	.has_stopmode = true,
	.has_freqsel = false,
	.has_selfreqdco = true,
	.has_refsel = true,
};
5206 | ||
5207 | static int dsi_init_pll_data(struct platform_device *dsidev) | |
5208 | { | |
5209 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | |
5210 | struct dss_pll *pll = &dsi->pll; | |
5211 | struct clk *clk; | |
5212 | int r; | |
5213 | ||
5214 | clk = devm_clk_get(&dsidev->dev, "sys_clk"); | |
5215 | if (IS_ERR(clk)) { | |
5216 | DSSERR("can't get sys_clk\n"); | |
5217 | return PTR_ERR(clk); | |
5218 | } | |
5219 | ||
5220 | pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1"; | |
5221 | pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2; | |
5222 | pll->clkin = clk; | |
5223 | pll->base = dsi->pll_base; | |
5224 | ||
5225 | switch (omapdss_get_version()) { | |
5226 | case OMAPDSS_VER_OMAP34xx_ES1: | |
5227 | case OMAPDSS_VER_OMAP34xx_ES3: | |
5228 | case OMAPDSS_VER_OMAP3630: | |
5229 | case OMAPDSS_VER_AM35xx: | |
5230 | pll->hw = &dss_omap3_dsi_pll_hw; | |
5231 | break; | |
5232 | ||
5233 | case OMAPDSS_VER_OMAP4430_ES1: | |
5234 | case OMAPDSS_VER_OMAP4430_ES2: | |
5235 | case OMAPDSS_VER_OMAP4: | |
5236 | pll->hw = &dss_omap4_dsi_pll_hw; | |
5237 | break; | |
5238 | ||
5239 | case OMAPDSS_VER_OMAP5: | |
5240 | pll->hw = &dss_omap5_dsi_pll_hw; | |
5241 | break; | |
5242 | ||
5243 | default: | |
5244 | return -ENODEV; | |
5245 | } | |
5246 | ||
5247 | pll->ops = &dsi_pll_ops; | |
5248 | ||
5249 | r = dss_pll_register(pll); | |
5250 | if (r) | |
5251 | return r; | |
5252 | ||
5253 | return 0; | |
5254 | } | |
5255 | ||
5256 | /* DSI1 HW IP initialisation */ | |
5257 | static int dsi_bind(struct device *dev, struct device *master, void *data) | |
5258 | { | |
5259 | struct platform_device *dsidev = to_platform_device(dev); | |
5260 | u32 rev; | |
5261 | int r, i; | |
5262 | struct dsi_data *dsi; | |
5263 | struct resource *dsi_mem; | |
5264 | struct resource *res; | |
5265 | struct resource temp_res; | |
5266 | ||
5267 | dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL); | |
5268 | if (!dsi) | |
5269 | return -ENOMEM; | |
5270 | ||
5271 | dsi->pdev = dsidev; | |
5272 | dev_set_drvdata(&dsidev->dev, dsi); | |
5273 | ||
5274 | spin_lock_init(&dsi->irq_lock); | |
5275 | spin_lock_init(&dsi->errors_lock); | |
5276 | dsi->errors = 0; | |
5277 | ||
35b522cf | 5278 | #ifdef CONFIG_FB_OMAP2_DSS_COLLECT_IRQ_STATS |
f76ee892 TV |
5279 | spin_lock_init(&dsi->irq_stats_lock); |
5280 | dsi->irq_stats.last_reset = jiffies; | |
5281 | #endif | |
5282 | ||
5283 | mutex_init(&dsi->lock); | |
5284 | sema_init(&dsi->bus_lock, 1); | |
5285 | ||
5286 | INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work, | |
5287 | dsi_framedone_timeout_work_callback); | |
5288 | ||
5289 | #ifdef DSI_CATCH_MISSING_TE | |
6c789357 | 5290 | timer_setup(&dsi->te_timer, dsi_te_timeout, 0); |
f76ee892 TV |
5291 | #endif |
5292 | ||
5293 | res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto"); | |
5294 | if (!res) { | |
5295 | res = platform_get_resource(dsidev, IORESOURCE_MEM, 0); | |
5296 | if (!res) { | |
5297 | DSSERR("can't get IORESOURCE_MEM DSI\n"); | |
5298 | return -EINVAL; | |
5299 | } | |
5300 | ||
5301 | temp_res.start = res->start; | |
5302 | temp_res.end = temp_res.start + DSI_PROTO_SZ - 1; | |
5303 | res = &temp_res; | |
5304 | } | |
5305 | ||
5306 | dsi_mem = res; | |
5307 | ||
5308 | dsi->proto_base = devm_ioremap(&dsidev->dev, res->start, | |
5309 | resource_size(res)); | |
5310 | if (!dsi->proto_base) { | |
5311 | DSSERR("can't ioremap DSI protocol engine\n"); | |
5312 | return -ENOMEM; | |
5313 | } | |
5314 | ||
5315 | res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy"); | |
5316 | if (!res) { | |
5317 | res = platform_get_resource(dsidev, IORESOURCE_MEM, 0); | |
5318 | if (!res) { | |
5319 | DSSERR("can't get IORESOURCE_MEM DSI\n"); | |
5320 | return -EINVAL; | |
5321 | } | |
5322 | ||
5323 | temp_res.start = res->start + DSI_PHY_OFFSET; | |
5324 | temp_res.end = temp_res.start + DSI_PHY_SZ - 1; | |
5325 | res = &temp_res; | |
5326 | } | |
5327 | ||
5328 | dsi->phy_base = devm_ioremap(&dsidev->dev, res->start, | |
5329 | resource_size(res)); | |
43da7575 | 5330 | if (!dsi->phy_base) { |
f76ee892 TV |
5331 | DSSERR("can't ioremap DSI PHY\n"); |
5332 | return -ENOMEM; | |
5333 | } | |
5334 | ||
5335 | res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll"); | |
5336 | if (!res) { | |
5337 | res = platform_get_resource(dsidev, IORESOURCE_MEM, 0); | |
5338 | if (!res) { | |
5339 | DSSERR("can't get IORESOURCE_MEM DSI\n"); | |
5340 | return -EINVAL; | |
5341 | } | |
5342 | ||
5343 | temp_res.start = res->start + DSI_PLL_OFFSET; | |
5344 | temp_res.end = temp_res.start + DSI_PLL_SZ - 1; | |
5345 | res = &temp_res; | |
5346 | } | |
5347 | ||
5348 | dsi->pll_base = devm_ioremap(&dsidev->dev, res->start, | |
5349 | resource_size(res)); | |
43da7575 | 5350 | if (!dsi->pll_base) { |
f76ee892 TV |
5351 | DSSERR("can't ioremap DSI PLL\n"); |
5352 | return -ENOMEM; | |
5353 | } | |
5354 | ||
5355 | dsi->irq = platform_get_irq(dsi->pdev, 0); | |
5356 | if (dsi->irq < 0) { | |
5357 | DSSERR("platform_get_irq failed\n"); | |
5358 | return -ENODEV; | |
5359 | } | |
5360 | ||
5361 | r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler, | |
5362 | IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev); | |
5363 | if (r < 0) { | |
5364 | DSSERR("request_irq failed\n"); | |
5365 | return r; | |
5366 | } | |
5367 | ||
5368 | if (dsidev->dev.of_node) { | |
5369 | const struct of_device_id *match; | |
5370 | const struct dsi_module_id_data *d; | |
5371 | ||
5372 | match = of_match_node(dsi_of_match, dsidev->dev.of_node); | |
5373 | if (!match) { | |
5374 | DSSERR("unsupported DSI module\n"); | |
5375 | return -ENODEV; | |
5376 | } | |
5377 | ||
5378 | d = match->data; | |
5379 | ||
5380 | while (d->address != 0 && d->address != dsi_mem->start) | |
5381 | d++; | |
5382 | ||
5383 | if (d->address == 0) { | |
5384 | DSSERR("unsupported DSI module\n"); | |
5385 | return -ENODEV; | |
5386 | } | |
5387 | ||
5388 | dsi->module_id = d->id; | |
5389 | } else { | |
5390 | dsi->module_id = dsidev->id; | |
5391 | } | |
5392 | ||
5393 | /* DSI VCs initialization */ | |
5394 | for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { | |
5395 | dsi->vc[i].source = DSI_VC_SOURCE_L4; | |
5396 | dsi->vc[i].dssdev = NULL; | |
5397 | dsi->vc[i].vc_id = 0; | |
5398 | } | |
5399 | ||
5400 | r = dsi_get_clocks(dsidev); | |
5401 | if (r) | |
5402 | return r; | |
5403 | ||
5404 | dsi_init_pll_data(dsidev); | |
5405 | ||
5406 | pm_runtime_enable(&dsidev->dev); | |
5407 | ||
5408 | r = dsi_runtime_get(dsidev); | |
5409 | if (r) | |
5410 | goto err_runtime_get; | |
5411 | ||
5412 | rev = dsi_read_reg(dsidev, DSI_REVISION); | |
5413 | dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", | |
5414 | FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); | |
5415 | ||
5416 | /* DSI on OMAP3 doesn't have register DSI_GNQ, set number | |
5417 | * of data to 3 by default */ | |
5418 | if (dss_has_feature(FEAT_DSI_GNQ)) | |
5419 | /* NB_DATA_LANES */ | |
5420 | dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9); | |
5421 | else | |
5422 | dsi->num_lanes_supported = 3; | |
5423 | ||
5424 | dsi->line_buffer_size = dsi_get_line_buf_size(dsidev); | |
5425 | ||
5426 | dsi_init_output(dsidev); | |
5427 | ||
5428 | if (dsidev->dev.of_node) { | |
5429 | r = dsi_probe_of(dsidev); | |
5430 | if (r) { | |
5431 | DSSERR("Invalid DSI DT data\n"); | |
5432 | goto err_probe_of; | |
5433 | } | |
5434 | ||
5435 | r = of_platform_populate(dsidev->dev.of_node, NULL, NULL, | |
5436 | &dsidev->dev); | |
5437 | if (r) | |
5438 | DSSERR("Failed to populate DSI child devices: %d\n", r); | |
5439 | } | |
5440 | ||
5441 | dsi_runtime_put(dsidev); | |
5442 | ||
5443 | if (dsi->module_id == 0) | |
5444 | dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs); | |
5445 | else if (dsi->module_id == 1) | |
5446 | dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs); | |
5447 | ||
35b522cf | 5448 | #ifdef CONFIG_FB_OMAP2_DSS_COLLECT_IRQ_STATS |
f76ee892 TV |
5449 | if (dsi->module_id == 0) |
5450 | dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs); | |
5451 | else if (dsi->module_id == 1) | |
5452 | dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs); | |
5453 | #endif | |
5454 | ||
5455 | return 0; | |
5456 | ||
5457 | err_probe_of: | |
5458 | dsi_uninit_output(dsidev); | |
5459 | dsi_runtime_put(dsidev); | |
5460 | ||
5461 | err_runtime_get: | |
5462 | pm_runtime_disable(&dsidev->dev); | |
5463 | return r; | |
5464 | } | |
5465 | ||
/*
 * Component unbind callback: tear down in the reverse order of
 * dsi_bind() — depopulate DT children, unregister the PLL and output,
 * disable runtime PM and drop the VDDS regulator if it was left enabled.
 */
static void dsi_unbind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *dsidev = to_platform_device(dev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	of_platform_depopulate(&dsidev->dev);

	/* all SCP clock users should be gone by now */
	WARN_ON(dsi->scp_clk_refcount > 0);

	dss_pll_unregister(&dsi->pll);

	dsi_uninit_output(dsidev);

	pm_runtime_disable(&dsidev->dev);

	if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
}
5486 | ||
/* Component framework hooks; real init/teardown lives in bind/unbind. */
static const struct component_ops dsi_component_ops = {
	.bind	= dsi_bind,
	.unbind	= dsi_unbind,
};
5491 | ||
/* Platform probe: defer all work to the component bind callback. */
static int dsi_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &dsi_component_ops);
}
5496 | ||
/* Platform remove: drop the component; unbind does the teardown. */
static int dsi_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &dsi_component_ops);
	return 0;
}
5502 | ||
/*
 * Runtime-PM suspend: mark the module disabled, make sure the IRQ
 * handler observes that and has finished, then release the DISPC
 * runtime reference taken in dsi_runtime_resume().
 */
static int dsi_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);

	dsi->is_enabled = false;
	/* ensure the irq handler sees the is_enabled value */
	smp_wmb();
	/* wait for current handler to finish before turning the DSI off */
	synchronize_irq(dsi->irq);

	dispc_runtime_put();

	return 0;
}
5518 | ||
/*
 * Runtime-PM resume: take a DISPC runtime reference (DSI depends on
 * DISPC being powered) and then mark the module enabled for the IRQ
 * handler. Returns 0 on success or the dispc_runtime_get() error.
 */
static int dsi_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
	int r;

	r = dispc_runtime_get();
	if (r)
		return r;

	dsi->is_enabled = true;
	/* ensure the irq handler sees the is_enabled value */
	smp_wmb();

	return 0;
}
5535 | ||
/* Runtime PM only; system sleep is handled at the DSS core level. */
static const struct dev_pm_ops dsi_pm_ops = {
	.runtime_suspend = dsi_runtime_suspend,
	.runtime_resume = dsi_runtime_resume,
};
5540 | ||
/* OMAP3: a single DSI module; table is terminated by a zero address. */
static const struct dsi_module_id_data dsi_of_data_omap3[] = {
	{ .address = 0x4804fc00, .id = 0, },
	{ },
};
5545 | ||
/* OMAP4: two DSI modules, identified by register base address. */
static const struct dsi_module_id_data dsi_of_data_omap4[] = {
	{ .address = 0x58004000, .id = 0, },
	{ .address = 0x58005000, .id = 1, },
	{ },
};
5551 | ||
/* OMAP5: two DSI modules; second module sits at a different base than OMAP4. */
static const struct dsi_module_id_data dsi_of_data_omap5[] = {
	{ .address = 0x58004000, .id = 0, },
	{ .address = 0x58009000, .id = 1, },
	{ },
};
5557 | ||
/* DT compatible strings, each carrying its SoC's module-id table. */
static const struct of_device_id dsi_of_match[] = {
	{ .compatible = "ti,omap3-dsi", .data = dsi_of_data_omap3, },
	{ .compatible = "ti,omap4-dsi", .data = dsi_of_data_omap4, },
	{ .compatible = "ti,omap5-dsi", .data = dsi_of_data_omap5, },
	{},
};
5564 | ||
/*
 * Platform driver for the DSI hardware IP. Bind attributes are
 * suppressed because the device participates in the DSS component
 * master and must not be unbound independently.
 */
static struct platform_driver omap_dsihw_driver = {
	.probe		= dsi_probe,
	.remove		= dsi_remove,
	.driver         = {
		.name   = "omapdss_dsi",
		.pm	= &dsi_pm_ops,
		.of_match_table = dsi_of_match,
		.suppress_bind_attrs = true,
	},
};
5575 | ||
/* Register the DSI platform driver; called from the DSS core init. */
int __init dsi_init_platform_driver(void)
{
	return platform_driver_register(&omap_dsihw_driver);
}
5580 | ||
/* Unregister the DSI platform driver; called from the DSS core exit. */
void dsi_uninit_platform_driver(void)
{
	platform_driver_unregister(&omap_dsihw_driver);
}