1 | /* |
2 | * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. | |
3 | * Copyright (C) 2013 Red Hat | |
4 | * Author: Rob Clark <robdclark@gmail.com> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms of the GNU General Public License version 2 as published by | |
8 | * the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | * more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License along with | |
16 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
17 | */ | |
18 | ||
19 | #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ | |
20 | #include <linux/kthread.h> | |
21 | #include <linux/debugfs.h> | |
22 | #include <linux/seq_file.h> | |
23 | ||
24 | #include "msm_drv.h" | |
25 | #include "dpu_kms.h" | |
26 | #include <drm/drm_crtc.h> | |
27 | #include <drm/drm_crtc_helper.h> | |
28 | #include "dpu_hwio.h" | |
29 | #include "dpu_hw_catalog.h" | |
30 | #include "dpu_hw_intf.h" | |
31 | #include "dpu_hw_ctl.h" | |
32 | #include "dpu_formats.h" | |
33 | #include "dpu_encoder_phys.h" | |
34 | #include "dpu_crtc.h" | |
35 | #include "dpu_trace.h" | |
36 | #include "dpu_core_irq.h" | |
37 | ||
38 | #define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\ | |
39 | (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) | |
40 | ||
41 | #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\ | |
42 | (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) | |
43 | ||
44 | #define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\ | |
45 | (p) ? (p)->parent->base.id : -1, \ | |
46 | (p) ? (p)->intf_idx - INTF_0 : -1, \ | |
47 | (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \ | |
48 | ##__VA_ARGS__) | |
49 | ||
50 | #define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\ | |
51 | (p) ? (p)->parent->base.id : -1, \ | |
52 | (p) ? (p)->intf_idx - INTF_0 : -1, \ | |
53 | (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \ | |
54 | ##__VA_ARGS__) | |
55 | ||
56 | /* | |
57 | * Two to anticipate panels that can do cmd/vid dynamic switching; the |
58 | * plan is to create all possible physical encoder types and switch between |
59 | * them at runtime |
60 | */ | |
61 | #define NUM_PHYS_ENCODER_TYPES 2 | |
62 | ||
63 | #define MAX_PHYS_ENCODERS_PER_VIRTUAL \ | |
64 | (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES) | |
65 | ||
66 | #define MAX_CHANNELS_PER_ENC 2 | |
67 | ||
68 | #define IDLE_SHORT_TIMEOUT 1 |
69 | ||
70 | #define MAX_VDISPLAY_SPLIT 1080 | |
71 | ||
72 | /** | |
73 | * enum dpu_enc_rc_events - events for resource control state machine | |
74 | * @DPU_ENC_RC_EVENT_KICKOFF: | |
75 | * This event happens at NORMAL priority. | |
76 | * Event that signals the start of the transfer. When this event is | |
77 | * received, enable MDP/DSI core clocks. Regardless of the previous | |
78 | * state, the resource should be in ON state at the end of this event. | |
79 | * @DPU_ENC_RC_EVENT_FRAME_DONE: | |
80 | * This event happens at INTERRUPT level. | |
81 | * Event signals the end of the data transfer after the PP FRAME_DONE | |
82 | * event. At the end of this event, a delayed work is scheduled to go to | |
83 | * IDLE_PC state after IDLE_TIMEOUT time. | |
84 | * @DPU_ENC_RC_EVENT_PRE_STOP: | |
85 | * This event happens at NORMAL priority. | |
86 | * This event, when received during the ON state, leaves the RC state |
87 | * in the PRE_OFF state. It should be followed by the STOP event as | |
88 | * part of encoder disable. | |
89 | * If received during IDLE or OFF states, it will do nothing. | |
90 | * @DPU_ENC_RC_EVENT_STOP: | |
91 | * This event happens at NORMAL priority. | |
92 | * When this event is received, disable all the MDP/DSI core clocks, and | |
93 | * disable IRQs. It should be called from the PRE_OFF or IDLE states. | |
94 | * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing. | |
95 | * PRE_OFF is expected when PRE_STOP was executed during the ON state. | |
96 | * Resource state should be in OFF at the end of the event. | |
97 | * @DPU_ENC_RC_EVENT_ENTER_IDLE: | |
98 | * This event happens at NORMAL priority from a work item. | |
99 | * Event signals that there were no frame updates for IDLE_TIMEOUT time. | |
100 | * This would disable MDP/DSI core clocks and change the resource state | |
101 | * to IDLE. | |
102 | */ | |
103 | enum dpu_enc_rc_events { | |
104 | DPU_ENC_RC_EVENT_KICKOFF = 1, | |
105 | DPU_ENC_RC_EVENT_FRAME_DONE, | |
106 | DPU_ENC_RC_EVENT_PRE_STOP, | |
107 | DPU_ENC_RC_EVENT_STOP, | |
108 | DPU_ENC_RC_EVENT_ENTER_IDLE | |
109 | }; | |
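/*
 * Illustrative event/state sequence for one enable-disable cycle, as handled
 * by dpu_encoder_resource_control() below (a sketch, assuming idle_pc is
 * supported):
 *
 *   KICKOFF     -> ON       MDP/DSI core clocks and IRQs enabled
 *   FRAME_DONE  -> ON       delayed off work queued for IDLE_TIMEOUT
 *   ENTER_IDLE  -> IDLE     clocks/IRQs gated after inactivity
 *   KICKOFF     -> ON       resources re-enabled for the next frame
 *   PRE_STOP    -> PRE_OFF  issued as part of encoder disable
 *   STOP        -> OFF      remaining resources released
 */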
110 | ||
111 | /* | |
112 | * enum dpu_enc_rc_states - states that the resource control maintains | |
113 | * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state | |
114 | * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state | |
115 | * @DPU_ENC_RC_STATE_ON: Resource is in ON state | |
117 | * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state | |
118 | */ | |
119 | enum dpu_enc_rc_states { | |
120 | DPU_ENC_RC_STATE_OFF, | |
121 | DPU_ENC_RC_STATE_PRE_OFF, | |
122 | DPU_ENC_RC_STATE_ON, | |
123 | DPU_ENC_RC_STATE_IDLE | |
124 | }; | |
125 | ||
126 | /** | |
127 | * struct dpu_encoder_virt - virtual encoder. Container of one or more physical | |
128 | * encoders. Virtual encoder manages one "logical" display. Physical | |
129 | * encoders manage one intf block, tied to a specific panel/sub-panel. | |
130 | * Virtual encoder defers as much as possible to the physical encoders. | |
131 | * Virtual encoder registers itself with the DRM Framework as the encoder. | |
132 | * @base: drm_encoder base class for registration with DRM | |
133 | * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes | |
134 | * @bus_scaling_client: Client handle to the bus scaling interface | |
135 | * @num_phys_encs: Actual number of physical encoders contained. | |
136 | * @phys_encs: Container of physical encoders managed. | |
137 | * @cur_master: Pointer to the current master in this mode; an optimization. |
138 | * Only valid after enable. Cleared at disable. |
139 | * @hw_pp: Handle to the pingpong blocks used for the display. The number |
140 | * of pingpong blocks can be different from num_phys_encs. |
141 | * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped |
142 | * for partial update right-only cases, such as pingpong | |
143 | * split where virtual pingpong does not generate IRQs | |
144 | * @crtc_vblank_cb: Callback into the upper layer / CRTC for | |
145 | * notification of the VBLANK | |
146 | * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification | |
147 | * @crtc_kickoff_cb: Callback into CRTC that will flush & start | |
148 | * all CTL paths | |
149 | * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb | |
150 | * @debugfs_root: Debug file system root file node | |
151 | * @enc_lock: Lock around physical encoder create/destroy and | |
152 | * access. |
153 | * @frame_busy_mask: Bitmask tracking which phys_encs are still busy |
154 | * processing the current command. |
155 | * Bit0 = phys_encs[0] etc. | |
156 | * @crtc_frame_event_cb: callback handler for frame event | |
157 | * @crtc_frame_event_cb_data: callback handler private data | |
158 | * @frame_done_timeout: frame done timeout in Hz | |
159 | * @frame_done_timer: watchdog timer for frame done event | |
160 | * @vsync_event_timer: vsync timer | |
161 | * @disp_info: local copy of msm_display_info struct | |
162 | * @idle_pc_supported: indicates whether idle power collapse is supported |
163 | * @rc_lock: resource control mutex lock to protect | |
164 | * virt encoder over various state changes | |
165 | * @rc_state: resource controller state | |
166 | * @delayed_off_work: delayed worker to schedule disabling of | |
167 | * clks and resources after IDLE_TIMEOUT time. | |
168 | * @vsync_event_work: worker to handle vsync event for autorefresh | |
169 | * @topology: topology of the display | |
170 | * @mode_set_complete: flag to indicate modeset completion | |
171 | * @idle_timeout: idle timeout duration in milliseconds | |
172 | */ | |
173 | struct dpu_encoder_virt { | |
174 | struct drm_encoder base; | |
175 | spinlock_t enc_spinlock; | |
176 | uint32_t bus_scaling_client; | |
177 | ||
178 | unsigned int num_phys_encs; |
179 | struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL]; | |
180 | struct dpu_encoder_phys *cur_master; | |
181 | struct dpu_encoder_phys *cur_slave;
182 | struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; |
183 | ||
184 | bool intfs_swapped; | |
185 | ||
186 | void (*crtc_vblank_cb)(void *); | |
187 | void *crtc_vblank_cb_data; | |
188 | ||
189 | struct dentry *debugfs_root; | |
190 | struct mutex enc_lock; | |
191 | DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL); | |
192 | void (*crtc_frame_event_cb)(void *, u32 event); | |
193 | void *crtc_frame_event_cb_data; | |
194 | ||
195 | atomic_t frame_done_timeout; | |
196 | struct timer_list frame_done_timer; | |
197 | struct timer_list vsync_event_timer; | |
198 | ||
199 | struct msm_display_info disp_info; | |
200 | |
201 | bool idle_pc_supported; | |
202 | struct mutex rc_lock; | |
203 | enum dpu_enc_rc_states rc_state; | |
204 | struct kthread_delayed_work delayed_off_work; | |
205 | struct kthread_work vsync_event_work; | |
206 | struct msm_display_topology topology; | |
207 | bool mode_set_complete; | |
208 | ||
209 | u32 idle_timeout; | |
210 | }; | |
211 | ||
212 | #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base) | |
213 | static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc, | |
214 | bool enable) | |
215 | { | |
216 | struct drm_encoder *drm_enc; | |
217 | struct msm_drm_private *priv; | |
218 | struct dpu_kms *dpu_kms; | |
219 | ||
220 | if (!dpu_enc) { | |
221 | DPU_ERROR("invalid dpu enc\n"); | |
222 | return -EINVAL; | |
223 | } | |
224 | ||
225 | drm_enc = &dpu_enc->base; | |
226 | if (!drm_enc->dev || !drm_enc->dev->dev_private) { | |
227 | DPU_ERROR("drm device invalid\n"); | |
228 | return -EINVAL; | |
229 | } | |
230 | ||
231 | priv = drm_enc->dev->dev_private; | |
232 | if (!priv->kms) { | |
233 | DPU_ERROR("invalid kms\n"); | |
234 | return -EINVAL; | |
235 | } | |
236 | ||
237 | dpu_kms = to_dpu_kms(priv->kms); | |
238 | ||
239 | if (enable) | |
240 | pm_runtime_get_sync(&dpu_kms->pdev->dev); | |
241 | else | |
242 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | |
243 | ||
244 | return 0; | |
245 | } | |
246 | ||
247 | void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, | |
248 | enum dpu_intr_idx intr_idx) | |
249 | { | |
250 | DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n", | |
251 | DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0, | |
252 | phys_enc->hw_pp->idx - PINGPONG_0, intr_idx); | |
253 | ||
254 | if (phys_enc->parent_ops->handle_frame_done) | |
255 | phys_enc->parent_ops->handle_frame_done( | |
256 | phys_enc->parent, phys_enc, | |
257 | DPU_ENCODER_FRAME_EVENT_ERROR); | |
258 | } | |
259 | ||
260 | static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id, |
261 | int32_t hw_id, struct dpu_encoder_wait_info *info); | |
262 | ||
263 | int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, |
264 | enum dpu_intr_idx intr_idx, | |
265 | struct dpu_encoder_wait_info *wait_info) | |
266 | { | |
267 | struct dpu_encoder_irq *irq; | |
268 | u32 irq_status; | |
269 | int ret; | |
270 | ||
271 | if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) { | |
272 | DPU_ERROR("invalid params\n"); | |
273 | return -EINVAL; | |
274 | } | |
275 | irq = &phys_enc->irq[intr_idx]; | |
276 | ||
277 | /* note: do master / slave checking outside */ | |
278 | ||
279 | /* return EWOULDBLOCK since we know the wait isn't necessary */ | |
280 | if (phys_enc->enable_state == DPU_ENC_DISABLED) { | |
281 | DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d", | |
282 | DRMID(phys_enc->parent), intr_idx, irq->hw_idx, | |
283 | irq->irq_idx); | |
284 | return -EWOULDBLOCK; | |
285 | } | |
286 | ||
287 | if (irq->irq_idx < 0) { | |
288 | DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s", | |
289 | DRMID(phys_enc->parent), intr_idx, irq->hw_idx, | |
290 | irq->name); | |
291 | return 0; | |
292 | } | |
293 | ||
294 | DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d", | |
295 | DRMID(phys_enc->parent), intr_idx, irq->hw_idx, | |
296 | irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0, | |
297 | atomic_read(wait_info->atomic_cnt)); | |
298 | ||
299 | ret = dpu_encoder_helper_wait_event_timeout( | |
300 | DRMID(phys_enc->parent), | |
301 | irq->hw_idx, | |
302 | wait_info); | |
303 | ||
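/*
 * On timeout, check whether the interrupt is actually pending in hardware;
 * if it is, assume the event was missed and invoke the registered handler
 * directly, otherwise report a real timeout.
 */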
304 | if (ret <= 0) { | |
305 | irq_status = dpu_core_irq_read(phys_enc->dpu_kms, | |
306 | irq->irq_idx, true); | |
307 | if (irq_status) { | |
308 | unsigned long flags; | |
309 | ||
310 | DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, " | |
311 | "hw=%d, irq=%d, pp=%d, atomic_cnt=%d", | |
312 | DRMID(phys_enc->parent), intr_idx, | |
313 | irq->hw_idx, irq->irq_idx, | |
314 | phys_enc->hw_pp->idx - PINGPONG_0, | |
315 | atomic_read(wait_info->atomic_cnt)); | |
316 | local_irq_save(flags); | |
317 | irq->cb.func(phys_enc, irq->irq_idx); | |
318 | local_irq_restore(flags); | |
319 | ret = 0; | |
320 | } else { | |
321 | ret = -ETIMEDOUT; | |
322 | DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, " | |
323 | "hw=%d, irq=%d, pp=%d, atomic_cnt=%d", | |
324 | DRMID(phys_enc->parent), intr_idx, | |
325 | irq->hw_idx, irq->irq_idx, | |
326 | phys_enc->hw_pp->idx - PINGPONG_0, | |
327 | atomic_read(wait_info->atomic_cnt)); | |
328 | } | |
329 | } else { | |
330 | ret = 0; | |
331 | trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent), | |
332 | intr_idx, irq->hw_idx, irq->irq_idx, | |
333 | phys_enc->hw_pp->idx - PINGPONG_0, | |
334 | atomic_read(wait_info->atomic_cnt)); | |
335 | } | |
336 | ||
337 | return ret; | |
338 | } | |
339 | ||
340 | int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc, | |
341 | enum dpu_intr_idx intr_idx) | |
342 | { | |
343 | struct dpu_encoder_irq *irq; | |
344 | int ret = 0; | |
345 | ||
346 | if (!phys_enc || intr_idx >= INTR_IDX_MAX) { | |
347 | DPU_ERROR("invalid params\n"); | |
348 | return -EINVAL; | |
349 | } | |
350 | irq = &phys_enc->irq[intr_idx]; | |
351 | ||
352 | if (irq->irq_idx >= 0) { | |
353 | DPU_DEBUG_PHYS(phys_enc, | |
354 | "skipping already registered irq %s type %d\n", | |
355 | irq->name, irq->intr_type); | |
356 | return 0; | |
357 | } | |
358 | ||
359 | irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms, | |
360 | irq->intr_type, irq->hw_idx); | |
361 | if (irq->irq_idx < 0) { | |
362 | DPU_ERROR_PHYS(phys_enc, | |
363 | "failed to lookup IRQ index for %s type:%d\n", | |
364 | irq->name, irq->intr_type); | |
365 | return -EINVAL; | |
366 | } | |
367 | ||
368 | ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx, | |
369 | &irq->cb); | |
370 | if (ret) { | |
371 | DPU_ERROR_PHYS(phys_enc, | |
372 | "failed to register IRQ callback for %s\n", | |
373 | irq->name); | |
374 | irq->irq_idx = -EINVAL; | |
375 | return ret; | |
376 | } | |
377 | ||
378 | ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1); | |
379 | if (ret) { | |
380 | DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d", | |
381 | DRMID(phys_enc->parent), intr_idx, irq->hw_idx, | |
382 | irq->irq_idx); | |
383 | dpu_core_irq_unregister_callback(phys_enc->dpu_kms, | |
384 | irq->irq_idx, &irq->cb); | |
385 | irq->irq_idx = -EINVAL; | |
386 | return ret; | |
387 | } | |
388 | ||
389 | trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx, | |
390 | irq->hw_idx, irq->irq_idx); | |
391 | ||
392 | return ret; | |
393 | } | |
394 | ||
395 | int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc, | |
396 | enum dpu_intr_idx intr_idx) | |
397 | { | |
398 | struct dpu_encoder_irq *irq; | |
399 | int ret; | |
400 | ||
401 | if (!phys_enc) { | |
402 | DPU_ERROR("invalid encoder\n"); | |
403 | return -EINVAL; | |
404 | } | |
405 | irq = &phys_enc->irq[intr_idx]; | |
406 | ||
407 | /* silently skip irqs that weren't registered */ | |
408 | if (irq->irq_idx < 0) { | |
409 | DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d", | |
410 | DRMID(phys_enc->parent), intr_idx, irq->hw_idx, | |
411 | irq->irq_idx); | |
412 | return 0; | |
413 | } | |
414 | ||
415 | ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1); | |
416 | if (ret) { | |
417 | DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
418 | DRMID(phys_enc->parent), intr_idx, irq->hw_idx, |
419 | irq->irq_idx, ret); | |
420 | } | |
421 | ||
422 | ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx, | |
423 | &irq->cb); | |
424 | if (ret) { | |
425 | DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d", | |
426 | DRMID(phys_enc->parent), intr_idx, irq->hw_idx, | |
427 | irq->irq_idx, ret); | |
428 | } | |
429 | ||
430 | trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx, | |
431 | irq->hw_idx, irq->irq_idx); | |
432 | ||
433 | irq->irq_idx = -EINVAL; | |
434 | ||
435 | return 0; | |
436 | } | |
437 | ||
438 | void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc, | |
439 | struct dpu_encoder_hw_resources *hw_res)
440 | { |
441 | struct dpu_encoder_virt *dpu_enc = NULL; | |
442 | int i = 0; | |
443 | ||
444 | dpu_enc = to_dpu_encoder_virt(drm_enc); |
445 | DPU_DEBUG_ENC(dpu_enc, "\n"); | |
446 | ||
447 | /* Query resources used by phys encs, expected to be without overlap */ | |
448 | memset(hw_res, 0, sizeof(*hw_res)); | |
449 | |
450 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
451 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
452 | ||
453 | if (phys && phys->ops.get_hw_resources) | |
454 | phys->ops.get_hw_resources(phys, hw_res);
455 | } |
456 | } | |
457 | ||
458 | static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
459 | { |
460 | struct dpu_encoder_virt *dpu_enc = NULL; | |
461 | int i = 0; | |
462 | ||
463 | if (!drm_enc) { | |
464 | DPU_ERROR("invalid encoder\n"); | |
465 | return; | |
466 | } | |
467 | ||
468 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
469 | DPU_DEBUG_ENC(dpu_enc, "\n"); | |
470 | ||
471 | mutex_lock(&dpu_enc->enc_lock); | |
472 | ||
473 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
474 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
475 | ||
476 | if (phys && phys->ops.destroy) { | |
477 | phys->ops.destroy(phys); | |
478 | --dpu_enc->num_phys_encs; | |
479 | dpu_enc->phys_encs[i] = NULL; | |
480 | } | |
481 | } | |
482 | ||
483 | if (dpu_enc->num_phys_encs) | |
484 | DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n", | |
485 | dpu_enc->num_phys_encs); | |
486 | dpu_enc->num_phys_encs = 0; | |
487 | mutex_unlock(&dpu_enc->enc_lock); | |
488 | ||
489 | drm_encoder_cleanup(drm_enc); | |
490 | mutex_destroy(&dpu_enc->enc_lock); | |
491 | } |
492 | ||
493 | void dpu_encoder_helper_split_config( | |
494 | struct dpu_encoder_phys *phys_enc, | |
495 | enum dpu_intf interface) | |
496 | { | |
497 | struct dpu_encoder_virt *dpu_enc; | |
498 | struct split_pipe_cfg cfg = { 0 }; | |
499 | struct dpu_hw_mdp *hw_mdptop; | |
500 | struct msm_display_info *disp_info; | |
501 | ||
502 | if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) { | |
503 | DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); | |
504 | return; | |
505 | } | |
506 | ||
507 | dpu_enc = to_dpu_encoder_virt(phys_enc->parent); | |
508 | hw_mdptop = phys_enc->hw_mdptop; | |
509 | disp_info = &dpu_enc->disp_info; | |
510 | ||
511 | if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
512 | return; |
513 | ||
514 | /** | |
515 | * disable split modes since the encoder will be operating as the only |
516 | * encoder, either for the entire use case in the case of, for example, | |
517 | * single DSI, or for this frame in the case of left/right only partial | |
518 | * update. | |
519 | */ | |
520 | if (phys_enc->split_role == ENC_ROLE_SOLO) { | |
521 | if (hw_mdptop->ops.setup_split_pipe) | |
522 | hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); | |
523 | return; | |
524 | } | |
525 | ||
526 | cfg.en = true; | |
527 | cfg.mode = phys_enc->intf_mode; | |
528 | cfg.intf = interface; | |
529 | ||
530 | if (cfg.en && phys_enc->ops.needs_single_flush && | |
531 | phys_enc->ops.needs_single_flush(phys_enc)) | |
532 | cfg.split_flush_en = true; | |
533 | ||
534 | if (phys_enc->split_role == ENC_ROLE_MASTER) { | |
535 | DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en); | |
536 | ||
537 | if (hw_mdptop->ops.setup_split_pipe) | |
538 | hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); | |
539 | } | |
540 | } | |
541 | ||
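/*
 * Copy connector-private mode data onto the adjusted mode. The modes probed
 * from the connector carry driver-private fields that DRM does not carry
 * through the atomic state, so match on hdisplay/vdisplay/vrefresh and pull
 * those fields across (summary of the logic below).
 */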
542 | static void _dpu_encoder_adjust_mode(struct drm_connector *connector, | |
543 | struct drm_display_mode *adj_mode) | |
544 | { | |
545 | struct drm_display_mode *cur_mode; | |
546 | ||
547 | if (!connector || !adj_mode) | |
548 | return; | |
549 | ||
550 | list_for_each_entry(cur_mode, &connector->modes, head) { | |
551 | if (cur_mode->vdisplay == adj_mode->vdisplay && | |
552 | cur_mode->hdisplay == adj_mode->hdisplay && | |
553 | cur_mode->vrefresh == adj_mode->vrefresh) { | |
554 | adj_mode->private = cur_mode->private; | |
555 | adj_mode->private_flags |= cur_mode->private_flags; | |
556 | } | |
557 | } | |
558 | } | |
559 | ||
560 | static struct msm_display_topology dpu_encoder_get_topology( | |
561 | struct dpu_encoder_virt *dpu_enc, | |
562 | struct dpu_kms *dpu_kms, | |
563 | struct drm_display_mode *mode) | |
564 | { | |
565 | struct msm_display_topology topology; | |
566 | int i, intf_count = 0; | |
567 | ||
568 | for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) | |
569 | if (dpu_enc->phys_encs[i]) | |
570 | intf_count++; | |
571 | ||
572 | /* Use split topology for vdisplay > 1080 */ |
573 | topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1; | |
574 | topology.num_enc = 0; | |
575 | topology.num_intf = intf_count; | |
576 | ||
577 | return topology; | |
578 | } | |
579 | static int dpu_encoder_virt_atomic_check( | |
580 | struct drm_encoder *drm_enc, | |
581 | struct drm_crtc_state *crtc_state, | |
582 | struct drm_connector_state *conn_state) | |
583 | { | |
584 | struct dpu_encoder_virt *dpu_enc; | |
585 | struct msm_drm_private *priv; | |
586 | struct dpu_kms *dpu_kms; | |
587 | const struct drm_display_mode *mode; | |
588 | struct drm_display_mode *adj_mode; | |
589 | struct msm_display_topology topology; | |
590 | int i = 0; | |
591 | int ret = 0; | |
592 | ||
593 | if (!drm_enc || !crtc_state || !conn_state) { | |
594 | DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n", | |
595 | drm_enc != 0, crtc_state != 0, conn_state != 0); | |
596 | return -EINVAL; | |
597 | } | |
598 | ||
599 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
600 | DPU_DEBUG_ENC(dpu_enc, "\n"); | |
601 | ||
602 | priv = drm_enc->dev->dev_private; | |
603 | dpu_kms = to_dpu_kms(priv->kms); | |
604 | mode = &crtc_state->mode; | |
605 | adj_mode = &crtc_state->adjusted_mode; | |
606 | trace_dpu_enc_atomic_check(DRMID(drm_enc)); | |
607 | ||
608 | /* | |
609 | * display drivers may populate private fields of the drm display mode | |
610 | * structure while registering possible modes of a connector with DRM. | |
611 | * These private fields are not populated back while DRM invokes | |
612 | * the mode_set callbacks. This module retrieves and populates the | |
613 | * private fields of the given mode. | |
614 | */ | |
615 | _dpu_encoder_adjust_mode(conn_state->connector, adj_mode); | |
616 | ||
617 | /* perform atomic check on the first physical encoder (master) */ | |
618 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
619 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
620 | ||
621 | if (phys && phys->ops.atomic_check) | |
622 | ret = phys->ops.atomic_check(phys, crtc_state, | |
623 | conn_state); | |
624 | else if (phys && phys->ops.mode_fixup) | |
625 | if (!phys->ops.mode_fixup(phys, mode, adj_mode)) | |
626 | ret = -EINVAL; | |
627 | ||
628 | if (ret) { | |
629 | DPU_ERROR_ENC(dpu_enc, | |
630 | "mode unsupported, phys idx %d\n", i); | |
631 | break; | |
632 | } | |
633 | } | |
634 | ||
635 | topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); | |
636 | ||
637 | /* Reserve dynamic resources now. Indicating AtomicTest phase */ | |
638 | if (!ret) { | |
639 | /* | |
640 | * Avoid reserving resources when mode set is pending. Topology | |
641 | * info may not be available to complete reservation. | |
642 | */ | |
643 | if (drm_atomic_crtc_needs_modeset(crtc_state) | |
644 | && dpu_enc->mode_set_complete) { | |
645 | ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, | |
646 | topology, true);
647 | dpu_enc->mode_set_complete = false; |
648 | } | |
649 | } | |
650 | ||
651 | if (!ret) | |
652 | drm_mode_set_crtcinfo(adj_mode, 0); | |
653 | ||
654 | trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags, | |
655 | adj_mode->private_flags); | |
656 | ||
657 | return ret; | |
658 | } | |
659 | ||
660 | static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc, | |
661 | struct msm_display_info *disp_info) | |
662 | { | |
663 | struct dpu_vsync_source_cfg vsync_cfg = { 0 }; | |
664 | struct msm_drm_private *priv; | |
665 | struct dpu_kms *dpu_kms; | |
666 | struct dpu_hw_mdp *hw_mdptop; | |
667 | struct drm_encoder *drm_enc; | |
668 | int i; | |
669 | ||
670 | if (!dpu_enc || !disp_info) { | |
671 | DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n", | |
672 | dpu_enc != NULL, disp_info != NULL); | |
673 | return; | |
674 | } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) { | |
675 | DPU_ERROR("invalid num phys enc %d/%d\n", | |
676 | dpu_enc->num_phys_encs, | |
677 | (int) ARRAY_SIZE(dpu_enc->hw_pp)); | |
678 | return; | |
679 | } | |
680 | ||
681 | drm_enc = &dpu_enc->base; | |
682 | /* these pointers are checked in virt_enable_helper */ |
683 | priv = drm_enc->dev->dev_private; | |
684 | ||
685 | dpu_kms = to_dpu_kms(priv->kms); | |
686 | if (!dpu_kms) { | |
687 | DPU_ERROR("invalid dpu_kms\n"); | |
688 | return; | |
689 | } | |
690 | ||
691 | hw_mdptop = dpu_kms->hw_mdp; | |
692 | if (!hw_mdptop) { | |
693 | DPU_ERROR("invalid mdptop\n"); | |
694 | return; | |
695 | } | |
696 | ||
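/*
 * For command-mode panels, program the vsync/TE source for the pingpong
 * blocks in use: a watchdog timer when the panel TE signal is not usable,
 * otherwise the dedicated TE GPIO.
 */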
697 | if (hw_mdptop->ops.setup_vsync_source && | |
698 | disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) { | |
699 | for (i = 0; i < dpu_enc->num_phys_encs; i++) | |
700 | vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx; | |
701 | ||
702 | vsync_cfg.pp_count = dpu_enc->num_phys_encs; | |
703 | if (disp_info->is_te_using_watchdog_timer) | |
704 | vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0; | |
705 | else | |
706 | vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO; | |
707 | ||
708 | hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg); | |
709 | } | |
710 | } | |
711 | ||
712 | static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable) | |
713 | { | |
714 | struct dpu_encoder_virt *dpu_enc; | |
715 | int i; | |
716 | ||
717 | if (!drm_enc) { | |
718 | DPU_ERROR("invalid encoder\n"); | |
719 | return; | |
720 | } | |
721 | ||
722 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
723 | ||
724 | DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable); | |
725 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
726 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
727 | ||
728 | if (phys && phys->ops.irq_control) | |
729 | phys->ops.irq_control(phys, enable); | |
730 | } | |
731 | ||
732 | } | |
733 | ||
734 | static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc, | |
735 | bool enable) | |
736 | { | |
737 | struct msm_drm_private *priv; | |
738 | struct dpu_kms *dpu_kms; | |
739 | struct dpu_encoder_virt *dpu_enc; | |
740 | ||
741 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
742 | priv = drm_enc->dev->dev_private; | |
743 | dpu_kms = to_dpu_kms(priv->kms); | |
744 | ||
745 | trace_dpu_enc_rc_helper(DRMID(drm_enc), enable); | |
746 | ||
747 | if (!dpu_enc->cur_master) { | |
748 | DPU_ERROR("encoder master not set\n"); | |
749 | return; | |
750 | } | |
751 | ||
752 | if (enable) { | |
753 | /* enable DPU core clks */ | |
754 | pm_runtime_get_sync(&dpu_kms->pdev->dev); | |
755 | ||
756 | /* enable all the irq */ | |
757 | _dpu_encoder_irq_control(drm_enc, true); | |
758 | ||
759 | } else { | |
760 | /* disable all the irq */ | |
761 | _dpu_encoder_irq_control(drm_enc, false); | |
762 | ||
763 | /* disable DPU core clks */ | |
764 | pm_runtime_put_sync(&dpu_kms->pdev->dev); | |
765 | } | |
766 | ||
767 | } | |
768 | ||
769 | static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, | |
770 | u32 sw_event) | |
771 | { | |
772 | struct dpu_encoder_virt *dpu_enc; | |
773 | struct msm_drm_private *priv; | |
774 | struct msm_drm_thread *disp_thread; | |
775 | bool is_vid_mode = false; | |
776 | ||
777 | if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private || | |
778 | !drm_enc->crtc) { | |
779 | DPU_ERROR("invalid parameters\n"); | |
780 | return -EINVAL; | |
781 | } | |
782 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
783 | priv = drm_enc->dev->dev_private; | |
784 | is_vid_mode = dpu_enc->disp_info.capabilities & | |
785 | MSM_DISPLAY_CAP_VID_MODE; | |
786 | ||
787 | if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) { | |
788 | DPU_ERROR("invalid crtc index\n"); | |
789 | return -EINVAL; | |
790 | } | |
791 | disp_thread = &priv->disp_thread[drm_enc->crtc->index]; | |
792 | ||
793 | /* | |
794 | * when idle_pc is not supported, process only KICKOFF, PRE_STOP and STOP |
795 | * events and return early for other events (i.e. wb display). |
796 | */ | |
797 | if (!dpu_enc->idle_pc_supported && | |
798 | (sw_event != DPU_ENC_RC_EVENT_KICKOFF && | |
799 | sw_event != DPU_ENC_RC_EVENT_STOP && | |
800 | sw_event != DPU_ENC_RC_EVENT_PRE_STOP)) | |
801 | return 0; | |
802 | ||
803 | trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported, | |
804 | dpu_enc->rc_state, "begin"); | |
805 | ||
806 | switch (sw_event) { | |
807 | case DPU_ENC_RC_EVENT_KICKOFF: | |
808 | /* cancel delayed off work, if any */ | |
809 | if (kthread_cancel_delayed_work_sync( | |
810 | &dpu_enc->delayed_off_work)) | |
811 | DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", | |
812 | sw_event); | |
813 | ||
814 | mutex_lock(&dpu_enc->rc_lock); | |
815 | ||
816 | /* return if the resource control is already in ON state */ | |
817 | if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { | |
818 | DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in ON state\n", |
819 | DRMID(drm_enc), sw_event); | |
820 | mutex_unlock(&dpu_enc->rc_lock); | |
821 | return 0; | |
822 | } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF && | |
823 | dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) { | |
824 | DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in state %d\n", |
825 | DRMID(drm_enc), sw_event, | |
826 | dpu_enc->rc_state); | |
827 | mutex_unlock(&dpu_enc->rc_lock); | |
828 | return -EINVAL; | |
829 | } | |
830 | ||
831 | if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) | |
832 | _dpu_encoder_irq_control(drm_enc, true); | |
833 | else | |
834 | _dpu_encoder_resource_control_helper(drm_enc, true); | |
835 | ||
836 | dpu_enc->rc_state = DPU_ENC_RC_STATE_ON; | |
837 | ||
838 | trace_dpu_enc_rc(DRMID(drm_enc), sw_event, | |
839 | dpu_enc->idle_pc_supported, dpu_enc->rc_state, | |
840 | "kickoff"); | |
841 | ||
842 | mutex_unlock(&dpu_enc->rc_lock); | |
843 | break; | |
844 | ||
845 | case DPU_ENC_RC_EVENT_FRAME_DONE: | |
846 | /* | |
847 | * mutex lock is not used as this event happens at interrupt | |
848 | * context. And locking is not required as the other events |
849 | * like KICKOFF and STOP do a wait-for-idle before executing |
850 | * the resource_control | |
851 | */ | |
852 | if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { | |
853 | DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n", | |
854 | DRMID(drm_enc), sw_event, | |
855 | dpu_enc->rc_state); | |
856 | return -EINVAL; | |
857 | } | |
858 | ||
859 | /* | |
860 | * schedule off work item only when there are no | |
861 | * frames pending | |
862 | */ | |
863 | if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) { | |
864 | DRM_DEBUG_KMS("id:%d skip schedule work\n", | |
865 | DRMID(drm_enc)); | |
866 | return 0; | |
867 | } | |
868 | ||
869 | kthread_queue_delayed_work( | |
870 | &disp_thread->worker, | |
871 | &dpu_enc->delayed_off_work, | |
872 | msecs_to_jiffies(dpu_enc->idle_timeout)); | |
873 | ||
874 | trace_dpu_enc_rc(DRMID(drm_enc), sw_event, | |
875 | dpu_enc->idle_pc_supported, dpu_enc->rc_state, | |
876 | "frame done"); | |
877 | break; | |
878 | ||
879 | case DPU_ENC_RC_EVENT_PRE_STOP: | |
880 | /* cancel delayed off work, if any */ | |
881 | if (kthread_cancel_delayed_work_sync( | |
882 | &dpu_enc->delayed_off_work)) | |
883 | DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", | |
884 | sw_event); | |
885 | ||
886 | mutex_lock(&dpu_enc->rc_lock); | |
887 | ||
888 | if (is_vid_mode && | |
889 | dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { | |
890 | _dpu_encoder_irq_control(drm_enc, true); | |
891 | } | |
892 | /* skip if is already OFF or IDLE, resources are off already */ | |
893 | else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF || | |
894 | dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { | |
895 | DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n", | |
896 | DRMID(drm_enc), sw_event, | |
897 | dpu_enc->rc_state); | |
898 | mutex_unlock(&dpu_enc->rc_lock); | |
899 | return 0; | |
900 | } | |
901 | ||
902 | dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF; | |
903 | ||
904 | trace_dpu_enc_rc(DRMID(drm_enc), sw_event, | |
905 | dpu_enc->idle_pc_supported, dpu_enc->rc_state, | |
906 | "pre stop"); | |
907 | ||
908 | mutex_unlock(&dpu_enc->rc_lock); | |
909 | break; | |
910 | ||
911 | case DPU_ENC_RC_EVENT_STOP: | |
912 | mutex_lock(&dpu_enc->rc_lock); | |
913 | ||
914 | /* return if the resource control is already in OFF state */ | |
915 | if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) { | |
916 | DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n", | |
917 | DRMID(drm_enc), sw_event); | |
918 | mutex_unlock(&dpu_enc->rc_lock); | |
919 | return 0; | |
920 | } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { | |
921 | DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n", | |
922 | DRMID(drm_enc), sw_event, dpu_enc->rc_state); | |
923 | mutex_unlock(&dpu_enc->rc_lock); | |
924 | return -EINVAL; | |
925 | } | |
926 | ||
927 | /** | |
928 | * expect to arrive here only in either the IDLE or PRE_OFF state; |
929 | * in the IDLE state the resources are already disabled |
930 | */ | |
931 | if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF) | |
932 | _dpu_encoder_resource_control_helper(drm_enc, false); | |
933 | ||
934 | dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF; | |
935 | ||
936 | trace_dpu_enc_rc(DRMID(drm_enc), sw_event, | |
937 | dpu_enc->idle_pc_supported, dpu_enc->rc_state, | |
938 | "stop"); | |
939 | ||
940 | mutex_unlock(&dpu_enc->rc_lock); | |
941 | break; | |
942 | ||
943 | case DPU_ENC_RC_EVENT_ENTER_IDLE: | |
944 | mutex_lock(&dpu_enc->rc_lock); | |
945 | ||
946 | if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { | |
947 | DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n", | |
948 | DRMID(drm_enc), sw_event, dpu_enc->rc_state); | |
949 | mutex_unlock(&dpu_enc->rc_lock); | |
950 | return 0; | |
951 | } | |
952 | ||
953 | /* | |
954 | * if we are in ON but a frame was just kicked off, | |
955 | * ignore the IDLE event, it's probably a stale timer event | |
956 | */ | |
957 | if (dpu_enc->frame_busy_mask[0]) { | |
958 | DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n", | |
959 | DRMID(drm_enc), sw_event, dpu_enc->rc_state); | |
960 | mutex_unlock(&dpu_enc->rc_lock); | |
961 | return 0; | |
962 | } | |
963 | ||
964 | if (is_vid_mode) | |
965 | _dpu_encoder_irq_control(drm_enc, false); | |
966 | else | |
967 | _dpu_encoder_resource_control_helper(drm_enc, false); | |
968 | ||
969 | dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE; | |
970 | ||
971 | trace_dpu_enc_rc(DRMID(drm_enc), sw_event, | |
972 | dpu_enc->idle_pc_supported, dpu_enc->rc_state, | |
973 | "idle"); | |
974 | ||
975 | mutex_unlock(&dpu_enc->rc_lock); | |
976 | break; | |
977 | ||
978 | default: | |
979 | DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc), | |
980 | sw_event); | |
981 | trace_dpu_enc_rc(DRMID(drm_enc), sw_event, | |
982 | dpu_enc->idle_pc_supported, dpu_enc->rc_state, | |
983 | "error"); | |
984 | break; | |
985 | } | |
986 | ||
987 | trace_dpu_enc_rc(DRMID(drm_enc), sw_event, | |
988 | dpu_enc->idle_pc_supported, dpu_enc->rc_state, | |
989 | "end"); | |
990 | return 0; | |
991 | } | |
992 | ||
993 | static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, | |
994 | struct drm_display_mode *mode, | |
995 | struct drm_display_mode *adj_mode) | |
996 | { | |
997 | struct dpu_encoder_virt *dpu_enc; | |
998 | struct msm_drm_private *priv; | |
999 | struct dpu_kms *dpu_kms; | |
1000 | struct list_head *connector_list; | |
1001 | struct drm_connector *conn = NULL, *conn_iter; | |
1002 | struct dpu_rm_hw_iter pp_iter, ctl_iter;
1003 | struct msm_display_topology topology;
1004 | struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
1005 | int i = 0, ret; |
1006 | ||
1007 | if (!drm_enc) { | |
1008 | DPU_ERROR("invalid encoder\n"); | |
1009 | return; | |
1010 | } | |
1011 | ||
1012 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1013 | DPU_DEBUG_ENC(dpu_enc, "\n"); | |
1014 | ||
1015 | priv = drm_enc->dev->dev_private; | |
1016 | dpu_kms = to_dpu_kms(priv->kms); | |
1017 | connector_list = &dpu_kms->dev->mode_config.connector_list; | |
1018 | ||
1019 | trace_dpu_enc_mode_set(DRMID(drm_enc)); | |
1020 | ||
1021 | list_for_each_entry(conn_iter, connector_list, head) | |
1022 | if (conn_iter->encoder == drm_enc) | |
1023 | conn = conn_iter; | |
1024 | ||
1025 | if (!conn) { | |
1026 | DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n"); | |
1027 | return; | |
1028 | } else if (!conn->state) { | |
1029 | DPU_ERROR_ENC(dpu_enc, "invalid connector state\n"); | |
1030 | return; | |
1031 | } | |
1032 | ||
1033 | topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); | |
1034 | ||
1035 | /* Reserve dynamic resources now. Indicating non-AtomicTest phase */ | |
1036 | ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state, | |
1037 | topology, false);
1038 | if (ret) { |
1039 | DPU_ERROR_ENC(dpu_enc, | |
1040 | "failed to reserve hw resources, %d\n", ret); | |
1041 | return; | |
1042 | } | |
1043 | ||
1044 | dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG); | |
1045 | for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { | |
1046 | dpu_enc->hw_pp[i] = NULL; | |
1047 | if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter)) | |
1048 | break; | |
1049 | dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw; | |
1050 | } | |
1051 | ||
1052 | dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL); |
1053 | for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { | |
1054 | if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter)) | |
1055 | break; | |
1056 | hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw; | |
1057 | } | |
1058 | ||
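/*
 * Bind the pingpong and CTL blocks reserved above to each physical encoder
 * before handing down the mode; both must have been assigned by the
 * resource manager for every active phys encoder.
 */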
1059 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { |
1060 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
1061 | ||
1062 | if (phys) { | |
1063 | if (!dpu_enc->hw_pp[i]) { | |
1064 | DPU_ERROR_ENC(dpu_enc, "no pp block assigned "
1065 | "at idx: %d\n", i); | |
1066 | return; |
1067 | } | |
1068 | |
1069 | if (!hw_ctl[i]) { | |
1070 | DPU_ERROR_ENC(dpu_enc, "no ctl block assigned "
1071 | "at idx: %d\n", i); | |
1072 | return; | |
1073 | } | |
1074 | ||
1075 | phys->hw_pp = dpu_enc->hw_pp[i];
1076 | phys->hw_ctl = hw_ctl[i]; |
1077 | ||
1078 | phys->connector = conn->state->connector;
1079 | if (phys->ops.mode_set) |
1080 | phys->ops.mode_set(phys, mode, adj_mode); | |
1081 | } | |
1082 | } | |
1083 | ||
1084 | dpu_enc->mode_set_complete = true; | |
1085 | } | |
1086 | ||
1087 | static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) | |
1088 | { | |
1089 | struct dpu_encoder_virt *dpu_enc = NULL; | |
1090 | struct msm_drm_private *priv; | |
1091 | struct dpu_kms *dpu_kms; | |
1092 | ||
1093 | if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { | |
1094 | DPU_ERROR("invalid parameters\n"); | |
1095 | return; | |
1096 | } | |
1097 | ||
1098 | priv = drm_enc->dev->dev_private; | |
1099 | dpu_kms = to_dpu_kms(priv->kms); | |
1100 | if (!dpu_kms) { | |
1101 | DPU_ERROR("invalid dpu_kms\n"); | |
1102 | return; | |
1103 | } | |
1104 | ||
1105 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1106 | if (!dpu_enc || !dpu_enc->cur_master) { | |
1107 | DPU_ERROR("invalid dpu encoder/master\n"); | |
1108 | return; | |
1109 | } | |
1110 | ||
1111 | if (dpu_enc->cur_master->hw_mdptop && |
1112 | dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc) | |
1113 | dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc( | |
1114 | dpu_enc->cur_master->hw_mdptop, | |
1115 | dpu_kms->catalog); | |
1116 | ||
1117 | _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info); | |
1118 | } | |
1119 | ||
1120 | void dpu_encoder_virt_restore(struct drm_encoder *drm_enc) | |
1121 | { | |
1122 | struct dpu_encoder_virt *dpu_enc = NULL; | |
1123 | int i; | |
1124 | ||
1125 | if (!drm_enc) { | |
1126 | DPU_ERROR("invalid encoder\n"); | |
1127 | return; | |
1128 | } | |
1129 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1130 | ||
1131 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1132 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
1133 | ||
1134 | if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore) | |
1135 | phys->ops.restore(phys); | |
1136 | } | |
1137 | ||
1138 | if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore) | |
1139 | dpu_enc->cur_master->ops.restore(dpu_enc->cur_master); | |
1140 | ||
1141 | _dpu_encoder_virt_enable_helper(drm_enc); | |
1142 | } | |
1143 | ||
1144 | static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) | |
1145 | { | |
1146 | struct dpu_encoder_virt *dpu_enc = NULL; | |
1147 | int ret = 0;
1148 | struct drm_display_mode *cur_mode = NULL; |
1149 | ||
1150 | if (!drm_enc) { | |
1151 | DPU_ERROR("invalid encoder\n"); | |
1152 | return; | |
1153 | } | |
1154 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1155 | cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; | |
1156 | ||
1157 | trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, | |
1158 | cur_mode->vdisplay); | |
1159 | ||
1160 | /* always enable slave encoder before master */ |
1161 | if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable) | |
1162 | dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave); | |
25fdd593 | 1163 | |
86b89080 JS |
1164 | if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable) |
1165 | dpu_enc->cur_master->ops.enable(dpu_enc->cur_master); | |
1166 | |
1167 | ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); | |
1168 | if (ret) { | |
1169 | DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n", | |
1170 | ret); | |
1171 | return; | |
1172 | } | |
1173 | ||
1174 | _dpu_encoder_virt_enable_helper(drm_enc); |
1175 | } | |
1176 | ||
1177 | static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) | |
1178 | { | |
1179 | struct dpu_encoder_virt *dpu_enc = NULL; | |
1180 | struct msm_drm_private *priv; | |
1181 | struct dpu_kms *dpu_kms; | |
1182 | struct drm_display_mode *mode; | |
1183 | int i = 0; | |
1184 | ||
1185 | if (!drm_enc) { | |
1186 | DPU_ERROR("invalid encoder\n"); | |
1187 | return; | |
1188 | } else if (!drm_enc->dev) { | |
1189 | DPU_ERROR("invalid dev\n"); | |
1190 | return; | |
1191 | } else if (!drm_enc->dev->dev_private) { | |
1192 | DPU_ERROR("invalid dev_private\n"); | |
1193 | return; | |
1194 | } | |
1195 | ||
1196 | mode = &drm_enc->crtc->state->adjusted_mode; | |
1197 | ||
1198 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1199 | DPU_DEBUG_ENC(dpu_enc, "\n"); | |
1200 | ||
1201 | priv = drm_enc->dev->dev_private; | |
1202 | dpu_kms = to_dpu_kms(priv->kms); | |
1203 | ||
1204 | trace_dpu_enc_disable(DRMID(drm_enc)); | |
1205 | ||
1206 | /* wait for idle */ | |
1207 | dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); | |
1208 | ||
1209 | dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); | |
1210 | ||
1211 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1212 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
1213 | ||
1214 | if (phys && phys->ops.disable) | |
1215 | phys->ops.disable(phys); | |
1216 | } | |
1217 | ||
1218 | /* after phys waits for frame-done, there should be no more frames pending */ |
1219 | if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) { | |
1220 | DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); | |
1221 | del_timer_sync(&dpu_enc->frame_done_timer); | |
1222 | } | |
1223 | ||
1224 | dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); | |
1225 | ||
1226 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1227 | if (dpu_enc->phys_encs[i]) | |
1228 | dpu_enc->phys_encs[i]->connector = NULL; | |
1229 | } | |
1230 | ||
1231 | DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); |
1232 | ||
1233 | dpu_rm_release(&dpu_kms->rm, drm_enc); | |
1234 | } | |
1235 | ||
1236 | static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog, | |
1237 | enum dpu_intf_type type, u32 controller_id) | |
1238 | { | |
1239 | int i = 0; | |
1240 | ||
1241 | for (i = 0; i < catalog->intf_count; i++) { | |
1242 | if (catalog->intf[i].type == type | |
1243 | && catalog->intf[i].controller_id == controller_id) { | |
1244 | return catalog->intf[i].id; | |
1245 | } | |
1246 | } | |
1247 | ||
1248 | return INTF_MAX; | |
1249 | } | |
1250 | ||
1251 | static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, | |
1252 | struct dpu_encoder_phys *phy_enc) | |
1253 | { | |
1254 | struct dpu_encoder_virt *dpu_enc = NULL; | |
1255 | unsigned long lock_flags; | |
1256 | ||
1257 | if (!drm_enc || !phy_enc) | |
1258 | return; | |
1259 | ||
1260 | DPU_ATRACE_BEGIN("encoder_vblank_callback"); | |
1261 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1262 | ||
1263 | spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); | |
1264 | if (dpu_enc->crtc_vblank_cb) | |
1265 | dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data); | |
1266 | spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); | |
1267 | ||
1268 | atomic_inc(&phy_enc->vsync_cnt); | |
1269 | DPU_ATRACE_END("encoder_vblank_callback"); | |
1270 | } | |
1271 | ||
1272 | static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, | |
1273 | struct dpu_encoder_phys *phy_enc) | |
1274 | { | |
1275 | if (!phy_enc) | |
1276 | return; | |
1277 | ||
1278 | DPU_ATRACE_BEGIN("encoder_underrun_callback"); | |
1279 | atomic_inc(&phy_enc->underrun_cnt); | |
1280 | trace_dpu_enc_underrun_cb(DRMID(drm_enc), | |
1281 | atomic_read(&phy_enc->underrun_cnt)); | |
1282 | DPU_ATRACE_END("encoder_underrun_callback"); | |
1283 | } | |
1284 | ||
1285 | void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc, | |
1286 | void (*vbl_cb)(void *), void *vbl_data) | |
1287 | { | |
1288 | struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1289 | unsigned long lock_flags; | |
1290 | bool enable; | |
1291 | int i; | |
1292 | ||
1293 | enable = vbl_cb ? true : false; | |
1294 | ||
1295 | if (!drm_enc) { | |
1296 | DPU_ERROR("invalid encoder\n"); | |
1297 | return; | |
1298 | } | |
1299 | trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable); | |
1300 | ||
1301 | spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); | |
1302 | dpu_enc->crtc_vblank_cb = vbl_cb; | |
1303 | dpu_enc->crtc_vblank_cb_data = vbl_data; | |
1304 | spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); | |
1305 | ||
1306 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1307 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
1308 | ||
1309 | if (phys && phys->ops.control_vblank_irq) | |
1310 | phys->ops.control_vblank_irq(phys, enable); | |
1311 | } | |
1312 | } | |
1313 | ||
1314 | void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc, | |
1315 | void (*frame_event_cb)(void *, u32 event), | |
1316 | void *frame_event_cb_data) | |
1317 | { | |
1318 | struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1319 | unsigned long lock_flags; | |
1320 | bool enable; | |
1321 | ||
1322 | enable = frame_event_cb ? true : false; | |
1323 | ||
1324 | if (!drm_enc) { | |
1325 | DPU_ERROR("invalid encoder\n"); | |
1326 | return; | |
1327 | } | |
1328 | trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable); | |
1329 | ||
1330 | spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); | |
1331 | dpu_enc->crtc_frame_event_cb = frame_event_cb; | |
1332 | dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data; | |
1333 | spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); | |
1334 | } | |
1335 | ||
1336 | static void dpu_encoder_frame_done_callback( | |
1337 | struct drm_encoder *drm_enc, | |
1338 | struct dpu_encoder_phys *ready_phys, u32 event) | |
1339 | { | |
1340 | struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1341 | unsigned int i; | |
1342 | ||
1343 | if (event & (DPU_ENCODER_FRAME_EVENT_DONE | |
1344 | | DPU_ENCODER_FRAME_EVENT_ERROR | |
1345 | | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) { | |
1346 | ||
1347 | if (!dpu_enc->frame_busy_mask[0]) { | |
1348 | /** | |
1349 | * suppress frame_done without waiter, | |
1350 | * likely autorefresh | |
1351 | */ | |
1352 | trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), | |
1353 | event, ready_phys->intf_idx); | |
1354 | return; | |
1355 | } | |
1356 | ||
1357 | /* One of the physical encoders has become idle */ | |
1358 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1359 | if (dpu_enc->phys_encs[i] == ready_phys) { | |
1360 | trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i, |
1361 | dpu_enc->frame_busy_mask[0]); | |
1362 | clear_bit(i, dpu_enc->frame_busy_mask);
1363 | } |
1364 | } | |
1365 | ||
1366 | if (!dpu_enc->frame_busy_mask[0]) { | |
1367 | atomic_set(&dpu_enc->frame_done_timeout, 0); | |
1368 | del_timer(&dpu_enc->frame_done_timer); | |
1369 | ||
1370 | dpu_encoder_resource_control(drm_enc, | |
1371 | DPU_ENC_RC_EVENT_FRAME_DONE); | |
1372 | ||
1373 | if (dpu_enc->crtc_frame_event_cb) | |
1374 | dpu_enc->crtc_frame_event_cb( | |
1375 | dpu_enc->crtc_frame_event_cb_data, | |
1376 | event); | |
1377 | } | |
1378 | } else { | |
1379 | if (dpu_enc->crtc_frame_event_cb) | |
1380 | dpu_enc->crtc_frame_event_cb( | |
1381 | dpu_enc->crtc_frame_event_cb_data, event); | |
1382 | } | |
1383 | } | |
1384 | ||
1385 | static void dpu_encoder_off_work(struct kthread_work *work) | |
1386 | { | |
1387 | struct dpu_encoder_virt *dpu_enc = container_of(work, | |
1388 | struct dpu_encoder_virt, delayed_off_work.work); | |
1389 | ||
1390 | if (!dpu_enc) { | |
1391 | DPU_ERROR("invalid dpu encoder\n"); | |
1392 | return; | |
1393 | } | |
1394 | ||
1395 | dpu_encoder_resource_control(&dpu_enc->base, | |
1396 | DPU_ENC_RC_EVENT_ENTER_IDLE); | |
1397 | ||
1398 | dpu_encoder_frame_done_callback(&dpu_enc->base, NULL, | |
1399 | DPU_ENCODER_FRAME_EVENT_IDLE); | |
1400 | } | |
1401 | ||
1402 | /** | |
1403 | * _dpu_encoder_trigger_flush - trigger flush for a physical encoder | |
1404 | * @drm_enc: Pointer to drm encoder structure |
1405 | * @phys: Pointer to physical encoder structure |
1406 | * @extra_flush_bits: Additional bit mask to include in flush trigger |
1407 | */ | |
1408 | static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, | |
1409 | struct dpu_encoder_phys *phys, uint32_t extra_flush_bits) | |
1410 | { | |
1411 | struct dpu_hw_ctl *ctl; | |
1412 | int pending_kickoff_cnt; | |
1413 | u32 ret = UINT_MAX; | |
1414 | ||
1415 | if (!drm_enc || !phys) { | |
1416 | DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n", | |
1417 | drm_enc != 0, phys != 0); | |
1418 | return; | |
1419 | } | |
1420 | ||
1421 | if (!phys->hw_pp) { | |
1422 | DPU_ERROR("invalid pingpong hw\n"); | |
1423 | return; | |
1424 | } | |
1425 | ||
1426 | ctl = phys->hw_ctl; | |
1427 | if (!ctl || !ctl->ops.trigger_flush) { | |
1428 | DPU_ERROR("missing trigger cb\n"); | |
1429 | return; | |
1430 | } | |
1431 | ||
1432 | pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys); | |
1433 | ||
1434 | if (extra_flush_bits && ctl->ops.update_pending_flush) | |
1435 | ctl->ops.update_pending_flush(ctl, extra_flush_bits); | |
1436 | ||
1437 | ctl->ops.trigger_flush(ctl); | |
1438 | ||
1439 | if (ctl->ops.get_pending_flush) | |
1440 | ret = ctl->ops.get_pending_flush(ctl); | |
1441 | ||
1442 | trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx, | |
1443 | pending_kickoff_cnt, ctl->idx, |
1444 | extra_flush_bits, ret); | |
1445 | } |
1446 | ||
1447 | /** | |
1448 | * _dpu_encoder_trigger_start - trigger start for a physical encoder | |
1449 | * @phys: Pointer to physical encoder structure |
1450 | */ | |
1451 | static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) | |
1452 | { | |
1453 | if (!phys) { | |
1454 | DPU_ERROR("invalid argument(s)\n"); | |
1455 | return; | |
1456 | } | |
1457 | ||
1458 | if (!phys->hw_pp) { | |
1459 | DPU_ERROR("invalid pingpong hw\n"); | |
1460 | return; | |
1461 | } | |
1462 | ||
1463 | if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED) | |
1464 | phys->ops.trigger_start(phys); | |
1465 | } | |
1466 | ||
1467 | void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) | |
1468 | { | |
1469 | struct dpu_hw_ctl *ctl; | |
1470 | ||
1471 | if (!phys_enc) { | |
1472 | DPU_ERROR("invalid encoder\n"); | |
1473 | return; | |
1474 | } | |
1475 | ||
1476 | ctl = phys_enc->hw_ctl; | |
1477 | if (ctl && ctl->ops.trigger_start) { | |
1478 | ctl->ops.trigger_start(ctl); | |
1479 | trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx); | |
1480 | } | |
1481 | } | |
1482 | ||
1483 | static int dpu_encoder_helper_wait_event_timeout(
1484 | int32_t drm_id, |
1485 | int32_t hw_id, | |
1486 | struct dpu_encoder_wait_info *info) | |
1487 | { | |
1488 | int rc = 0; | |
1489 | s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms; | |
1490 | s64 jiffies = msecs_to_jiffies(info->timeout_ms); | |
1491 | s64 time; | |
1492 | ||
1493 | do { | |
1494 | rc = wait_event_timeout(*(info->wq), | |
1495 | atomic_read(info->atomic_cnt) == 0, jiffies); | |
1496 | time = ktime_to_ms(ktime_get()); | |
1497 | ||
1498 | trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time, | |
1499 | expected_time, | |
1500 | atomic_read(info->atomic_cnt)); | |
1501 | /* If we timed out but the counter is still non-zero and time remains, wait again */ |
1502 | } while (atomic_read(info->atomic_cnt) && (rc == 0) && | |
1503 | (time < expected_time)); | |
1504 | ||
1505 | return rc; | |
1506 | } | |
1507 | ||
1508 | void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc) | |
1509 | { | |
1510 | struct dpu_encoder_virt *dpu_enc; | |
1511 | struct dpu_hw_ctl *ctl; | |
1512 | int rc; | |
1513 | ||
1514 | if (!phys_enc) { | |
1515 | DPU_ERROR("invalid encoder\n"); | |
1516 | return; | |
1517 | } | |
1518 | dpu_enc = to_dpu_encoder_virt(phys_enc->parent); | |
1519 | ctl = phys_enc->hw_ctl; | |
1520 | ||
1521 | if (!ctl || !ctl->ops.reset) | |
1522 | return; | |
1523 | ||
1524 | DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent), | |
1525 | ctl->idx); | |
1526 | ||
1527 | rc = ctl->ops.reset(ctl); | |
1528 | if (rc) { | |
1529 | DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx); | |
1530 | dpu_dbg_dump(false, __func__, true, true); | |
1531 | } | |
1532 | ||
1533 | phys_enc->enable_state = DPU_ENC_ENABLED; | |
1534 | } | |
1535 | ||
1536 | /** | |
1537 | * _dpu_encoder_kickoff_phys - handle physical encoder kickoff | |
1538 | * Iterate through the physical encoders and perform consolidated flush | |
1539 | * and/or control start triggering as needed. This is done in the virtual | |
1540 | * encoder rather than the individual physical ones in order to handle | |
1541 | * use cases that require visibility into multiple physical encoders at | |
1542 | * a time. | |
1543 | * @dpu_enc: Pointer to virtual encoder structure | |
1544 | */ | |
1545 | static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) | |
1546 | { | |
1547 | struct dpu_hw_ctl *ctl; | |
1548 | uint32_t i, pending_flush; | |
1549 | unsigned long lock_flags; | |
1550 | ||
1551 | if (!dpu_enc) { | |
1552 | DPU_ERROR("invalid encoder\n"); | |
1553 | return; | |
1554 | } | |
1555 | ||
1556 | pending_flush = 0x0; | |
1557 | ||
1558 | /* update pending counts and trigger kickoff ctl flush atomically */ | |
1559 | spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); | |
1560 | ||
1561 | /* don't perform flush/start operations for slave encoders */ | |
1562 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1563 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
1564 | ||
1565 | if (!phys || phys->enable_state == DPU_ENC_DISABLED) | |
1566 | continue; | |
1567 | ||
1568 | ctl = phys->hw_ctl; | |
1569 | if (!ctl) | |
1570 | continue; | |
1571 | ||
1572 | if (phys->split_role != ENC_ROLE_SLAVE) | |
1573 | set_bit(i, dpu_enc->frame_busy_mask); | |
1574 | if (!phys->ops.needs_single_flush || | |
1575 | !phys->ops.needs_single_flush(phys)) | |
1576 | _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0); | |
1577 | else if (ctl->ops.get_pending_flush) | |
1578 | pending_flush |= ctl->ops.get_pending_flush(ctl); | |
1579 | } | |
1580 | ||
1581 | /* for split flush, combine pending flush masks and send to master */ | |
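/*
 * Encoders that report needs_single_flush() (e.g. the slave in a
 * split-panel configuration) did not flush themselves in the loop above;
 * their CTL bits were accumulated in pending_flush and are issued through
 * the master so that all tiles flush together.
 */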
1582 | if (pending_flush && dpu_enc->cur_master) { | |
1583 | _dpu_encoder_trigger_flush( | |
1584 | &dpu_enc->base, | |
1585 | dpu_enc->cur_master, | |
1586 | pending_flush); | |
1587 | } | |
1588 | ||
1589 | _dpu_encoder_trigger_start(dpu_enc->cur_master); | |
1590 | ||
1591 | spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); | |
1592 | } | |
1593 | ||
25fdd593 JS |
1594 | void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) |
1595 | { | |
1596 | struct dpu_encoder_virt *dpu_enc; | |
1597 | struct dpu_encoder_phys *phys; | |
1598 | unsigned int i; | |
1599 | struct dpu_hw_ctl *ctl; | |
1600 | struct msm_display_info *disp_info; | |
1601 | ||
1602 | if (!drm_enc) { | |
1603 | DPU_ERROR("invalid encoder\n"); | |
1604 | return; | |
1605 | } | |
1606 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1607 | disp_info = &dpu_enc->disp_info; | |
1608 | ||
1609 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1610 | phys = dpu_enc->phys_encs[i]; | |
1611 | ||
1612 | if (phys && phys->hw_ctl) { | |
1613 | ctl = phys->hw_ctl; | |
1614 | if (ctl->ops.clear_pending_flush) | |
1615 | ctl->ops.clear_pending_flush(ctl); | |
1616 | ||
1617 | /* update only for command mode primary ctl */ | |
1618 | if ((phys == dpu_enc->cur_master) && | |
1619 | (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) | |
1620 | && ctl->ops.trigger_pending) | |
1621 | ctl->ops.trigger_pending(ctl); | |
1622 | } | |
1623 | } | |
1624 | } | |
1625 | ||
1626 | static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, | |
1627 | struct drm_display_mode *mode) | |
1628 | { | |
1629 | u64 pclk_rate; | |
1630 | u32 pclk_period; | |
1631 | u32 line_time; | |
1632 | ||
1633 | /* | |
1634 | * For linetime calculation, only operate on master encoder. | |
1635 | */ | |
1636 | if (!dpu_enc->cur_master) | |
1637 | return 0; | |
1638 | ||
1639 | if (!dpu_enc->cur_master->ops.get_line_count) { | |
1640 | DPU_ERROR("get_line_count function not defined\n"); | |
1641 | return 0; | |
1642 | } | |
1643 | ||
1644 | pclk_rate = mode->clock; /* pixel clock in kHz */ | |
1645 | if (pclk_rate == 0) { | |
1646 | DPU_ERROR("pclk is 0, cannot calculate line time\n"); | |
1647 | return 0; | |
1648 | } | |
1649 | ||
1650 | pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); | |
1651 | if (pclk_period == 0) { | |
1652 | DPU_ERROR("pclk period is 0\n"); | |
1653 | return 0; | |
1654 | } | |
1655 | ||
1656 | /* | |
1657 | * Line time calculation based on Pixel clock and HTOTAL. | |
1658 | * Final unit is in ns. | |
1659 | */ | |
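/*
 * pclk_period above is in picoseconds (1e9 / kHz), so dividing the
 * per-line product by 1000 yields nanoseconds. Illustrative arithmetic
 * only: a hypothetical 1080p@60 mode with a 148500 kHz pixel clock and
 * htotal of 2200 gives roughly a 6.7 ns pixel period and ~14.8 us per
 * line.
 */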
1660 | line_time = (pclk_period * mode->htotal) / 1000; | |
1661 | if (line_time == 0) { | |
1662 | DPU_ERROR("line time calculation is 0\n"); | |
1663 | return 0; | |
1664 | } | |
1665 | ||
1666 | DPU_DEBUG_ENC(dpu_enc, | |
1667 | "clk_rate=%llukHz, clk_period=%u, linetime=%uns\n", | |
1668 | pclk_rate, pclk_period, line_time); | |
1669 | ||
1670 | return line_time; | |
1671 | } | |
1672 | ||
1673 | static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc, | |
1674 | ktime_t *wakeup_time) | |
1675 | { | |
1676 | struct drm_display_mode *mode; | |
1677 | struct dpu_encoder_virt *dpu_enc; | |
1678 | u32 cur_line; | |
1679 | u32 line_time; | |
1680 | u32 vtotal, time_to_vsync; | |
1681 | ktime_t cur_time; | |
1682 | ||
1683 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1684 | ||
1685 | if (!drm_enc->crtc || !drm_enc->crtc->state) { | |
1686 | DPU_ERROR("crtc/crtc state object is NULL\n"); | |
1687 | return -EINVAL; | |
1688 | } | |
1689 | mode = &drm_enc->crtc->state->adjusted_mode; | |
1690 | ||
1691 | line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode); | |
1692 | if (!line_time) | |
1693 | return -EINVAL; | |
1694 | ||
1695 | cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master); | |
1696 | ||
1697 | vtotal = mode->vtotal; | |
1698 | if (cur_line >= vtotal) | |
1699 | time_to_vsync = line_time * vtotal; | |
1700 | else | |
1701 | time_to_vsync = line_time * (vtotal - cur_line); | |
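/*
 * time_to_vsync is the remaining scanout time in ns: the lines left
 * before vtotal multiplied by the per-line time (a full frame's worth
 * if the current line is already past vtotal). Illustrative only: at
 * ~14.8 us per line with 300 lines remaining, the next vsync is about
 * 4.4 ms away.
 */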
1702 | ||
1703 | if (time_to_vsync == 0) { | |
1704 | DPU_ERROR("time to vsync should not be zero, vtotal=%d\n", | |
1705 | vtotal); | |
1706 | return -EINVAL; | |
1707 | } | |
1708 | ||
1709 | cur_time = ktime_get(); | |
1710 | *wakeup_time = ktime_add_ns(cur_time, time_to_vsync); | |
1711 | ||
1712 | DPU_DEBUG_ENC(dpu_enc, | |
1713 | "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n", | |
1714 | cur_line, vtotal, time_to_vsync, | |
1715 | ktime_to_ms(cur_time), | |
1716 | ktime_to_ms(*wakeup_time)); | |
1717 | return 0; | |
1718 | } | |
1719 | ||
1720 | static void dpu_encoder_vsync_event_handler(struct timer_list *t) | |
1721 | { | |
1722 | struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, | |
1723 | vsync_event_timer); | |
1724 | struct drm_encoder *drm_enc = &dpu_enc->base; | |
1725 | struct msm_drm_private *priv; | |
1726 | struct msm_drm_thread *event_thread; | |
1727 | ||
1728 | if (!drm_enc->dev || !drm_enc->dev->dev_private || | |
1729 | !drm_enc->crtc) { | |
1730 | DPU_ERROR("invalid parameters\n"); | |
1731 | return; | |
1732 | } | |
1733 | ||
1734 | priv = drm_enc->dev->dev_private; | |
1735 | ||
1736 | if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) { | |
1737 | DPU_ERROR("invalid crtc index\n"); | |
1738 | return; | |
1739 | } | |
1740 | event_thread = &priv->event_thread[drm_enc->crtc->index]; | |
1741 | if (!event_thread) { | |
1742 | DPU_ERROR("event_thread not found for crtc:%d\n", | |
1743 | drm_enc->crtc->index); | |
1744 | return; | |
1745 | } | |
1746 | ||
1747 | del_timer(&dpu_enc->vsync_event_timer); | |
1748 | } | |
1749 | ||
1750 | static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work) | |
1751 | { | |
1752 | struct dpu_encoder_virt *dpu_enc = container_of(work, | |
1753 | struct dpu_encoder_virt, vsync_event_work); | |
1754 | ktime_t wakeup_time; | |
1755 | ||
1756 | if (!dpu_enc) { | |
1757 | DPU_ERROR("invalid dpu encoder\n"); | |
1758 | return; | |
1759 | } | |
1760 | ||
1761 | if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time)) | |
1762 | return; | |
1763 | ||
1764 | trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time); | |
1765 | mod_timer(&dpu_enc->vsync_event_timer, | |
1766 | nsecs_to_jiffies(ktime_to_ns(wakeup_time))); | |
1767 | } | |
1768 | ||
1769 | void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, | |
1770 | struct dpu_encoder_kickoff_params *params) | |
1771 | { | |
1772 | struct dpu_encoder_virt *dpu_enc; | |
1773 | struct dpu_encoder_phys *phys; | |
1774 | bool needs_hw_reset = false; | |
1775 | unsigned int i; | |
1776 | ||
1777 | if (!drm_enc || !params) { | |
1778 | DPU_ERROR("invalid args\n"); | |
1779 | return; | |
1780 | } | |
1781 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1782 | ||
1783 | trace_dpu_enc_prepare_kickoff(DRMID(drm_enc)); | |
1784 | ||
1785 | /* prepare for next kickoff, may include waiting on previous kickoff */ | |
1786 | DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); | |
1787 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1788 | phys = dpu_enc->phys_encs[i]; | |
1789 | if (phys) { | |
1790 | if (phys->ops.prepare_for_kickoff) | |
1791 | phys->ops.prepare_for_kickoff(phys, params); | |
1792 | if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) | |
1793 | needs_hw_reset = true; | |
1794 | } | |
1795 | } | |
1796 | DPU_ATRACE_END("enc_prepare_for_kickoff"); | |
1797 | ||
1798 | dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); | |
1799 | ||
1800 | /* if any phys needs reset, reset all phys, in-order */ | |
1801 | if (needs_hw_reset) { | |
1802 | trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc)); | |
1803 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1804 | phys = dpu_enc->phys_encs[i]; | |
1805 | if (phys && phys->ops.hw_reset) | |
1806 | phys->ops.hw_reset(phys); | |
1807 | } | |
1808 | } | |
1809 | } | |
1810 | ||
1811 | void dpu_encoder_kickoff(struct drm_encoder *drm_enc) | |
1812 | { | |
1813 | struct dpu_encoder_virt *dpu_enc; | |
1814 | struct dpu_encoder_phys *phys; | |
1815 | ktime_t wakeup_time; | |
1816 | unsigned int i; | |
1817 | ||
1818 | if (!drm_enc) { | |
1819 | DPU_ERROR("invalid encoder\n"); | |
1820 | return; | |
1821 | } | |
1822 | DPU_ATRACE_BEGIN("encoder_kickoff"); | |
1823 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1824 | ||
1825 | trace_dpu_enc_kickoff(DRMID(drm_enc)); | |
1826 | ||
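/*
 * Arm the frame-done watchdog: DPU_FRAME_DONE_TIMEOUT frame periods,
 * converted to milliseconds (1000 / vrefresh ms per frame) and then to
 * jiffies for the timer. At 60 Hz each frame period is ~16.7 ms.
 */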
1827 | atomic_set(&dpu_enc->frame_done_timeout, | |
1828 | DPU_FRAME_DONE_TIMEOUT * 1000 / | |
1829 | drm_enc->crtc->state->adjusted_mode.vrefresh); | |
1830 | mod_timer(&dpu_enc->frame_done_timer, jiffies + | |
1831 | ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000)); | |
1832 | ||
1833 | /* All phys encs are ready to go, trigger the kickoff */ | |
1834 | _dpu_encoder_kickoff_phys(dpu_enc); | |
1835 | ||
1836 | /* allow phys encs to handle any post-kickoff business */ | |
1837 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1838 | phys = dpu_enc->phys_encs[i]; | |
1839 | if (phys && phys->ops.handle_post_kickoff) | |
1840 | phys->ops.handle_post_kickoff(phys); | |
1841 | } | |
1842 | ||
1e53ac92 | 1843 | if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI && |
25fdd593 JS |
1844 | !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) { |
1845 | trace_dpu_enc_early_kickoff(DRMID(drm_enc), | |
1846 | ktime_to_ms(wakeup_time)); | |
1847 | mod_timer(&dpu_enc->vsync_event_timer, | |
1848 | nsecs_to_jiffies(ktime_to_ns(wakeup_time))); | |
1849 | } | |
1850 | ||
1851 | DPU_ATRACE_END("encoder_kickoff"); | |
1852 | } | |
1853 | ||
25fdd593 JS |
1854 | void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc) |
1855 | { | |
1856 | struct dpu_encoder_virt *dpu_enc; | |
1857 | struct dpu_encoder_phys *phys; | |
1858 | int i; | |
1859 | ||
1860 | if (!drm_enc) { | |
1861 | DPU_ERROR("invalid encoder\n"); | |
1862 | return; | |
1863 | } | |
1864 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1865 | ||
1866 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1867 | phys = dpu_enc->phys_encs[i]; | |
1868 | if (phys && phys->ops.prepare_commit) | |
1869 | phys->ops.prepare_commit(phys); | |
1870 | } | |
1871 | } | |
1872 | ||
1873 | #ifdef CONFIG_DEBUG_FS | |
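/*
 * The debugfs "status" file registered below prints one line per
 * physical encoder, e.g. (illustrative output only):
 *   intf:0 vsync:     123 underrun:       0 mode: video
 */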
1874 | static int _dpu_encoder_status_show(struct seq_file *s, void *data) | |
1875 | { | |
1876 | struct dpu_encoder_virt *dpu_enc; | |
1877 | int i; | |
1878 | ||
1879 | if (!s || !s->private) | |
1880 | return -EINVAL; | |
1881 | ||
1882 | dpu_enc = s->private; | |
1883 | ||
1884 | mutex_lock(&dpu_enc->enc_lock); | |
1885 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
1886 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
1887 | ||
1888 | if (!phys) | |
1889 | continue; | |
1890 | ||
1891 | seq_printf(s, "intf:%d vsync:%8d underrun:%8d ", | |
1892 | phys->intf_idx - INTF_0, | |
1893 | atomic_read(&phys->vsync_cnt), | |
1894 | atomic_read(&phys->underrun_cnt)); | |
1895 | ||
1896 | switch (phys->intf_mode) { | |
1897 | case INTF_MODE_VIDEO: | |
1898 | seq_puts(s, "mode: video\n"); | |
1899 | break; | |
1900 | case INTF_MODE_CMD: | |
1901 | seq_puts(s, "mode: command\n"); | |
1902 | break; | |
1903 | default: | |
1904 | seq_puts(s, "mode: ???\n"); | |
1905 | break; | |
1906 | } | |
1907 | } | |
1908 | mutex_unlock(&dpu_enc->enc_lock); | |
1909 | ||
1910 | return 0; | |
1911 | } | |
1912 | ||
1913 | static int _dpu_encoder_debugfs_status_open(struct inode *inode, | |
1914 | struct file *file) | |
1915 | { | |
1916 | return single_open(file, _dpu_encoder_status_show, inode->i_private); | |
1917 | } | |
1918 | ||
25fdd593 JS |
1919 | static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) |
1920 | { | |
1921 | struct dpu_encoder_virt *dpu_enc; | |
1922 | struct msm_drm_private *priv; | |
1923 | struct dpu_kms *dpu_kms; | |
1924 | int i; | |
1925 | ||
1926 | static const struct file_operations debugfs_status_fops = { | |
1927 | .open = _dpu_encoder_debugfs_status_open, | |
1928 | .read = seq_read, | |
1929 | .llseek = seq_lseek, | |
1930 | .release = single_release, | |
1931 | }; | |
1932 | ||
25fdd593 JS |
1933 | char name[DPU_NAME_SIZE]; |
1934 | ||
1935 | if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) { | |
1936 | DPU_ERROR("invalid encoder or kms\n"); | |
1937 | return -EINVAL; | |
1938 | } | |
1939 | ||
1940 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1941 | priv = drm_enc->dev->dev_private; | |
1942 | dpu_kms = to_dpu_kms(priv->kms); | |
1943 | ||
1944 | snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id); | |
1945 | ||
1946 | /* create overall sub-directory for the encoder */ | |
1947 | dpu_enc->debugfs_root = debugfs_create_dir(name, | |
1948 | drm_enc->dev->primary->debugfs_root); | |
1949 | if (!dpu_enc->debugfs_root) | |
1950 | return -ENOMEM; | |
1951 | ||
1952 | /* don't error check these */ | |
1953 | debugfs_create_file("status", 0600, | |
1954 | dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops); | |
1955 | ||
25fdd593 JS |
1956 | for (i = 0; i < dpu_enc->num_phys_encs; i++) |
1957 | if (dpu_enc->phys_encs[i] && | |
1958 | dpu_enc->phys_encs[i]->ops.late_register) | |
1959 | dpu_enc->phys_encs[i]->ops.late_register( | |
1960 | dpu_enc->phys_encs[i], | |
1961 | dpu_enc->debugfs_root); | |
1962 | ||
1963 | return 0; | |
1964 | } | |
1965 | ||
1966 | static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc) | |
1967 | { | |
1968 | struct dpu_encoder_virt *dpu_enc; | |
1969 | ||
1970 | if (!drm_enc) | |
1971 | return; | |
1972 | ||
1973 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
1974 | debugfs_remove_recursive(dpu_enc->debugfs_root); | |
1975 | } | |
1976 | #else | |
1977 | static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) | |
1978 | { | |
1979 | return 0; | |
1980 | } | |
1981 | ||
1982 | static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc) | |
1983 | { | |
1984 | } | |
1985 | #endif | |
1986 | ||
1987 | static int dpu_encoder_late_register(struct drm_encoder *encoder) | |
1988 | { | |
1989 | return _dpu_encoder_init_debugfs(encoder); | |
1990 | } | |
1991 | ||
1992 | static void dpu_encoder_early_unregister(struct drm_encoder *encoder) | |
1993 | { | |
1994 | _dpu_encoder_destroy_debugfs(encoder); | |
1995 | } | |
1996 | ||
1997 | static int dpu_encoder_virt_add_phys_encs( | |
1998 | u32 display_caps, | |
1999 | struct dpu_encoder_virt *dpu_enc, | |
2000 | struct dpu_enc_phys_init_params *params) | |
2001 | { | |
2002 | struct dpu_encoder_phys *enc = NULL; | |
2003 | ||
2004 | DPU_DEBUG_ENC(dpu_enc, "\n"); | |
2005 | ||
2006 | /* | |
2007 | * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types | |
2008 | * in this function, check up-front. | |
2009 | */ | |
2010 | if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >= | |
2011 | ARRAY_SIZE(dpu_enc->phys_encs)) { | |
2012 | DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n", | |
2013 | dpu_enc->num_phys_encs); | |
2014 | return -EINVAL; | |
2015 | } | |
2016 | ||
2017 | if (display_caps & MSM_DISPLAY_CAP_VID_MODE) { | |
2018 | enc = dpu_encoder_phys_vid_init(params); | |
2019 | ||
2020 | if (IS_ERR_OR_NULL(enc)) { | |
2021 | DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", | |
2022 | PTR_ERR(enc)); | |
2023 | return !enc ? -EINVAL : PTR_ERR(enc); | |
2024 | } | |
2025 | ||
2026 | dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; | |
2027 | ++dpu_enc->num_phys_encs; | |
2028 | } | |
2029 | ||
2030 | if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) { | |
2031 | enc = dpu_encoder_phys_cmd_init(params); | |
2032 | ||
2033 | if (IS_ERR_OR_NULL(enc)) { | |
2034 | DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", | |
2035 | PTR_ERR(enc)); | |
2036 | return !enc ? -EINVAL : PTR_ERR(enc); | |
2037 | } | |
2038 | ||
2039 | dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; | |
2040 | ++dpu_enc->num_phys_encs; | |
2041 | } | |
2042 | ||
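/*
 * Remember the most recently created physical encoder for this tile as
 * the current master or slave, according to the split role requested by
 * the caller.
 */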
86b89080 JS |
2043 | if (params->split_role == ENC_ROLE_SLAVE) |
2044 | dpu_enc->cur_slave = enc; | |
2045 | else | |
2046 | dpu_enc->cur_master = enc; | |
2047 | ||
25fdd593 JS |
2048 | return 0; |
2049 | } | |
2050 | ||
2051 | static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = { | |
2052 | .handle_vblank_virt = dpu_encoder_vblank_callback, | |
2053 | .handle_underrun_virt = dpu_encoder_underrun_callback, | |
2054 | .handle_frame_done = dpu_encoder_frame_done_callback, | |
2055 | }; | |
2056 | ||
2057 | static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, | |
2058 | struct dpu_kms *dpu_kms, | |
48a8ef72 | 2059 | struct msm_display_info *disp_info) |
25fdd593 JS |
2060 | { |
2061 | int ret = 0; | |
2062 | int i = 0; | |
2063 | enum dpu_intf_type intf_type; | |
2064 | struct dpu_enc_phys_init_params phys_params; | |
2065 | ||
2066 | if (!dpu_enc || !dpu_kms) { | |
2067 | DPU_ERROR("invalid arg(s), enc %d kms %d\n", | |
2068 | dpu_enc != NULL, dpu_kms != NULL); | |
2069 | return -EINVAL; | |
2070 | } | |
2071 | ||
9027b871 SP |
2072 | dpu_enc->cur_master = NULL; |
2073 | ||
25fdd593 JS |
2074 | memset(&phys_params, 0, sizeof(phys_params)); |
2075 | phys_params.dpu_kms = dpu_kms; | |
2076 | phys_params.parent = &dpu_enc->base; | |
2077 | phys_params.parent_ops = &dpu_encoder_parent_ops; | |
2078 | phys_params.enc_spinlock = &dpu_enc->enc_spinlock; | |
2079 | ||
2080 | DPU_DEBUG("\n"); | |
2081 | ||
48a8ef72 | 2082 | switch (disp_info->intf_type) { |
1e53ac92 | 2083 | case DRM_MODE_ENCODER_DSI: |
25fdd593 | 2084 | intf_type = INTF_DSI; |
48a8ef72 JS |
2085 | break; |
2086 | default: | |
25fdd593 JS |
2087 | DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n"); |
2088 | return -EINVAL; | |
2089 | } | |
2090 | ||
2091 | WARN_ON(disp_info->num_of_h_tiles < 1); | |
2092 | ||
25fdd593 JS |
2093 | DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); | |
2094 | ||
2095 | if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) || | |
2096 | (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE)) | |
2097 | dpu_enc->idle_pc_supported = | |
2098 | dpu_kms->catalog->caps->has_idle_pc; | |
2099 | ||
2100 | mutex_lock(&dpu_enc->enc_lock); | |
2101 | for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { | |
2102 | /* | |
2103 | * Left-most tile is at index 0, content is controller id | |
2104 | * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right | |
2105 | * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right | |
2106 | */ | |
2107 | u32 controller_id = disp_info->h_tile_instance[i]; | |
2108 | ||
2109 | if (disp_info->num_of_h_tiles > 1) { | |
2110 | if (i == 0) | |
2111 | phys_params.split_role = ENC_ROLE_MASTER; | |
2112 | else | |
2113 | phys_params.split_role = ENC_ROLE_SLAVE; | |
2114 | } else { | |
2115 | phys_params.split_role = ENC_ROLE_SOLO; | |
2116 | } | |
2117 | ||
2118 | DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n", | |
2119 | i, controller_id, phys_params.split_role); | |
2120 | ||
2121 | phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog, | |
2122 | intf_type, | |
2123 | controller_id); | |
2124 | if (phys_params.intf_idx == INTF_MAX) { | |
2125 | DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n", | |
2126 | intf_type, controller_id); | |
2127 | ret = -EINVAL; | |
2128 | } | |
2129 | ||
2130 | if (!ret) { | |
2131 | ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities, | |
2132 | dpu_enc, | |
2133 | &phys_params); | |
2134 | if (ret) | |
2135 | DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); | |
2136 | } | |
2137 | } | |
2138 | ||
2139 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
2140 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
2141 | ||
2142 | if (phys) { | |
2143 | atomic_set(&phys->vsync_cnt, 0); | |
2144 | atomic_set(&phys->underrun_cnt, 0); | |
2145 | } | |
2146 | } | |
2147 | mutex_unlock(&dpu_enc->enc_lock); | |
2148 | ||
2149 | return ret; | |
2150 | } | |
2151 | ||
2152 | static void dpu_encoder_frame_done_timeout(struct timer_list *t) | |
2153 | { | |
2154 | struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, | |
2155 | frame_done_timer); | |
2156 | struct drm_encoder *drm_enc = &dpu_enc->base; | |
2157 | struct msm_drm_private *priv; | |
2158 | u32 event; | |
2159 | ||
2160 | if (!drm_enc->dev || !drm_enc->dev->dev_private) { | |
2161 | DPU_ERROR("invalid parameters\n"); | |
2162 | return; | |
2163 | } | |
2164 | priv = drm_enc->dev->dev_private; | |
2165 | ||
2166 | if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) { | |
2167 | DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", | |
2168 | DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); | |
2169 | return; | |
2170 | } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) { | |
2171 | DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); | |
2172 | return; | |
2173 | } | |
2174 | ||
2175 | DPU_ERROR_ENC(dpu_enc, "frame done timeout\n"); | |
2176 | ||
2177 | event = DPU_ENCODER_FRAME_EVENT_ERROR; | |
2178 | trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); | |
2179 | dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event); | |
2180 | } | |
2181 | ||
2182 | static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { | |
2183 | .mode_set = dpu_encoder_virt_mode_set, | |
2184 | .disable = dpu_encoder_virt_disable, | |
2185 | .enable = dpu_kms_encoder_enable, | |
2186 | .atomic_check = dpu_encoder_virt_atomic_check, | |
2187 | ||
2188 | /* This is called by dpu_kms_encoder_enable */ | |
2189 | .commit = dpu_encoder_virt_enable, | |
2190 | }; | |
2191 | ||
2192 | static const struct drm_encoder_funcs dpu_encoder_funcs = { | |
2193 | .destroy = dpu_encoder_destroy, | |
2194 | .late_register = dpu_encoder_late_register, | |
2195 | .early_unregister = dpu_encoder_early_unregister, | |
2196 | }; | |
2197 | ||
2198 | int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, | |
2199 | struct msm_display_info *disp_info) | |
2200 | { | |
2201 | struct msm_drm_private *priv = dev->dev_private; | |
2202 | struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); | |
2203 | struct drm_encoder *drm_enc = NULL; | |
2204 | struct dpu_encoder_virt *dpu_enc = NULL; | |
25fdd593 JS |
2205 | int ret = 0; |
2206 | ||
2207 | dpu_enc = to_dpu_encoder_virt(enc); | |
2208 | ||
2209 | mutex_init(&dpu_enc->enc_lock); | |
48a8ef72 | 2210 | ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); |
25fdd593 JS |
2211 | if (ret) |
2212 | goto fail; | |
2213 | ||
25fdd593 JS |
2214 | spin_lock_init(&dpu_enc->enc_spinlock); |
2215 | ||
2216 | atomic_set(&dpu_enc->frame_done_timeout, 0); | |
2217 | timer_setup(&dpu_enc->frame_done_timer, | |
2218 | dpu_encoder_frame_done_timeout, 0); | |
2219 | ||
1e53ac92 | 2220 | if (disp_info->intf_type == DRM_MODE_ENCODER_DSI) |
25fdd593 JS |
2221 | timer_setup(&dpu_enc->vsync_event_timer, |
2222 | dpu_encoder_vsync_event_handler, | |
2223 | 0); | |
2224 | ||
2225 | ||
2226 | mutex_init(&dpu_enc->rc_lock); | |
2227 | kthread_init_delayed_work(&dpu_enc->delayed_off_work, | |
2228 | dpu_encoder_off_work); | |
2229 | dpu_enc->idle_timeout = IDLE_TIMEOUT; | |
2230 | ||
2231 | kthread_init_work(&dpu_enc->vsync_event_work, | |
2232 | dpu_encoder_vsync_event_work_handler); | |
2233 | ||
2234 | memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info)); | |
2235 | ||
2236 | DPU_DEBUG_ENC(dpu_enc, "created\n"); | |
2237 | ||
2238 | return ret; | |
2239 | ||
2240 | fail: | |
2241 | DPU_ERROR("failed to create encoder\n"); | |
2242 | if (drm_enc) | |
2243 | dpu_encoder_destroy(drm_enc); | |
2244 | ||
2245 | return ret; | |
2246 | ||
2247 | ||
2248 | } | |
2249 | ||
2250 | struct drm_encoder *dpu_encoder_init(struct drm_device *dev, | |
2251 | int drm_enc_mode) | |
2252 | { | |
2253 | struct dpu_encoder_virt *dpu_enc = NULL; | |
2254 | int rc = 0; | |
2255 | ||
2256 | dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); | |
2257 | if (!dpu_enc) | |
2258 | return ERR_PTR(-ENOMEM); | |
2259 | ||
2260 | rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, | |
2261 | drm_enc_mode, NULL); | |
2262 | if (rc) { | |
2263 | devm_kfree(dev->dev, dpu_enc); | |
2264 | return ERR_PTR(rc); | |
2265 | } | |
2266 | ||
2267 | drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); | |
2268 | ||
2269 | return &dpu_enc->base; | |
2270 | } | |
2271 | ||
2272 | int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, | |
2273 | enum msm_event_wait event) | |
2274 | { | |
2275 | int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL; | |
2276 | struct dpu_encoder_virt *dpu_enc = NULL; | |
2277 | int i, ret = 0; | |
2278 | ||
2279 | if (!drm_enc) { | |
2280 | DPU_ERROR("invalid encoder\n"); | |
2281 | return -EINVAL; | |
2282 | } | |
2283 | dpu_enc = to_dpu_encoder_virt(drm_enc); | |
2284 | DPU_DEBUG_ENC(dpu_enc, "\n"); | |
2285 | ||
2286 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
2287 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
77a209cd JS |
2288 | if (!phys) |
2289 | continue; | |
25fdd593 JS |
2290 | |
2291 | switch (event) { | |
2292 | case MSM_ENC_COMMIT_DONE: | |
2293 | fn_wait = phys->ops.wait_for_commit_done; | |
2294 | break; | |
2295 | case MSM_ENC_TX_COMPLETE: | |
2296 | fn_wait = phys->ops.wait_for_tx_complete; | |
2297 | break; | |
2298 | case MSM_ENC_VBLANK: | |
2299 | fn_wait = phys->ops.wait_for_vblank; | |
2300 | break; | |
2301 | default: | |
2302 | DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", | |
2303 | event); | |
2304 | return -EINVAL; | |
2305 | } | |
2306 | ||
77a209cd | 2307 | if (fn_wait) { |
25fdd593 JS |
2308 | DPU_ATRACE_BEGIN("wait_for_completion_event"); |
2309 | ret = fn_wait(phys); | |
2310 | DPU_ATRACE_END("wait_for_completion_event"); | |
2311 | if (ret) | |
2312 | return ret; | |
2313 | } | |
2314 | } | |
2315 | ||
2316 | return ret; | |
2317 | } | |
2318 | ||
2319 | enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) | |
2320 | { | |
2321 | struct dpu_encoder_virt *dpu_enc = NULL; | |
2322 | int i; | |
2323 | ||
2324 | if (!encoder) { | |
2325 | DPU_ERROR("invalid encoder\n"); | |
2326 | return INTF_MODE_NONE; | |
2327 | } | |
2328 | dpu_enc = to_dpu_encoder_virt(encoder); | |
2329 | ||
2330 | if (dpu_enc->cur_master) | |
2331 | return dpu_enc->cur_master->intf_mode; | |
2332 | ||
2333 | for (i = 0; i < dpu_enc->num_phys_encs; i++) { | |
2334 | struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; | |
2335 | ||
2336 | if (phys) | |
2337 | return phys->intf_mode; | |
2338 | } | |
2339 | ||
2340 | return INTF_MODE_NONE; | |
2341 | } |