Commit | Line | Data |
---|---|---|
97fb5e8d | 1 | // SPDX-License-Identifier: GPL-2.0-only |
0deed25b | 2 | /* |
389b09a1 | 3 | * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. |
0deed25b SV |
4 | */ |
5 | ||
6 | #include "mdp5_kms.h" | |
7 | #include "mdp5_ctl.h" | |
8 | ||
9 | /* | |
10 | * CTL - MDP Control Pool Manager | |
11 | * | |
b96b3a06 | 12 | * Controls are shared between all display interfaces. |
0deed25b SV |
13 | * |
14 | * They are intended to be used for data path configuration. | |
15 | * The top level register programming describes the complete data path for | |
16 | * a specific data path ID - REG_MDP5_CTL_*(<id>, ...) | |
17 | * | |
18 | * Hardware capabilities determine the number of concurrent data paths | |
19 | * | |
20 | * In certain use cases (high-resolution dual pipe), one single CTL can be | |
21 | * shared across multiple CRTCs. | |
0deed25b SV |
22 | */ |
23 | ||
b96b3a06 HL |
24 | #define CTL_STAT_BUSY 0x1 |
25 | #define CTL_STAT_BOOKED 0x2 | |
26 | ||
/*
 * struct mdp5_ctl - state of one hardware CTL instance
 *
 * One entry of the manager's static pool; tracks allocation status,
 * pending flush bits and per-pipeline flags for a single CTL.
 */
struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;	/* back-pointer to the owning pool */

	u32 id;		/* CTL index, used in REG_MDP5_CTL_*(id) accessors */

	/* CTL status bitmask (CTL_STAT_BUSY / CTL_STAT_BOOKED) */
	u32 status;

	/* set by mdp5_ctl_set_encoder_state(); gates the START signal */
	bool encoder_enabled;

	/* pending flush_mask bits */
	u32 flush_mask;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	/* mirrored into the CURSOR_OUT bit of CTL_LAYER_REG on blend updates */
	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;

	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};
54 | ||
/*
 * struct mdp5_ctl_manager - pool of all CTL instances for one MDP5 device
 */
struct mdp5_ctl_manager {
	struct drm_device *dev;	/* DRM device this hardware belongs to */

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH (set up in mdp5_ctlm_init()) */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};
73 | ||
0deed25b SV |
74 | static inline |
75 | struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr) | |
76 | { | |
77 | struct msm_drm_private *priv = ctl_mgr->dev->dev_private; | |
78 | ||
79 | return to_mdp5_kms(to_mdp_kms(priv->kms)); | |
80 | } | |
81 | ||
82 | static inline | |
83 | void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data) | |
84 | { | |
32c0e3e2 | 85 | struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); |
0deed25b SV |
86 | |
87 | (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ | |
88 | mdp5_write(mdp5_kms, reg, data); | |
89 | } | |
90 | ||
91 | static inline | |
92 | u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) | |
93 | { | |
32c0e3e2 | 94 | struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); |
0deed25b SV |
95 | |
96 | (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ | |
97 | return mdp5_read(mdp5_kms, reg); | |
98 | } | |
99 | ||
/*
 * Program @intf's type into the shared DISP_INTF_SEL register.
 *
 * The register holds the selection for all four physical interfaces, so
 * it is updated with a read-modify-write of only this interface's field,
 * under resource_lock to serialize against other writers.
 */
static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		/* interface numbers outside 0..3 are a driver bug */
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
0deed25b | 134 | |
/*
 * Build and write this CTL's OP (operation mode) register from the
 * pipeline's interface type/mode; enables 3D packing when a right
 * mixer is present (dual-pipe configuration).
 */
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	struct mdp5_interface *intf = pipeline->intf;
	u32 ctl_op = 0;

	/* virtual interfaces (e.g. writeback) carry no INTF number */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	if (pipeline->r_mixer)
		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
			  MDP5_CTL_OP_PACK_3D(1);

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
167 | ||
/*
 * Attach @pipeline to @ctl: select the physical display interface (for
 * non-virtual interfaces) and program the CTL's operation mode.
 *
 * Always returns 0.
 */
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
	struct mdp5_interface *intf = pipeline->intf;

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, pipeline);

	return 0;
}
181 | ||
f316b25a AT |
182 | static bool start_signal_needed(struct mdp5_ctl *ctl, |
183 | struct mdp5_pipeline *pipeline) | |
389b09a1 | 184 | { |
f316b25a AT |
185 | struct mdp5_interface *intf = pipeline->intf; |
186 | ||
f9cb8d8d | 187 | if (!ctl->encoder_enabled) |
389b09a1 SV |
188 | return false; |
189 | ||
f316b25a | 190 | switch (intf->type) { |
389b09a1 SV |
191 | case INTF_WB: |
192 | return true; | |
193 | case INTF_DSI: | |
f316b25a | 194 | return intf->mode == MDP5_INTF_DSI_MODE_COMMAND; |
389b09a1 SV |
195 | default: |
196 | return false; | |
197 | } | |
198 | } | |
199 | ||
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to be
 * executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	/* write 1 to this CTL's START register, under the register lock */
	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
215 | ||
/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl:      the CTL instance driving the encoder's data path
 * @pipeline: the pipeline whose interface decides if a START is needed
 * @enabled:  true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger START signal (data path kickoff).
 *
 * Returns 0 on success, -EINVAL if @ctl is NULL.
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
			       struct mdp5_pipeline *pipeline,
			       bool enabled)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->encoder_enabled = enabled;
	DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");

	/* command-mode DSI / writeback kick off only on an explicit START */
	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return 0;
}
242 | ||
243 | /* | |
244 | * Note: | |
245 | * CTL registers need to be flushed after calling this function | |
246 | * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) | |
247 | */ | |
f316b25a AT |
248 | int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, |
249 | int cursor_id, bool enable) | |
0deed25b | 250 | { |
32c0e3e2 | 251 | struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; |
0deed25b SV |
252 | unsigned long flags; |
253 | u32 blend_cfg; | |
f316b25a | 254 | struct mdp5_hw_mixer *mixer = pipeline->mixer; |
0deed25b | 255 | |
c044e86f | 256 | if (WARN_ON(!mixer)) { |
6a41da17 | 257 | DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM", |
adfc0e63 | 258 | ctl->id); |
0deed25b SV |
259 | return -EINVAL; |
260 | } | |
261 | ||
b7621b2a | 262 | if (pipeline->r_mixer) { |
6a41da17 | 263 | DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration"); |
b7621b2a AT |
264 | return -EINVAL; |
265 | } | |
266 | ||
0deed25b SV |
267 | spin_lock_irqsave(&ctl->hw_lock, flags); |
268 | ||
adfc0e63 | 269 | blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm)); |
0deed25b SV |
270 | |
271 | if (enable) | |
272 | blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; | |
273 | else | |
274 | blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; | |
275 | ||
adfc0e63 | 276 | ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); |
12987781 | 277 | ctl->cursor_on = enable; |
0deed25b SV |
278 | |
279 | spin_unlock_irqrestore(&ctl->hw_lock, flags); | |
280 | ||
389b09a1 | 281 | ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); |
0deed25b SV |
282 | |
283 | return 0; | |
284 | } | |
285 | ||
12987781 | 286 | static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, |
287 | enum mdp_mixer_stage_id stage) | |
288 | { | |
289 | switch (pipe) { | |
290 | case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); | |
291 | case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); | |
292 | case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); | |
293 | case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); | |
294 | case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); | |
295 | case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); | |
296 | case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); | |
297 | case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); | |
298 | case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); | |
299 | case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); | |
5798c8e0 AT |
300 | case SSPP_CURSOR0: |
301 | case SSPP_CURSOR1: | |
12987781 | 302 | default: return 0; |
303 | } | |
304 | } | |
305 | ||
306 | static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, | |
307 | enum mdp_mixer_stage_id stage) | |
308 | { | |
5798c8e0 | 309 | if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1)) |
12987781 | 310 | return 0; |
311 | ||
312 | switch (pipe) { | |
313 | case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3; | |
314 | case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3; | |
315 | case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3; | |
316 | case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3; | |
317 | case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3; | |
318 | case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3; | |
319 | case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3; | |
320 | case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; | |
321 | case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; | |
322 | case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; | |
5798c8e0 AT |
323 | case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage); |
324 | case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage); | |
12987781 | 325 | default: return 0; |
326 | } | |
327 | } | |
328 | ||
0d1d3e44 AT |
329 | static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl) |
330 | { | |
331 | unsigned long flags; | |
332 | struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; | |
333 | int i; | |
334 | ||
335 | spin_lock_irqsave(&ctl->hw_lock, flags); | |
336 | ||
337 | for (i = 0; i < ctl_mgr->nlm; i++) { | |
338 | ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0); | |
339 | ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0); | |
340 | } | |
341 | ||
342 | spin_unlock_irqrestore(&ctl->hw_lock, flags); | |
343 | } | |
344 | ||
#define PIPE_LEFT	0
#define PIPE_RIGHT	1
/*
 * mdp5_ctl_blend() - program blend (stage) configuration for the pipeline
 *
 * Accumulates the LAYER/LAYER_EXT bits of every staged pipe (left and
 * right halves of each stage), writes them to the pipeline's mixer —
 * and to the right mixer, if present — and records the LM flush bits in
 * ctl->pending_ctl_trigger for the next mdp5_ctl_commit().
 *
 * Always returns 0.
 */
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	/* start from a clean slate: clear all mixers' blend registers */
	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		/* border color occupies the base stage, pipes start at 0 */
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	/* no-op when stage_cnt is 0 (condition checked each iteration) */
	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	/* preserve a previously-enabled cursor (see mdp5_ctl_set_cursor()) */
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
		blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
		    r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}
414 | ||
389b09a1 SV |
415 | u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf) |
416 | { | |
389b09a1 SV |
417 | if (intf->type == INTF_WB) |
418 | return MDP5_CTL_FLUSH_WB; | |
419 | ||
420 | switch (intf->num) { | |
421 | case 0: return MDP5_CTL_FLUSH_TIMING_0; | |
422 | case 1: return MDP5_CTL_FLUSH_TIMING_1; | |
423 | case 2: return MDP5_CTL_FLUSH_TIMING_2; | |
424 | case 3: return MDP5_CTL_FLUSH_TIMING_3; | |
425 | default: return 0; | |
426 | } | |
427 | } | |
428 | ||
429 | u32 mdp_ctl_flush_mask_cursor(int cursor_id) | |
430 | { | |
389b09a1 SV |
431 | switch (cursor_id) { |
432 | case 0: return MDP5_CTL_FLUSH_CURSOR_0; | |
433 | case 1: return MDP5_CTL_FLUSH_CURSOR_1; | |
434 | default: return 0; | |
435 | } | |
436 | } | |
437 | ||
438 | u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) | |
439 | { | |
440 | switch (pipe) { | |
441 | case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0; | |
442 | case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1; | |
443 | case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2; | |
444 | case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0; | |
445 | case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1; | |
446 | case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; | |
447 | case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; | |
448 | case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; | |
449 | case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; | |
450 | case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; | |
5798c8e0 AT |
451 | case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0; |
452 | case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1; | |
389b09a1 SV |
453 | default: return 0; |
454 | } | |
455 | } | |
456 | ||
457 | u32 mdp_ctl_flush_mask_lm(int lm) | |
458 | { | |
459 | switch (lm) { | |
460 | case 0: return MDP5_CTL_FLUSH_LM0; | |
461 | case 1: return MDP5_CTL_FLUSH_LM1; | |
462 | case 2: return MDP5_CTL_FLUSH_LM2; | |
583c13fd RC |
463 | case 3: return MDP5_CTL_FLUSH_LM3; |
464 | case 4: return MDP5_CTL_FLUSH_LM4; | |
389b09a1 SV |
465 | case 5: return MDP5_CTL_FLUSH_LM5; |
466 | default: return 0; | |
467 | } | |
468 | } | |
469 | ||
f316b25a AT |
470 | static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, |
471 | u32 flush_mask) | |
389b09a1 SV |
472 | { |
473 | struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; | |
474 | u32 sw_mask = 0; | |
475 | #define BIT_NEEDS_SW_FIX(bit) \ | |
476 | (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit)) | |
477 | ||
478 | /* for some targets, cursor bit is the same as LM bit */ | |
479 | if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0)) | |
f316b25a | 480 | sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm); |
389b09a1 SV |
481 | |
482 | return sw_mask; | |
483 | } | |
484 | ||
/*
 * fix_for_single_flush() - aggregate flushes of paired CTLs
 *
 * When @ctl is paired (single-FLUSH mode), its flush bits are accumulated
 * in the manager's single_flush_pending_mask and *flush_mask is cleared,
 * so no register write happens yet.  Once the partner CTL has also
 * flushed, the combined mask is released and *flush_id is redirected to
 * the lower of the two CTL ids.
 */
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
	u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			/* both halves ready: flush through the lower CTL id */
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
				*flush_id);
		}
	}
}
509 | ||
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * @ctl:        the CTL instance to flush
 * @pipeline:   the pipeline attached to @ctl (used for software flush
 *              fixups and the START-signal decision)
 * @flush_mask: flush bits to apply (see mdp_ctl_flush_mask_*())
 * @start:      true to write the FLUSH register now; false to only
 *              accumulate the bits for a later call with start == true
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask, bool start)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

	/* flushing a triggered resource also requires a CTL flush */
	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	/* drop bits this hardware does not implement */
	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (!start) {
		/* defer: remember the bits until a start == true commit */
		ctl->flush_mask |= flush_mask;
		return curr_ctl_flush_mask;
	} else {
		flush_mask |= ctl->flush_mask;
		ctl->flush_mask = 0;
	}

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return curr_ctl_flush_mask;
}
570 | ||
571 | u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) | |
572 | { | |
573 | return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); | |
0deed25b SV |
574 | } |
575 | ||
389b09a1 SV |
576 | int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) |
577 | { | |
578 | return WARN_ON(!ctl) ? -EINVAL : ctl->id; | |
579 | } | |
580 | ||
/*
 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 *
 * Links @ctlx and @ctly (or unlinks them when @enable is false) and
 * toggles the SPARE_0 split-display single-flush bit accordingly.
 * Both CTLs must be in the BOOKED state and not already paired.
 *
 * Returns 0 on success (or when single FLUSH is unsupported),
 * -EINVAL on an invalid pairing request.
 */
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing silently if hw doesn't support */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
		MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}
614 | ||
/*
 * mdp5_ctl_request() - CTL allocation
 *
 * Try to return booked CTL for @intf_num is 1 or 2, unbooked for other INTFs.
 * If no CTL is available in preferred category, allocate from the other one.
 *
 * @return fail (NULL) if no CTL is available.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	/* a candidate must be free (not BUSY) and match booked/unbooked */
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		"fall back to the other CTL category for INTF %d!\n", intf_num);

	/* retry with the opposite booked/unbooked category */
	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
660 | ||
42238da8 | 661 | void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr) |
0deed25b | 662 | { |
0deed25b SV |
663 | unsigned long flags; |
664 | int c; | |
665 | ||
666 | for (c = 0; c < ctl_mgr->nctl; c++) { | |
667 | struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; | |
668 | ||
669 | spin_lock_irqsave(&ctl->hw_lock, flags); | |
670 | ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0); | |
671 | spin_unlock_irqrestore(&ctl->hw_lock, flags); | |
672 | } | |
673 | } | |
674 | ||
/* Free the CTL manager allocated by mdp5_ctlm_init(). */
void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}
679 | ||
/*
 * mdp5_ctlm_init() - allocate and initialize the CTL pool manager
 *
 * Sizes the pool from the hardware config, initializes each CTL's id,
 * register base and lock, and books CTL0/CTL1 for single FLUSH on
 * dual-DSI hardware (rev >= v3.0).
 *
 * Returns the manager, or an ERR_PTR() on failure.
 *
 * NOTE(review): @mmio_base is not referenced in this function — confirm
 * whether it is still needed by callers or can be dropped.
 */
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	unsigned dsi_cnt = 0;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
		DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			/* drop the lock before the shared error path */
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI
	 * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when
	 * only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
	 * Single FLUSH is supported from hw rev v3.0.
	 */
	for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
		if (hw_cfg->intf.connect[c] == INTF_DSI)
			dsi_cnt++;
	if ((rev >= 3) && (dsi_cnt > 1)) {
		ctl_mgr->single_flush_supported = true;
		/* Reserve CTL0/1 for INTF1/2 */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}