// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_util.h>

#include "mdp5_kms.h"
#include "mdp5_smp.h"

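/*
 * The SMP (Shared Memory Pool) is a pool of fixed-size memory blocks
 * (MMBs) that the scanout pipes fetch into.  Each pipe plane maps to an
 * SMP client id, and enough blocks must be assigned to that client for
 * the configured format/width before the pipe can scan out.  This struct
 * tracks the pool geometry plus a software cache of the allocation and
 * watermark registers, which is flushed to hardware at commit time.
 */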
struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;
	int blk_size;

	/* register cache */
	u32 alloc_w[22];
	u32 alloc_r[22];
	u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

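/* Map a pipe plus a plane index (Y/Cr/Cb for ViG pipes) to its SMP client id. */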
static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	/*
	 * Note on SMP clients:
	 * For ViG pipes, fetch Y/Cr/Cb-components clients are always
	 * consecutive, and in that order.
	 *
	 * e.g.:
	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
	 *	Y  plane's client ID is N
	 *	Cr plane's client ID is N + 1
	 *	Cb plane's client ID is N + 2
	 */

	return mdp5_cfg->smp.clients[pipe] + plane;
}

/* allocate blocks for the specified request: */
static int smp_request_block(struct mdp5_smp *smp,
		struct mdp5_smp_state *state,
		u32 cid, int nblks)
{
	void *cs = state->client_state[cid];
	int i, avail, cnt = smp->blk_cnt;
	uint8_t reserved;

	/* we shouldn't be requesting blocks for an in-use client: */
	WARN_ON(bitmap_weight(cs, cnt) > 0);

	reserved = smp->reserved[cid];

	if (reserved) {
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(state->state, cnt);
	if (nblks > avail) {
		DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		return -ENOSPC;
	}

	for (i = 0; i < nblks; i++) {
		int blk = find_first_zero_bit(state->state, cnt);
		set_bit(blk, cs);
		set_bit(blk, state->state);
	}

	return 0;
}

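/*
 * Cache the requestor-priority FIFO watermarks for @pipe at 1/4, 1/2 and
 * 3/4 of its allocated SMP entries (one entry is 128 bits wide).  The
 * cached values are written to hardware by write_smp_fifo_regs().
 */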
static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;

	smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
	smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
	smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
}

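/*
 * Compute how many SMP blocks each plane of @format needs at the given
 * @width.  The per-plane counts are packed into the returned value, one
 * byte per plane, which is the layout mdp5_smp_assign() expects.
 */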
/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
		const struct mdp_format *format,
		u32 width, bool hdecim)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines;
	u32 fmt = format->base.pixel_format;
	uint32_t blkcfg = 0;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	/* Newer MDPs have split/packing logic, which fetches sub-sampled
	 * U and V components (splits them from Y if necessary) and packs
	 * them together, writes to SMP using a single client.
	 */
	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
		fmt = DRM_FORMAT_NV24;
		nplanes = 2;

		/* if decimation is enabled, HW decimates less on the
		 * sub sampled chroma components
		 */
		if (hdecim && (hsub > 1))
			hsub = 1;
	}

	for (i = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		blkcfg |= (n << (8 * i));
	}

	return blkcfg;
}

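/*
 * Reserve SMP blocks for each client of @pipe according to @blkcfg (one
 * byte per plane, as produced by mdp5_smp_calculate()).  Only the shared
 * software state is updated here; the registers are programmed later, in
 * mdp5_smp_prepare_commit().
 */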
int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
		enum mdp5_pipe pipe, uint32_t blkcfg)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		int n = blkcfg & 0xff;

		if (!n)
			continue;

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, state, cid, n);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		blkcfg >>= 8;
	}

	state->assigned |= (1 << pipe);

	return 0;
}

/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
		enum mdp5_pipe pipe)
{
	int i;
	int cnt = smp->blk_cnt;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		void *cs = state->client_state[cid];

		/* update global state: */
		bitmap_andnot(state->state, state->state, cs, cnt);

		/* clear client's state */
		bitmap_zero(cs, cnt);
	}

	state->released |= (1 << pipe);
}

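/*
 * Point every block set in *assigned at client @cid in the cached
 * SMP_ALLOC_W/R registers (three client fields per register).  Returns
 * the number of blocks updated.
 */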
/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
 * happen after scanout completes.
 */
static unsigned update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	int cnt = smp->blk_cnt;
	unsigned nblks = 0;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = smp->alloc_w[idx];

		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		smp->alloc_w[idx] = val;
		smp->alloc_r[idx] = val;

		nblks++;
	}

	return nblks;
}

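/*
 * Write the cached allocation state out to the SMP_ALLOC_W/R registers.
 * Three client fields are packed per 32-bit register, hence the
 * blk_cnt / 3 + 1 register count.
 */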
static void write_smp_alloc_regs(struct mdp5_smp *smp)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int i, num_regs;

	num_regs = smp->blk_cnt / 3 + 1;

	for (i = 0; i < num_regs; i++) {
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
			   smp->alloc_w[i]);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
			   smp->alloc_r[i]);
	}
}

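/* Write the cached requestor-priority FIFO watermarks for every hw pipe. */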
static void write_smp_fifo_regs(struct mdp5_smp *smp)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int i;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		enum mdp5_pipe pipe = hwpipe->pipe;

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
			   smp->pipe_reqprio_fifo_wm0[pipe]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
			   smp->pipe_reqprio_fifo_wm1[pipe]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
			   smp->pipe_reqprio_fifo_wm2[pipe]);
	}
}

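/*
 * Apply all pending SMP assignments at commit time: update the register
 * cache and FIFO watermarks for every pipe marked in state->assigned,
 * then push both the allocation and watermark registers to hardware.
 */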
void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
		unsigned i, nblks = 0;

		for (i = 0; i < pipe2nclients(pipe); i++) {
			u32 cid = pipe2client(pipe, i);
			void *cs = state->client_state[cid];

			nblks += update_smp_state(smp, cid, cs);

			DBG("assign %s:%u, %u blks",
				pipe2name(pipe), i, nblks);
		}

		set_fifo_thresholds(smp, pipe, nblks);
	}

	write_smp_alloc_regs(smp);
	write_smp_fifo_regs(smp);

	state->assigned = 0;
}

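/*
 * Finish releasing blocks once the commit has completed: since the
 * SMP_ALLOC_* registers are not double buffered (see the NOTE above
 * update_smp_state()), the watermarks of released pipes are only dropped
 * back to zero here, after scanout of the old frame is done.
 */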
void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
		DBG("release %s", pipe2name(pipe));
		set_fifo_thresholds(smp, pipe, 0);
	}

	write_smp_fifo_regs(smp);

	state->released = 0;
}

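/* Print a table of per-client SMP block usage to @p (for debugfs-style dumps). */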
void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct mdp5_hw_pipe_state *hwpstate;
	struct mdp5_smp_state *state;
	struct mdp5_global_state *global_state;
	int total = 0, i, j;

	drm_printf(p, "name\tinuse\tplane\n");
	drm_printf(p, "----\t-----\t-----\n");

	if (drm_can_sleep())
		drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	/* grab these *after* we hold the state_lock */
	hwpstate = &global_state->hwpipe;
	state = &global_state->smp;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
		enum mdp5_pipe pipe = hwpipe->pipe;
		for (j = 0; j < pipe2nclients(pipe); j++) {
			u32 cid = pipe2client(pipe, j);
			void *cs = state->client_state[cid];
			int inuse = bitmap_weight(cs, smp->blk_cnt);

			drm_printf(p, "%s:%d\t%d\t%s\n",
				pipe2name(pipe), j, inuse,
				plane ? plane->name : NULL);

			total += inuse;
		}
	}

	drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
	drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
			bitmap_weight(state->state, smp->blk_cnt));

	if (drm_can_sleep())
		drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}

void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}

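/*
 * Allocate the SMP bookkeeping from the per-SoC @cfg.  The statically
 * reserved MMBs are marked as used in the global state up front, so they
 * are never handed out by the dynamic allocator.
 */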
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp_state *state;
	struct mdp5_global_state *global_state;
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = mdp5_kms->dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	global_state = mdp5_get_existing_global_state(mdp5_kms);
	state = &global_state->smp;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));

	return smp;
fail:
	if (smp)
		mdp5_smp_destroy(smp);

	return ERR_PTR(ret);
}