/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
        struct drm_crtc_commit *commit =
                container_of(kref, struct drm_crtc_commit, ref);

        kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
        kfree(state->connectors);
        kfree(state->crtcs);
        kfree(state->planes);
        kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
        kref_init(&state->ref);

        /* TODO legacy paths should maybe do a better job about
         * setting this appropriately?
         */
        state->allow_modeset = true;

        state->crtcs = kcalloc(dev->mode_config.num_crtc,
                               sizeof(*state->crtcs), GFP_KERNEL);
        if (!state->crtcs)
                goto fail;
        state->planes = kcalloc(dev->mode_config.num_total_plane,
                                sizeof(*state->planes), GFP_KERNEL);
        if (!state->planes)
                goto fail;

        state->dev = dev;

        DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

        return 0;
fail:
        drm_atomic_state_default_release(state);
        return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
        struct drm_mode_config *config = &dev->mode_config;

        if (!config->funcs->atomic_state_alloc) {
                struct drm_atomic_state *state;

                state = kzalloc(sizeof(*state), GFP_KERNEL);
                if (!state)
                        return NULL;
                if (drm_atomic_state_init(dev, state) < 0) {
                        kfree(state);
                        return NULL;
                }
                return state;
        }

        return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct drm_mode_config *config = &dev->mode_config;
        int i;

        DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

        for (i = 0; i < state->num_connector; i++) {
                struct drm_connector *connector = state->connectors[i].ptr;

                if (!connector)
                        continue;

                connector->funcs->atomic_destroy_state(connector,
                                                       state->connectors[i].state);
                state->connectors[i].ptr = NULL;
                state->connectors[i].state = NULL;
                state->connectors[i].old_state = NULL;
                state->connectors[i].new_state = NULL;
                drm_connector_put(connector);
        }

        for (i = 0; i < config->num_crtc; i++) {
                struct drm_crtc *crtc = state->crtcs[i].ptr;

                if (!crtc)
                        continue;

                crtc->funcs->atomic_destroy_state(crtc,
                                                  state->crtcs[i].state);

                state->crtcs[i].ptr = NULL;
                state->crtcs[i].state = NULL;
                state->crtcs[i].old_state = NULL;
                state->crtcs[i].new_state = NULL;

                if (state->crtcs[i].commit) {
                        drm_crtc_commit_put(state->crtcs[i].commit);
                        state->crtcs[i].commit = NULL;
                }
        }

        for (i = 0; i < config->num_total_plane; i++) {
                struct drm_plane *plane = state->planes[i].ptr;

                if (!plane)
                        continue;

                plane->funcs->atomic_destroy_state(plane,
                                                   state->planes[i].state);
                state->planes[i].ptr = NULL;
                state->planes[i].state = NULL;
                state->planes[i].old_state = NULL;
                state->planes[i].new_state = NULL;
        }

        for (i = 0; i < state->num_private_objs; i++) {
                struct drm_private_obj *obj = state->private_objs[i].ptr;

                obj->funcs->atomic_destroy_state(obj,
                                                 state->private_objs[i].state);
                state->private_objs[i].ptr = NULL;
                state->private_objs[i].state = NULL;
                state->private_objs[i].old_state = NULL;
                state->private_objs[i].new_state = NULL;
        }
        state->num_private_objs = 0;

        if (state->fake_commit) {
                drm_crtc_commit_put(state->fake_commit);
                state->fake_commit = NULL;
        }
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct drm_mode_config *config = &dev->mode_config;

        if (config->funcs->atomic_state_clear)
                config->funcs->atomic_state_clear(state);
        else
                drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);
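
/*
 * Editor's note: an illustrative sketch (not part of the original file) of the
 * EDEADLK back-off dance that the kernel-doc above refers to. The function
 * demo_set_crtc_active() and the specific update it performs are assumptions;
 * the clear/backoff/retry structure around drm_atomic_state_clear() is the
 * point of the example.
 */
static int demo_set_crtc_active(struct drm_crtc *crtc, bool active)
{
        struct drm_modeset_acquire_ctx ctx;
        struct drm_atomic_state *state;
        struct drm_crtc_state *crtc_state;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);

        state = drm_atomic_state_alloc(crtc->dev);
        if (!state) {
                ret = -ENOMEM;
                goto out_fini;
        }
        state->acquire_ctx = &ctx;

retry:
        crtc_state = drm_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto out;
        }
        crtc_state->active = active;

        ret = drm_atomic_commit(state);
out:
        if (ret == -EDEADLK) {
                /* drop the half-built state, release all locks and restart */
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_atomic_state_put(state);
out_fini:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        return ret;
}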

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
        struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
        struct drm_mode_config *config = &state->dev->mode_config;

        drm_atomic_state_clear(state);

        DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

        if (config->funcs->atomic_state_free) {
                config->funcs->atomic_state_free(state);
        } else {
                drm_atomic_state_default_release(state);
                kfree(state);
        }
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
                          struct drm_crtc *crtc)
{
        int ret, index = drm_crtc_index(crtc);
        struct drm_crtc_state *crtc_state;

        WARN_ON(!state->acquire_ctx);

        crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
        if (crtc_state)
                return crtc_state;

        ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);

        crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
        if (!crtc_state)
                return ERR_PTR(-ENOMEM);

        state->crtcs[index].state = crtc_state;
        state->crtcs[index].old_state = crtc->state;
        state->crtcs[index].new_state = crtc_state;
        state->crtcs[index].ptr = crtc;
        crtc_state->state = state;

        DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
                         crtc->base.id, crtc->name, crtc_state, state);

        return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
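
/*
 * Editor's note: an illustrative sketch (not part of the original file) of how
 * driver code typically pulls a CRTC into an ongoing update and lets -EDEADLK
 * bubble up to the caller, which then restarts the whole sequence.
 * demo_plane_atomic_check() is a hypothetical &drm_plane_helper_funcs style
 * hook.
 */
static int demo_plane_atomic_check(struct drm_plane *plane,
                                   struct drm_plane_state *new_state)
{
        struct drm_crtc_state *crtc_state;

        if (!new_state->crtc)
                return 0;

        /* may take crtc->mutex; returns ERR_PTR(-EDEADLK) on ww contention */
        crtc_state = drm_atomic_get_crtc_state(new_state->state,
                                               new_state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        /* a real driver would now validate new_state against crtc_state */
        return 0;
}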

static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
                                 const struct drm_crtc_state *new_crtc_state)
{
        struct drm_crtc *crtc = new_crtc_state->crtc;

        /* NOTE: we explicitly don't enforce constraints such as primary
         * layer covering entire screen, since that is something we want
         * to allow (on hw that supports it). For hw that does not, it
         * should be checked in driver's crtc->atomic_check() vfunc.
         *
         * TODO: Add generic modeset state checks once we support those.
         */

        if (new_crtc_state->active && !new_crtc_state->enable) {
                DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
                                 crtc->base.id, crtc->name);
                return -EINVAL;
        }

        /* The state->enable vs. state->mode_blob checks can be WARN_ON,
         * as this is a kernel-internal detail that userspace should never
         * be able to trigger. */
        if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
            WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
                DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
                                 crtc->base.id, crtc->name);
                return -EINVAL;
        }

        if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
            WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
                DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
                                 crtc->base.id, crtc->name);
                return -EINVAL;
        }

        /*
         * Reject event generation for when a CRTC is off and stays off.
         * It wouldn't be hard to implement this, but userspace has a track
         * record of happily burning through 100% cpu (or worse, crash) when the
         * display pipe is suspended. To avoid all that fun just reject updates
         * that ask for events since likely that indicates a bug in the
         * compositor's drawing loop. This is consistent with the vblank IOCTL
         * and legacy page_flip IOCTL which also reject service on a disabled
         * pipe.
         */
        if (new_crtc_state->event &&
            !new_crtc_state->active && !old_crtc_state->active) {
                DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
                                 crtc->base.id, crtc->name);
                return -EINVAL;
        }

        return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
                                        const struct drm_crtc_state *state)
{
        struct drm_crtc *crtc = state->crtc;

        drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
        drm_printf(p, "\tenable=%d\n", state->enable);
        drm_printf(p, "\tactive=%d\n", state->active);
        drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
        drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
        drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
        drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
        drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
        drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
        drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
        drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
        drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

        if (crtc->funcs->atomic_print_state)
                crtc->funcs->atomic_print_state(p, state);
}

static int drm_atomic_connector_check(struct drm_connector *connector,
                struct drm_connector_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_writeback_job *writeback_job = state->writeback_job;
        const struct drm_display_info *info = &connector->display_info;

        state->max_bpc = info->bpc ? info->bpc : 8;
        if (connector->max_bpc_property)
                state->max_bpc = min(state->max_bpc, state->max_requested_bpc);

        if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
                return 0;

        if (writeback_job->fb && !state->crtc) {
                DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
                                 connector->base.id, connector->name);
                return -EINVAL;
        }

        if (state->crtc)
                crtc_state = drm_atomic_get_existing_crtc_state(state->state,
                                                                state->crtc);

        if (writeback_job->fb && !crtc_state->active) {
                DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
                                 connector->base.id, connector->name,
                                 state->crtc->base.id);
                return -EINVAL;
        }

        if (writeback_job->out_fence && !writeback_job->fb) {
                DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
                                 connector->base.id, connector->name);
                return -EINVAL;
        }

        return 0;
}


/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
                           struct drm_plane *plane)
{
        int ret, index = drm_plane_index(plane);
        struct drm_plane_state *plane_state;

        WARN_ON(!state->acquire_ctx);

        /* the legacy pointers should never be set */
        WARN_ON(plane->fb);
        WARN_ON(plane->old_fb);
        WARN_ON(plane->crtc);

        plane_state = drm_atomic_get_existing_plane_state(state, plane);
        if (plane_state)
                return plane_state;

        ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);

        plane_state = plane->funcs->atomic_duplicate_state(plane);
        if (!plane_state)
                return ERR_PTR(-ENOMEM);

        state->planes[index].state = plane_state;
        state->planes[index].ptr = plane;
        state->planes[index].old_state = plane->state;
        state->planes[index].new_state = plane_state;
        plane_state->state = state;

        DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
                         plane->base.id, plane->name, plane_state, state);

        if (plane_state->crtc) {
                struct drm_crtc_state *crtc_state;

                crtc_state = drm_atomic_get_crtc_state(state,
                                                       plane_state->crtc);
                if (IS_ERR(crtc_state))
                        return ERR_CAST(crtc_state);
        }

        return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

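/*
 * Editor's note: an illustrative sketch (not part of the original file) of a
 * common use of drm_atomic_get_plane_state(): forcing a plane off as part of a
 * larger update. It assumes the drm_atomic_set_crtc_for_plane() and
 * drm_atomic_set_fb_for_plane() helpers declared in <drm/drm_atomic_uapi.h>;
 * demo_disable_plane() itself is hypothetical.
 */
static int demo_disable_plane(struct drm_atomic_state *state,
                              struct drm_plane *plane)
{
        struct drm_plane_state *plane_state;
        int ret;

        plane_state = drm_atomic_get_plane_state(state, plane);
        if (IS_ERR(plane_state))
                return PTR_ERR(plane_state);

        /* detach from its CRTC (this also pulls the affected CRTC states in) ... */
        ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
        if (ret)
                return ret;

        /* ... and drop the framebuffer from the new state */
        drm_atomic_set_fb_for_plane(plane_state, NULL);

        return 0;
}
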
static bool
plane_switching_crtc(const struct drm_plane_state *old_plane_state,
                     const struct drm_plane_state *new_plane_state)
{
        if (!old_plane_state->crtc || !new_plane_state->crtc)
                return false;

        if (old_plane_state->crtc == new_plane_state->crtc)
                return false;

        /* This could be refined, but currently there's no helper or driver code
         * to implement direct switching of active planes nor userspace to take
         * advantage of more direct plane switching without the intermediate
         * full OFF state.
         */
        return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @old_plane_state: old plane state to check
 * @new_plane_state: new plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
                                  const struct drm_plane_state *new_plane_state)
{
        struct drm_plane *plane = new_plane_state->plane;
        struct drm_crtc *crtc = new_plane_state->crtc;
        const struct drm_framebuffer *fb = new_plane_state->fb;
        unsigned int fb_width, fb_height;
        struct drm_mode_rect *clips;
        uint32_t num_clips;
        int ret;

        /* either *both* CRTC and FB must be set, or neither */
        if (crtc && !fb) {
                DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
                                 plane->base.id, plane->name);
                return -EINVAL;
        } else if (fb && !crtc) {
                DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
                                 plane->base.id, plane->name);
                return -EINVAL;
        }

        /* if disabled, we don't care about the rest of the state: */
        if (!crtc)
                return 0;

        /* Check whether this plane is usable on this CRTC */
        if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
                DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
                                 crtc->base.id, crtc->name,
                                 plane->base.id, plane->name);
                return -EINVAL;
        }

        /* Check whether this plane supports the fb pixel format. */
        ret = drm_plane_check_pixel_format(plane, fb->format->format,
                                           fb->modifier);
        if (ret) {
                struct drm_format_name_buf format_name;
                DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
                                 plane->base.id, plane->name,
                                 drm_get_format_name(fb->format->format,
                                                     &format_name),
                                 fb->modifier);
                return ret;
        }

        /* Give drivers some help against integer overflows */
        if (new_plane_state->crtc_w > INT_MAX ||
            new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
            new_plane_state->crtc_h > INT_MAX ||
            new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
                DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
                                 plane->base.id, plane->name,
                                 new_plane_state->crtc_w, new_plane_state->crtc_h,
                                 new_plane_state->crtc_x, new_plane_state->crtc_y);
                return -ERANGE;
        }

        fb_width = fb->width << 16;
        fb_height = fb->height << 16;

        /* Make sure source coordinates are inside the fb. */
        if (new_plane_state->src_w > fb_width ||
            new_plane_state->src_x > fb_width - new_plane_state->src_w ||
            new_plane_state->src_h > fb_height ||
            new_plane_state->src_y > fb_height - new_plane_state->src_h) {
                DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
                                 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
                                 plane->base.id, plane->name,
                                 new_plane_state->src_w >> 16,
                                 ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
                                 new_plane_state->src_h >> 16,
                                 ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
                                 new_plane_state->src_x >> 16,
                                 ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
                                 new_plane_state->src_y >> 16,
                                 ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
                                 fb->width, fb->height);
                return -ENOSPC;
        }

        clips = drm_plane_get_damage_clips(new_plane_state);
        num_clips = drm_plane_get_damage_clips_count(new_plane_state);

        /* Make sure damage clips are valid and inside the fb. */
        while (num_clips > 0) {
                if (clips->x1 >= clips->x2 ||
                    clips->y1 >= clips->y2 ||
                    clips->x1 < 0 ||
                    clips->y1 < 0 ||
                    clips->x2 > fb_width ||
                    clips->y2 > fb_height) {
                        DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
                                         plane->base.id, plane->name, clips->x1,
                                         clips->y1, clips->x2, clips->y2);
                        return -EINVAL;
                }
                clips++;
                num_clips--;
        }

        if (plane_switching_crtc(old_plane_state, new_plane_state)) {
                DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
                                 plane->base.id, plane->name);
                return -EINVAL;
        }

        return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
                                         const struct drm_plane_state *state)
{
        struct drm_plane *plane = state->plane;
        struct drm_rect src = drm_plane_state_src(state);
        struct drm_rect dest = drm_plane_state_dest(state);

        drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
        drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
        drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
        if (state->fb)
                drm_framebuffer_print_info(p, 2, state->fb);
        drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
        drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
        drm_printf(p, "\trotation=%x\n", state->rotation);
        drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
        drm_printf(p, "\tcolor-encoding=%s\n",
                   drm_get_color_encoding_name(state->color_encoding));
        drm_printf(p, "\tcolor-range=%s\n",
                   drm_get_color_range_name(state->color_range));

        if (plane->funcs->atomic_print_state)
                plane->funcs->atomic_print_state(p, state);
}

/**
 * DOC: handling driver private state
 *
 * Very often the DRM objects exposed to userspace in the atomic modeset api
 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
 * underlying hardware. Especially for any kind of shared resources (e.g. shared
 * clocks, scaler units, bandwidth and fifo limits shared among a group of
 * planes or CRTCs, and so on) it makes sense to model these as independent
 * objects. Drivers then need to do similar state tracking and commit ordering for
 * such private (since not exposed to userspace) objects as the atomic core and
 * helpers already provide for connectors, planes and CRTCs.
 *
 * To make this easier on drivers the atomic core provides some support to track
 * driver private state objects using struct &drm_private_obj, with the
 * associated state struct &drm_private_state.
 *
 * Similar to userspace-exposed objects, private state structures can be
 * acquired by calling drm_atomic_get_private_obj_state(). This function takes
 * care of grabbing the per-object &drm_modeset_lock (see &drm_private_obj.lock),
 * so drivers only need to wrap it in a type-safe accessor for each kind of
 * private state object they have.
 *
 * All private state structures contained in a &drm_atomic_state update can be
 * iterated using for_each_oldnew_private_obj_in_state(),
 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
 * Drivers are recommended to wrap these for each type of driver private state
 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
 * least if they want to iterate over all objects of a given type.
 *
 * An earlier way to handle driver private state was by subclassing struct
 * &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit instead" of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
 */

/**
 * drm_atomic_private_obj_init - initialize private object
 * @dev: DRM device this object will be attached to
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_device *dev,
                            struct drm_private_obj *obj,
                            struct drm_private_state *state,
                            const struct drm_private_state_funcs *funcs)
{
        memset(obj, 0, sizeof(*obj));

        drm_modeset_lock_init(&obj->lock);

        obj->state = state;
        obj->funcs = funcs;
        list_add_tail(&obj->head, &dev->mode_config.privobj_list);
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
        list_del(&obj->head);
        obj->funcs->atomic_destroy_state(obj, obj->state);
        drm_modeset_lock_fini(&obj->lock);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It will also grab the relevant private
 * object lock to make sure that the state is consistent.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
                                 struct drm_private_obj *obj)
{
        int index, num_objs, i, ret;
        size_t size;
        struct __drm_private_objs_state *arr;
        struct drm_private_state *obj_state;

        for (i = 0; i < state->num_private_objs; i++)
                if (obj == state->private_objs[i].ptr)
                        return state->private_objs[i].state;

        ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);

        num_objs = state->num_private_objs + 1;
        size = sizeof(*state->private_objs) * num_objs;
        arr = krealloc(state->private_objs, size, GFP_KERNEL);
        if (!arr)
                return ERR_PTR(-ENOMEM);

        state->private_objs = arr;
        index = state->num_private_objs;
        memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

        obj_state = obj->funcs->atomic_duplicate_state(obj);
        if (!obj_state)
                return ERR_PTR(-ENOMEM);

        state->private_objs[index].state = obj_state;
        state->private_objs[index].old_state = obj->state;
        state->private_objs[index].new_state = obj_state;
        state->private_objs[index].ptr = obj;
        obj_state->state = state;

        state->num_private_objs = num_objs;

        DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
                         obj, obj_state, state);

        return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
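
/*
 * Editor's note: an illustrative sketch (not part of the original file) of the
 * type-safe wrapping drivers typically build around the private-object API
 * described in the DOC comment above. All demo_* names and the shared bus
 * resource being modelled are hypothetical.
 */
struct demo_bus_state {
        struct drm_private_state base;
        unsigned int allocated_bandwidth;
};

struct demo_bus {
        struct drm_private_obj base;
};

static struct drm_private_state *
demo_bus_duplicate_state(struct drm_private_obj *obj)
{
        struct demo_bus_state *old =
                container_of(obj->state, struct demo_bus_state, base);
        struct demo_bus_state *state;

        state = kmemdup(old, sizeof(*state), GFP_KERNEL);

        return state ? &state->base : NULL;
}

static void demo_bus_destroy_state(struct drm_private_obj *obj,
                                   struct drm_private_state *state)
{
        kfree(container_of(state, struct demo_bus_state, base));
}

static const struct drm_private_state_funcs demo_bus_funcs = {
        .atomic_duplicate_state = demo_bus_duplicate_state,
        .atomic_destroy_state = demo_bus_destroy_state,
};

/* registered once at driver load, e.g. from the driver's modeset init */
static int demo_bus_init(struct drm_device *dev, struct demo_bus *bus)
{
        struct demo_bus_state *state;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        drm_atomic_private_obj_init(dev, &bus->base, &state->base,
                                    &demo_bus_funcs);
        return 0;
}

/* type-safe accessor used from the driver's atomic_check paths */
static struct demo_bus_state *
demo_get_bus_state(struct drm_atomic_state *state, struct demo_bus *bus)
{
        struct drm_private_state *priv_state;

        priv_state = drm_atomic_get_private_obj_state(state, &bus->base);
        if (IS_ERR(priv_state))
                return ERR_CAST(priv_state);

        return container_of(priv_state, struct demo_bus_state, base);
}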

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
                               struct drm_connector *connector)
{
        int ret, index;
        struct drm_mode_config *config = &connector->dev->mode_config;
        struct drm_connector_state *connector_state;

        WARN_ON(!state->acquire_ctx);

        ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);

        index = drm_connector_index(connector);

        if (index >= state->num_connector) {
                struct __drm_connnectors_state *c;
                int alloc = max(index + 1, config->num_connector);

                c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
                if (!c)
                        return ERR_PTR(-ENOMEM);

                state->connectors = c;
                memset(&state->connectors[state->num_connector], 0,
                       sizeof(*state->connectors) * (alloc - state->num_connector));

                state->num_connector = alloc;
        }

        if (state->connectors[index].state)
                return state->connectors[index].state;

        connector_state = connector->funcs->atomic_duplicate_state(connector);
        if (!connector_state)
                return ERR_PTR(-ENOMEM);

        drm_connector_get(connector);
        state->connectors[index].state = connector_state;
        state->connectors[index].old_state = connector->state;
        state->connectors[index].new_state = connector_state;
        state->connectors[index].ptr = connector;
        connector_state->state = state;

        DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
                         connector->base.id, connector->name,
                         connector_state, state);

        if (connector_state->crtc) {
                struct drm_crtc_state *crtc_state;

                crtc_state = drm_atomic_get_crtc_state(state,
                                                       connector_state->crtc);
                if (IS_ERR(crtc_state))
                        return ERR_CAST(crtc_state);
        }

        return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
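
/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * pulling a connector into an update from driver code, e.g. to revalidate it
 * after a sink-side change. demo_revalidate_connector() is hypothetical, and
 * whether flagging the CRTC this way is appropriate depends on the driver.
 */
static int demo_revalidate_connector(struct drm_atomic_state *state,
                                     struct drm_connector *connector)
{
        struct drm_connector_state *conn_state;
        struct drm_crtc_state *crtc_state;

        /* takes the connection_mutex and references the connector */
        conn_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(conn_state))
                return PTR_ERR(conn_state);

        if (!conn_state->crtc)
                return 0;

        /* the CRTC state was already added above; flag it for a recheck */
        crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        crtc_state->connectors_changed = true;

        return 0;
}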

static void drm_atomic_connector_print_state(struct drm_printer *p,
                const struct drm_connector_state *state)
{
        struct drm_connector *connector = state->connector;

        drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
        drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

        if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                if (state->writeback_job && state->writeback_job->fb)
                        drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

        if (connector->funcs->atomic_print_state)
                connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc. Hence
 * drivers and helpers should only call this when really needed (e.g. when a
 * full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
                                   struct drm_crtc *crtc)
{
        struct drm_mode_config *config = &state->dev->mode_config;
        struct drm_connector *connector;
        struct drm_connector_state *conn_state;
        struct drm_connector_list_iter conn_iter;
        struct drm_crtc_state *crtc_state;
        int ret;

        crtc_state = drm_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
        if (ret)
                return ret;

        DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
                         crtc->base.id, crtc->name, state);

        /*
         * Changed connectors are already in @state, so only need to look
         * at the connector_mask in crtc_state.
         */
        drm_connector_list_iter_begin(state->dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
                        continue;

                conn_state = drm_atomic_get_connector_state(state, connector);
                if (IS_ERR(conn_state)) {
                        drm_connector_list_iter_end(&conn_iter);
                        return PTR_ERR(conn_state);
                }
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any) adding all the plane states for
 * a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
                               struct drm_crtc *crtc)
{
        const struct drm_crtc_state *old_crtc_state =
                drm_atomic_get_old_crtc_state(state, crtc);
        struct drm_plane *plane;

        WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

        DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
                         crtc->base.id, crtc->name, state);

        drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
                struct drm_plane_state *plane_state =
                        drm_atomic_get_plane_state(state, plane);

                if (IS_ERR(plane_state))
                        return PTR_ERR(plane_state);
        }
        return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
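
/*
 * Editor's note: an illustrative sketch (not part of the original file) of how
 * the two add_affected helpers above are typically used together when a driver
 * decides a CRTC needs a full modeset. demo_force_full_modeset() is
 * hypothetical.
 */
static int demo_force_full_modeset(struct drm_atomic_state *state,
                                   struct drm_crtc *crtc)
{
        struct drm_crtc_state *crtc_state;
        int ret;

        crtc_state = drm_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        crtc_state->mode_changed = true;

        /* pull in every connector currently routed to this CRTC ... */
        ret = drm_atomic_add_affected_connectors(state, crtc);
        if (ret)
                return ret;

        /* ... and every plane currently active on it */
        return drm_atomic_add_affected_planes(state, crtc);
}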

/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state;
        struct drm_plane_state *new_plane_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
        struct drm_crtc_state *new_crtc_state;
        struct drm_connector *conn;
        struct drm_connector_state *conn_state;
        int i, ret = 0;

        DRM_DEBUG_ATOMIC("checking %p\n", state);

        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
                if (ret) {
                        DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
                                         plane->base.id, plane->name);
                        return ret;
                }
        }

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
                if (ret) {
                        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
                                         crtc->base.id, crtc->name);
                        return ret;
                }
        }

        for_each_new_connector_in_state(state, conn, conn_state, i) {
                ret = drm_atomic_connector_check(conn, conn_state);
                if (ret) {
                        DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
                                         conn->base.id, conn->name);
                        return ret;
                }
        }

        if (config->funcs->atomic_check) {
                ret = config->funcs->atomic_check(state->dev, state);

                if (ret) {
                        DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
                                         state, ret);
                        return ret;
                }
        }

        if (!state->allow_modeset) {
                for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                        if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
                                DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
                                                 crtc->base.id, crtc->name);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
        struct drm_mode_config *config = &state->dev->mode_config;
        int ret;

        ret = drm_atomic_check_only(state);
        if (ret)
                return ret;

        DRM_DEBUG_ATOMIC("committing %p\n", state);

        return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);
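
/*
 * Editor's note: an illustrative sketch (not part of the original file) of the
 * reference rule spelled out above: drm_atomic_commit() takes its own
 * reference on @state, so the caller still drops the reference it got from
 * drm_atomic_state_alloc(). demo_commit_and_put() is hypothetical, and a real
 * caller would handle -EDEADLK with the back-off dance shown earlier before
 * letting go of the state.
 */
static int demo_commit_and_put(struct drm_atomic_state *state)
{
        int ret;

        ret = drm_atomic_commit(state);

        /* release the caller's reference whether or not the commit succeeded */
        drm_atomic_state_put(state);

        return ret;
}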

/**
 * drm_atomic_nonblocking_commit - atomic nonblocking commit
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
        struct drm_mode_config *config = &state->dev->mode_config;
        int ret;

        ret = drm_atomic_check_only(state);
        if (ret)
                return ret;

        DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

        return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);

void drm_atomic_print_state(const struct drm_atomic_state *state)
{
        struct drm_printer p = drm_info_printer(state->dev->dev);
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int i;

        DRM_DEBUG_ATOMIC("checking %p\n", state);

        for_each_new_plane_in_state(state, plane, plane_state, i)
                drm_atomic_plane_print_state(&p, plane_state);

        for_each_new_crtc_in_state(state, crtc, crtc_state, i)
                drm_atomic_crtc_print_state(&p, crtc_state);

        for_each_new_connector_in_state(state, connector, connector_state, i)
                drm_atomic_connector_print_state(&p, connector_state);
}

static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
                             bool take_locks)
{
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_plane *plane;
        struct drm_crtc *crtc;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        if (!drm_drv_uses_atomic_modeset(dev))
                return;

        list_for_each_entry(plane, &config->plane_list, head) {
                if (take_locks)
                        drm_modeset_lock(&plane->mutex, NULL);
                drm_atomic_plane_print_state(p, plane->state);
                if (take_locks)
                        drm_modeset_unlock(&plane->mutex);
        }

        list_for_each_entry(crtc, &config->crtc_list, head) {
                if (take_locks)
                        drm_modeset_lock(&crtc->mutex, NULL);
                drm_atomic_crtc_print_state(p, crtc->state);
                if (take_locks)
                        drm_modeset_unlock(&crtc->mutex);
        }

        drm_connector_list_iter_begin(dev, &conn_iter);
        if (take_locks)
                drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        drm_for_each_connector_iter(connector, &conn_iter)
                drm_atomic_connector_print_state(p, connector->state);
        if (take_locks)
                drm_modeset_unlock(&dev->mode_config.connection_mutex);
        drm_connector_list_iter_end(&conn_iter);
}

/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error irq's. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must drm_modeset_lock_all(), or if this is called
 * from error irq handler, it should not be enabled by default.
 * (I.e. if you are debugging errors you might not care that this
 * is racy. But calling this without all modeset locks held is
 * not inherently safe.)
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
        __drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);
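
/*
 * Editor's note: an illustrative sketch (not part of the original file) of the
 * kind of opt-in error-path dump suggested above. demo_handle_underrun() and
 * the demo_dump_on_error toggle are hypothetical.
 */
static bool demo_dump_on_error;

static void demo_handle_underrun(struct drm_device *dev)
{
        struct drm_printer p = drm_info_printer(dev->dev);

        /* racy without the modeset locks, but good enough for debugging */
        if (demo_dump_on_error)
                drm_state_dump(dev, &p);
}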

#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_printer p = drm_seq_file_printer(m);

        __drm_state_dump(dev, &p, true);

        return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
        {"state", drm_state_info, 0},
};

int drm_atomic_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(drm_atomic_debugfs_list,
                        ARRAY_SIZE(drm_atomic_debugfs_list),
                        minor->debugfs_root, minor);
}
#endif
