drivers/gpu/drm/drm_atomic.c

/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state;

	if (!config->funcs->atomic_state_alloc) {
		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);
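
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * drm_atomic_state_alloc(). Acquire-context handling is the caller's
 * responsibility; "example_alloc_and_drop" is a hypothetical helper shown
 * only to demonstrate the kref-based lifetime set up above.
 */
static void __maybe_unused
example_alloc_and_drop(struct drm_device *dev,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return;

	/* Every state needs an acquire context before objects are added. */
	state->acquire_ctx = ctx;

	/* ... build, check and commit the update here ... */

	/* Drop the initial reference; the state is freed once unused. */
	drm_atomic_state_put(state);
}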

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		if (state->crtcs[i].commit) {
			kfree(state->crtcs[i].commit->event);
			state->crtcs[i].commit->event = NULL;
			drm_crtc_commit_put(state->crtcs[i].commit);
		}

		state->crtcs[i].commit = NULL;
		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		void *obj_state = state->private_objs[i].obj_state;

		state->private_objs[i].funcs->destroy_state(obj_state);
		state->private_objs[i].obj = NULL;
		state->private_objs[i].obj_state = NULL;
		state->private_objs[i].funcs = NULL;
	}
	state->num_private_objs = 0;

}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);
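
/*
 * Illustrative sketch (not part of the original file): the w/w back-off dance
 * described above. On -EDEADLK the assembled state must be cleared with
 * drm_atomic_state_clear() before the locks are re-acquired and the update is
 * rebuilt. "example_retry_commit" and "build_update" are hypothetical; the
 * caller keeps its own reference on @state and drops it afterwards with
 * drm_atomic_state_put().
 */
static int __maybe_unused
example_retry_commit(struct drm_atomic_state *state,
		     struct drm_modeset_acquire_ctx *ctx,
		     int (*build_update)(struct drm_atomic_state *state))
{
	int ret;

retry:
	ret = build_update(state);
	if (ret == 0)
		ret = drm_atomic_commit(state);

	if (ret == -EDEADLK) {
		/* Drop all cached object states, back off and try again. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return ret;
}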

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
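
/*
 * Illustrative sketch (not part of the original file): typical use of
 * drm_atomic_get_crtc_state() while building an update. The -EDEADLK case is
 * simply propagated to the caller, which then restarts the whole sequence.
 * "example_disable_crtc" is a hypothetical helper.
 */
static int __maybe_unused
example_disable_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Only the duplicated state is touched, never crtc->state itself. */
	crtc_state->active = false;

	return 0;
}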

static void set_out_fence_for_crtc(struct drm_atomic_state *state,
				   struct drm_crtc *crtc, s32 __user *fence_ptr)
{
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	s32 __user *fence_ptr;

	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;

	return fence_ptr;
}

/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state and update
 * the enable property.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 struct drm_display_mode *mode)
{
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 mode->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
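
/*
 * Illustrative sketch (not part of the original file): pairing
 * drm_atomic_get_crtc_state() with drm_atomic_set_mode_for_crtc() to program
 * a kernel-internal mode. "example_set_mode" is a hypothetical helper; mode
 * validation is assumed to have happened elsewhere.
 */
static int __maybe_unused
example_set_mode(struct drm_atomic_state *state, struct drm_crtc *crtc,
		 struct drm_display_mode *mode)
{
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Duplicates @mode into the state and sets state->enable for us. */
	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
	if (ret)
		return ret;

	crtc_state->active = true;

	return 0;
}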

/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	if (blob == state->mode_blob)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
		    drm_mode_convert_umode(&state->mode,
					   (const struct drm_mode_modeinfo *)
					   blob->data))
			return -EINVAL;

		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 state->mode.name, state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);

/**
 * drm_atomic_replace_property_blob - replace a blob property
 * @blob: a pointer to the member blob to be replaced
 * @new_blob: the new blob to replace with
 * @replaced: whether the blob has been replaced
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static void
drm_atomic_replace_property_blob(struct drm_property_blob **blob,
				 struct drm_property_blob *new_blob,
				 bool *replaced)
{
	struct drm_property_blob *old_blob = *blob;

	if (old_blob == new_blob)
		return;

	drm_property_blob_put(old_blob);
	if (new_blob)
		drm_property_blob_get(new_blob);
	*blob = new_blob;
	*replaced = true;

	return;
}

static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 && expected_size != new_blob->length) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
	}

	drm_atomic_replace_property_blob(blob, new_blob, replaced);
	drm_property_blob_put(new_blob);

	return 0;
}

/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->degamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->gamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property)
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);

/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to get the property value from
 * @property: the property to set
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}

/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		if (state->fence)
			return -EINVAL;

		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->rotation_property) {
		if (!is_power_of_2(val & DRM_ROTATE_MASK))
			return -EINVAL;
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to set a property on
 * @state: the state object to get the property value from
 * @property: the property to set
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (WARN_ON(state->crtc && !state->fb)) {
		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
		return -EINVAL;
	} else if (WARN_ON(state->fb && !state->crtc)) {
		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
				 drm_get_format_name(state->fb->format->format,
						     &format_name));
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("Invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb) {
		struct drm_framebuffer *fb = state->fb;
		int i, n = fb->format->num_planes;
		struct drm_format_name_buf format_name;

		drm_printf(p, "\t\tformat=%s\n",
			   drm_get_format_name(fb->format->format, &format_name));
		drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
		drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
		drm_printf(p, "\t\tlayers:\n");
		for (i = 0; i < n; i++) {
			drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
			drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
		}
	}
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

b430c27a
PD
992/**
993 * drm_atomic_get_private_obj_state - get private object state
994 * @state: global atomic state
995 * @obj: private object to get the state for
996 * @funcs: pointer to the struct of function pointers that identify the object
997 * type
998 *
999 * This function returns the private object state for the given private object,
1000 * allocating the state if needed. It does not grab any locks as the caller is
1001 * expected to care of any required locking.
1002 *
1003 * RETURNS:
1004 *
1005 * Either the allocated state or the error code encoded into a pointer.
1006 */
1007void *
1008drm_atomic_get_private_obj_state(struct drm_atomic_state *state, void *obj,
1009 const struct drm_private_state_funcs *funcs)
1010{
1011 int index, num_objs, i;
1012 size_t size;
1013 struct __drm_private_objs_state *arr;
1014
1015 for (i = 0; i < state->num_private_objs; i++)
1016 if (obj == state->private_objs[i].obj &&
1017 state->private_objs[i].obj_state)
1018 return state->private_objs[i].obj_state;
1019
1020 num_objs = state->num_private_objs + 1;
1021 size = sizeof(*state->private_objs) * num_objs;
1022 arr = krealloc(state->private_objs, size, GFP_KERNEL);
1023 if (!arr)
1024 return ERR_PTR(-ENOMEM);
1025
1026 state->private_objs = arr;
1027 index = state->num_private_objs;
1028 memset(&state->private_objs[index], 0, sizeof(*state->private_objs));
1029
1030 state->private_objs[index].obj_state = funcs->duplicate_state(state, obj);
1031 if (!state->private_objs[index].obj_state)
1032 return ERR_PTR(-ENOMEM);
1033
1034 state->private_objs[index].obj = obj;
1035 state->private_objs[index].funcs = funcs;
1036 state->num_private_objs = num_objs;
1037
1038 DRM_DEBUG_ATOMIC("Added new private object state %p to %p\n",
1039 state->private_objs[index].obj_state, state);
1040
1041 return state->private_objs[index].obj_state;
1042}
1043EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
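
/*
 * Illustrative sketch (not part of the original file): minimal callbacks for a
 * driver-private object, covering only the two hooks exercised in this file
 * (duplicate_state and destroy_state). The real ops structure may require
 * further callbacks (e.g. for swapping states at commit time) that are not
 * shown here; "struct example_obj_state" and "example_private_funcs" are
 * hypothetical, and the private object is assumed to directly hold an
 * example_obj_state with its current values.
 */
struct example_obj_state {
	int value;
};

static void *example_duplicate_state(struct drm_atomic_state *state, void *obj)
{
	/* Snapshot the object's current state for this atomic update. */
	return kmemdup(obj, sizeof(struct example_obj_state), GFP_KERNEL);
}

static void example_destroy_state(void *obj_state)
{
	kfree(obj_state);
}

static const struct drm_private_state_funcs example_private_funcs __maybe_unused = {
	/* Used via drm_atomic_get_private_obj_state(state, obj, &example_private_funcs). */
	.duplicate_state = example_duplicate_state,
	.destroy_state = example_destroy_state,
};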

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, driver
		 * silently rejects it and returns a 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);

static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to set a property on
 * @state: the state object to get the property value from
 * @property: the property to set
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		*val = connector->dpms;
	} else if (property == config->tv_select_subconnector_property) {
		*val = state->tv.subconnector;
	} else if (property == config->tv_left_margin_property) {
		*val = state->tv.margins.left;
	} else if (property == config->tv_right_margin_property) {
		*val = state->tv.margins.right;
	} else if (property == config->tv_top_margin_property) {
		*val = state->tv.margins.top;
	} else if (property == config->tv_bottom_margin_property) {
		*val = state->tv.margins.bottom;
	} else if (property == config->tv_mode_property) {
		*val = state->tv.mode;
	} else if (property == config->tv_brightness_property) {
		*val = state->tv.brightness;
	} else if (property == config->tv_contrast_property) {
		*val = state->tv.contrast;
	} else if (property == config->tv_flicker_reduction_property) {
		*val = state->tv.flicker_reduction;
	} else if (property == config->tv_overscan_property) {
		*val = state->tv.overscan;
	} else if (property == config->tv_saturation_property) {
		*val = state->tv.saturation;
	} else if (property == config->tv_hue_property) {
		*val = state->tv.hue;
	} else if (property == config->link_status_property) {
		*val = state->link_status;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

int drm_atomic_get_property(struct drm_mode_object *obj,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = property->dev;
	int ret;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
		ret = drm_atomic_connector_get_property(connector,
				connector->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
		ret = drm_atomic_crtc_get_property(crtc,
				crtc->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
		ret = drm_atomic_plane_get_property(plane,
				plane->state, property, val);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
	}

	plane_state->crtc = crtc;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= (1 << drm_plane_index(plane));
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
				 plane_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
				 plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
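
/*
 * Illustrative sketch (not part of the original file): putting
 * drm_atomic_set_crtc_for_plane() and drm_atomic_set_fb_for_plane() together
 * to describe a full-screen plane update. "example_setup_plane" is a
 * hypothetical helper; source coordinates are in 16.16 fixed point.
 */
static int __maybe_unused
example_setup_plane(struct drm_atomic_state *state, struct drm_plane *plane,
		    struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* Grabs the new CRTC's state/lock and updates both plane_masks. */
	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret)
		return ret;

	/* Takes a reference on @fb and drops the one on the old fb. */
	drm_atomic_set_fb_for_plane(plane_state, fb);

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	return 0;
}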

/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
				 fb->base.id, plane_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
				 plane_state);

	drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);

/**
 * drm_atomic_set_fence_for_plane - set fence for plane
 * @plane_state: atomic state object for the plane
 * @fence: dma_fence to use for the plane
 *
 * Helper to setup the plane_state fence in case it is not set yet.
 * By using this, drivers don't need to worry whether the user chose
 * implicit or explicit fencing.
 *
 * This function will not set the fence to the state if it was set
 * via explicit fencing interfaces on the atomic ioctl. In that case it will
 * drop the reference to the fence as we are not storing it anywhere.
 * Otherwise, if &drm_plane_state.fence is not set, this function will just
 * set it with the received implicit fence. In both cases this function
 * consumes a reference for @fence.
 */
void
drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
			       struct dma_fence *fence)
{
	if (plane_state->fence) {
		dma_fence_put(fence);
		return;
	}

	plane_state->fence = fence;
}
EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
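
/*
 * Illustrative sketch (not part of the original file): how a driver hook might
 * hand an implicit fence to the plane state. @implicit_fence stands in for
 * whatever fence the driver extracted from the framebuffer's backing storage;
 * it is assumed to carry a reference owned by the caller (or be NULL).
 * "example_attach_implicit_fence" is hypothetical.
 */
static void __maybe_unused
example_attach_implicit_fence(struct drm_plane_state *plane_state,
			      struct dma_fence *implicit_fence)
{
	/*
	 * Consumes the reference: if userspace already supplied an explicit
	 * IN_FENCE_FD, the implicit fence is dropped again right away.
	 */
	drm_atomic_set_fence_for_plane(plane_state, implicit_fence);
}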
1417
cc4ceb48
DV
1418/**
1419 * drm_atomic_set_crtc_for_connector - set crtc for connector
1420 * @conn_state: atomic state object for the connector
1421 * @crtc: crtc to use for the connector
1422 *
1423 * Changing the assigned crtc for a connector requires us to grab the lock and
1424 * state for the new crtc, as needed. This function takes care of all these
1425 * details besides updating the pointer in the state object itself.
1426 *
1427 * Returns:
1428 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1429 * then the w/w mutex code has detected a deadlock and the entire atomic
1430 * sequence must be restarted. All other errors are fatal.
1431 */
1432int
1433drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
1434 struct drm_crtc *crtc)
1435{
1436 struct drm_crtc_state *crtc_state;
1437
e2d800a3
CW
1438 if (conn_state->crtc == crtc)
1439 return 0;
1440
1441 if (conn_state->crtc) {
b4d93679
ML
1442 crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
1443 conn_state->crtc);
4cd9fa52
ML
1444
1445 crtc_state->connector_mask &=
1446 ~(1 << drm_connector_index(conn_state->connector));
e2d800a3 1447
ad093607 1448 drm_connector_put(conn_state->connector);
e2d800a3 1449 conn_state->crtc = NULL;
4cd9fa52
ML
1450 }
1451
cc4ceb48
DV
1452 if (crtc) {
1453 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
1454 if (IS_ERR(crtc_state))
1455 return PTR_ERR(crtc_state);
4cd9fa52
ML
1456
1457 crtc_state->connector_mask |=
1458 1 << drm_connector_index(conn_state->connector);
cc4ceb48 1459
ad093607 1460 drm_connector_get(conn_state->connector);
e2d800a3 1461 conn_state->crtc = crtc;
cc4ceb48 1462
fa3ab4c2
VS
1463 DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
1464 conn_state, crtc->base.id, crtc->name);
e2d800a3 1465 } else {
17a38d9c
DV
1466 DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
1467 conn_state);
e2d800a3 1468 }
cc4ceb48
DV
1469
1470 return 0;
1471}
1472EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
1473
1474/**
1475 * drm_atomic_add_affected_connectors - add connectors for crtc
1476 * @state: atomic state
1477 * @crtc: DRM crtc
1478 *
1479 * This function walks the current configuration and adds all connectors
1480 * currently using @crtc to the atomic configuration @state. Note that this
1481 * function must acquire the connection mutex. This can potentially cause
1482 * unneeded seralization if the update is just for the planes on one crtc. Hence
1483 * drivers and helpers should only call this when really needed (e.g. when a
1484 * full modeset needs to happen due to some change).
1485 *
1486 * Returns:
1487 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
1488 * then the w/w mutex code has detected a deadlock and the entire atomic
1489 * sequence must be restarted. All other errors are fatal.
1490 */
1491int
1492drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
1493 struct drm_crtc *crtc)
1494{
1495 struct drm_mode_config *config = &state->dev->mode_config;
1496 struct drm_connector *connector;
1497 struct drm_connector_state *conn_state;
613051da 1498 struct drm_connector_list_iter conn_iter;
5351bbdd 1499 struct drm_crtc_state *crtc_state;
cc4ceb48
DV
1500 int ret;
1501
5351bbdd
ML
1502 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1503 if (IS_ERR(crtc_state))
1504 return PTR_ERR(crtc_state);
1505
cc4ceb48
DV
1506 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
1507 if (ret)
1508 return ret;
1509
fa3ab4c2
VS
1510 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
1511 crtc->base.id, crtc->name, state);
cc4ceb48
DV
1512
1513 /*
5351bbdd
ML
1514 * Changed connectors are already in @state, so only need to look
1515 * at the connector_mask in crtc_state.
cc4ceb48 1516 */
b982dab1 1517 drm_connector_list_iter_begin(state->dev, &conn_iter);
613051da 1518 drm_for_each_connector_iter(connector, &conn_iter) {
5351bbdd 1519 if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
cc4ceb48
DV
1520 continue;
1521
1522 conn_state = drm_atomic_get_connector_state(state, connector);
613051da 1523 if (IS_ERR(conn_state)) {
b982dab1 1524 drm_connector_list_iter_end(&conn_iter);
cc4ceb48 1525 return PTR_ERR(conn_state);
613051da 1526 }
cc4ceb48 1527 }
b982dab1 1528 drm_connector_list_iter_end(&conn_iter);
cc4ceb48
DV
1529
1530 return 0;
1531}
1532EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
1533
e01e9f75
ML
1534/**
1535 * drm_atomic_add_affected_planes - add planes for crtc
1536 * @state: atomic state
1537 * @crtc: DRM crtc
1538 *
1539 * This function walks the current configuration and adds all planes
1540 * currently used by @crtc to the atomic configuration @state. This is useful
1541 * when an atomic commit also needs to check all currently enabled plane on
1542 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
1543 * to avoid special code to force-enable all planes.
1544 *
1545 * Since acquiring a plane state will always also acquire the w/w mutex of the
1546 * current CRTC for that plane (if there is any), adding all the plane states for
1547 * a CRTC will not reduce parallelism of atomic updates.
1548 *
1549 * Returns:
1550 * 0 on success, or -EDEADLK or -ENOMEM on failure. When the error is -EDEADLK
1551 * then the w/w mutex code has detected a deadlock and the entire atomic
1552 * sequence must be restarted. All other errors are fatal.
1553 */
1554int
1555drm_atomic_add_affected_planes(struct drm_atomic_state *state,
1556 struct drm_crtc *crtc)
1557{
1558 struct drm_plane *plane;
1559
b4d93679 1560 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
e01e9f75
ML
1561
1562 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
1563 struct drm_plane_state *plane_state =
1564 drm_atomic_get_plane_state(state, plane);
1565
1566 if (IS_ERR(plane_state))
1567 return PTR_ERR(plane_state);
1568 }
1569 return 0;
1570}
1571EXPORT_SYMBOL(drm_atomic_add_affected_planes);
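
/*
 * Illustrative sketch (not part of this file): when a CRTC needs a full
 * modeset, a driver can pull in every plane currently on that CRTC so the
 * subsequent commit re-programs them. foo_crtc_atomic_check() is a
 * hypothetical CRTC helper ->atomic_check implementation.
 */
#if 0
static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *crtc_state)
{
	/* Pull in all planes on this CRTC so the modeset re-programs them. */
	if (drm_atomic_crtc_needs_modeset(crtc_state))
		return drm_atomic_add_affected_planes(crtc_state->state, crtc);

	return 0;
}
#endif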
1572
cc4ceb48
DV
1573/**
1574 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
1575 * @state: atomic state
1576 *
1577 * This function should be used by legacy entry points which don't understand
1578 * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
16d78bc2 1579 * the slowpath has completed.
cc4ceb48
DV
1580 */
1581void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1582{
81e257e9 1583 struct drm_device *dev = state->dev;
cc4ceb48 1584 int ret;
81e257e9
ML
1585 bool global = false;
1586
81e257e9
ML
1587 if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
1588 global = true;
1589
1590 dev->mode_config.acquire_ctx = NULL;
1591 }
cc4ceb48
DV
1592
1593retry:
1594 drm_modeset_backoff(state->acquire_ctx);
1595
81e257e9 1596 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
cc4ceb48
DV
1597 if (ret)
1598 goto retry;
81e257e9 1599
81e257e9
ML
1600 if (global)
1601 dev->mode_config.acquire_ctx = state->acquire_ctx;
cc4ceb48
DV
1602}
1603EXPORT_SYMBOL(drm_atomic_legacy_backoff);
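
/*
 * Illustrative sketch (not part of this file): the typical retry loop in a
 * legacy entry point that cannot propagate -EDEADLK to its caller.
 * foo_apply_legacy_change() is hypothetical; the drm_atomic_* calls are real.
 */
#if 0
static int foo_legacy_set_property(struct drm_atomic_state *state)
{
	int ret;

retry:
	ret = foo_apply_legacy_change(state);	/* hypothetical */
	if (ret == -EDEADLK) {
		/* Drop the half-built state and reacquire every modeset lock. */
		drm_atomic_state_clear(state);
		drm_atomic_legacy_backoff(state);
		goto retry;
	}

	return ret;
}
#endif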
1604
1605/**
1606 * drm_atomic_check_only - check whether a given config would work
1607 * @state: atomic configuration to check
1608 *
1609 * Note that this function can return -EDEADLK if the driver needed to acquire
1610 * more locks but encountered a deadlock. The caller must then do the usual w/w
1611 * backoff dance and restart. All other errors are fatal.
1612 *
1613 * Returns:
1614 * 0 on success, negative error code on failure.
1615 */
1616int drm_atomic_check_only(struct drm_atomic_state *state)
1617{
5e743737
RC
1618 struct drm_device *dev = state->dev;
1619 struct drm_mode_config *config = &dev->mode_config;
df63b999
ACO
1620 struct drm_plane *plane;
1621 struct drm_plane_state *plane_state;
1622 struct drm_crtc *crtc;
1623 struct drm_crtc_state *crtc_state;
5e743737 1624 int i, ret = 0;
cc4ceb48 1625
17a38d9c 1626 DRM_DEBUG_ATOMIC("checking %p\n", state);
cc4ceb48 1627
5721a380 1628 for_each_new_plane_in_state(state, plane, plane_state, i) {
df63b999 1629 ret = drm_atomic_plane_check(plane, plane_state);
5e743737 1630 if (ret) {
9f4c97a2
VS
1631 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
1632 plane->base.id, plane->name);
5e743737
RC
1633 return ret;
1634 }
1635 }
1636
5721a380 1637 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
df63b999 1638 ret = drm_atomic_crtc_check(crtc, crtc_state);
5e743737 1639 if (ret) {
fa3ab4c2
VS
1640 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
1641 crtc->base.id, crtc->name);
5e743737
RC
1642 return ret;
1643 }
1644 }
1645
cc4ceb48 1646 if (config->funcs->atomic_check)
5e743737
RC
1647 ret = config->funcs->atomic_check(state->dev, state);
1648
d34f20d6 1649 if (!state->allow_modeset) {
5721a380 1650 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2465ff62 1651 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
fa3ab4c2
VS
1652 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
1653 crtc->base.id, crtc->name);
d34f20d6
RC
1654 return -EINVAL;
1655 }
1656 }
1657 }
1658
5e743737 1659 return ret;
cc4ceb48
DV
1660}
1661EXPORT_SYMBOL(drm_atomic_check_only);
1662
1663/**
1664 * drm_atomic_commit - commit configuration atomically
1665 * @state: atomic configuration to check
1666 *
1667 * Note that this function can return -EDEADLK if the driver needed to acquire
1668 * more locks but encountered a deadlock. The caller must then do the usual w/w
1669 * backoff dance and restart. All other errors are fatal.
1670 *
76fede2f
ML
1671 * This function will take its own reference on @state.
1672 * Callers should always release their reference with drm_atomic_state_put().
cc4ceb48
DV
1673 *
1674 * Returns:
1675 * 0 on success, negative error code on failure.
1676 */
1677int drm_atomic_commit(struct drm_atomic_state *state)
1678{
1679 struct drm_mode_config *config = &state->dev->mode_config;
1680 int ret;
1681
1682 ret = drm_atomic_check_only(state);
1683 if (ret)
1684 return ret;
1685
a0752d4a 1686 DRM_DEBUG_ATOMIC("committing %p\n", state);
cc4ceb48
DV
1687
1688 return config->funcs->atomic_commit(state->dev, state, false);
1689}
1690EXPORT_SYMBOL(drm_atomic_commit);
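
/*
 * Illustrative sketch (not part of this file): the canonical build/commit
 * sequence with w/w backoff handling, mirroring what the atomic ioctl at the
 * end of this file does. foo_build_state() is hypothetical; everything else
 * is the real API.
 */
#if 0
static int foo_modeset(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto fini;
	}
	state->acquire_ctx = &ctx;

retry:
	ret = foo_build_state(state);		/* hypothetical */
	if (!ret)
		ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);
fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}
#endif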
1691
1692/**
d574528a 1693 * drm_atomic_nonblocking_commit - atomic nonblocking commit
cc4ceb48
DV
1694 * @state: atomic configuration to check
1695 *
1696 * Note that this function can return -EDEADLK if the driver needed to acquire
1697 * more locks but encountered a deadlock. The caller must then do the usual w/w
1698 * backoff dance and restart. All other errors are fatal.
1699 *
76fede2f
ML
1700 * This function will take its own reference on @state.
1701 * Callers should always release their reference with drm_atomic_state_put().
cc4ceb48
DV
1702 *
1703 * Returns:
1704 * 0 on success, negative error code on failure.
1705 */
b837ba0a 1706int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
cc4ceb48
DV
1707{
1708 struct drm_mode_config *config = &state->dev->mode_config;
1709 int ret;
1710
1711 ret = drm_atomic_check_only(state);
1712 if (ret)
1713 return ret;
1714
a0752d4a 1715 DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
cc4ceb48
DV
1716
1717 return config->funcs->atomic_commit(state->dev, state, true);
1718}
b837ba0a 1719EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
d34f20d6 1720
fceffb32
RC
1721static void drm_atomic_print_state(const struct drm_atomic_state *state)
1722{
1723 struct drm_printer p = drm_info_printer(state->dev->dev);
1724 struct drm_plane *plane;
1725 struct drm_plane_state *plane_state;
1726 struct drm_crtc *crtc;
1727 struct drm_crtc_state *crtc_state;
1728 struct drm_connector *connector;
1729 struct drm_connector_state *connector_state;
1730 int i;
1731
1732 DRM_DEBUG_ATOMIC("checking %p\n", state);
1733
5721a380 1734 for_each_new_plane_in_state(state, plane, plane_state, i)
fceffb32
RC
1735 drm_atomic_plane_print_state(&p, plane_state);
1736
5721a380 1737 for_each_new_crtc_in_state(state, crtc, crtc_state, i)
fceffb32
RC
1738 drm_atomic_crtc_print_state(&p, crtc_state);
1739
5721a380 1740 for_each_new_connector_in_state(state, connector, connector_state, i)
fceffb32
RC
1741 drm_atomic_connector_print_state(&p, connector_state);
1742}
1743
c2d85564
DV
1744static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
1745 bool take_locks)
6559c901
RC
1746{
1747 struct drm_mode_config *config = &dev->mode_config;
1748 struct drm_plane *plane;
1749 struct drm_crtc *crtc;
1750 struct drm_connector *connector;
613051da 1751 struct drm_connector_list_iter conn_iter;
6559c901
RC
1752
1753 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
1754 return;
1755
c2d85564
DV
1756 list_for_each_entry(plane, &config->plane_list, head) {
1757 if (take_locks)
1758 drm_modeset_lock(&plane->mutex, NULL);
6559c901 1759 drm_atomic_plane_print_state(p, plane->state);
c2d85564
DV
1760 if (take_locks)
1761 drm_modeset_unlock(&plane->mutex);
1762 }
6559c901 1763
c2d85564
DV
1764 list_for_each_entry(crtc, &config->crtc_list, head) {
1765 if (take_locks)
1766 drm_modeset_lock(&crtc->mutex, NULL);
6559c901 1767 drm_atomic_crtc_print_state(p, crtc->state);
c2d85564
DV
1768 if (take_locks)
1769 drm_modeset_unlock(&crtc->mutex);
1770 }
6559c901 1771
b982dab1 1772 drm_connector_list_iter_begin(dev, &conn_iter);
c2d85564
DV
1773 if (take_locks)
1774 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
613051da 1775 drm_for_each_connector_iter(connector, &conn_iter)
6559c901 1776 drm_atomic_connector_print_state(p, connector->state);
c2d85564
DV
1777 if (take_locks)
1778 drm_modeset_unlock(&dev->mode_config.connection_mutex);
b982dab1 1779 drm_connector_list_iter_end(&conn_iter);
6559c901 1780}
c2d85564
DV
1781
1782/**
1783 * drm_state_dump - dump entire device atomic state
1784 * @dev: the drm device
1785 * @p: where to print the state to
1786 *
1787 * Just for debugging. Drivers might want an option to dump state
1788 * to dmesg in case of error irq's. (Hint, you probably want to
1789 * ratelimit this!)
1790 *
1791 * The caller must hold all modeset locks (drm_modeset_lock_all()), or, if
1792 * this is called from an error irq handler, it should not be enabled by default.
1793 * (I.e. if you are debugging errors you might not care that this
1794 * is racy. But calling this without all modeset locks held is
1795 * not inherently safe.)
1796 */
1797void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
1798{
1799 __drm_state_dump(dev, p, false);
1800}
6559c901
RC
1801EXPORT_SYMBOL(drm_state_dump);
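
/*
 * Illustrative sketch (not part of this file): dumping the full atomic state
 * to the kernel log from driver debug code, with all modeset locks held as
 * required by the comment above. foo_dump_atomic_state() is hypothetical.
 */
#if 0
static void foo_dump_atomic_state(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	drm_modeset_lock_all(dev);
	drm_state_dump(dev, &p);
	drm_modeset_unlock_all(dev);
}
#endif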
1802
1803#ifdef CONFIG_DEBUG_FS
1804static int drm_state_info(struct seq_file *m, void *data)
1805{
1806 struct drm_info_node *node = (struct drm_info_node *) m->private;
1807 struct drm_device *dev = node->minor->dev;
1808 struct drm_printer p = drm_seq_file_printer(m);
1809
c2d85564 1810 __drm_state_dump(dev, &p, true);
6559c901
RC
1811
1812 return 0;
1813}
1814
1815/* any use in debugfs files to dump individual planes/crtc/etc? */
1816static const struct drm_info_list drm_atomic_debugfs_list[] = {
1817 {"state", drm_state_info, 0},
1818};
1819
1820int drm_atomic_debugfs_init(struct drm_minor *minor)
1821{
1822 return drm_debugfs_create_files(drm_atomic_debugfs_list,
1823 ARRAY_SIZE(drm_atomic_debugfs_list),
1824 minor->debugfs_root, minor);
1825}
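/*
 * The "state" file registered above can be read from userspace, e.g.:
 *   # cat /sys/kernel/debug/dri/0/state
 * (assuming debugfs is mounted at /sys/kernel/debug and the device is DRM
 * minor 0)
 */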
1826#endif
1827
d34f20d6
RC
1828/*
1829 * The big monster ioctl
1830 */
1831
1832static struct drm_pending_vblank_event *create_vblank_event(
beaf5af4 1833 struct drm_device *dev, uint64_t user_data)
d34f20d6
RC
1834{
1835 struct drm_pending_vblank_event *e = NULL;
d34f20d6
RC
1836
1837 e = kzalloc(sizeof *e, GFP_KERNEL);
2dd500f1
DV
1838 if (!e)
1839 return NULL;
d34f20d6
RC
1840
1841 e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
2dd500f1 1842 e->event.base.length = sizeof(e->event);
d34f20d6 1843 e->event.user_data = user_data;
d34f20d6 1844
2dd500f1 1845 return e;
d34f20d6
RC
1846}
1847
1848static int atomic_set_prop(struct drm_atomic_state *state,
1849 struct drm_mode_object *obj, struct drm_property *prop,
1850 uint64_t prop_value)
1851{
1852 struct drm_mode_object *ref;
1853 int ret;
1854
1855 if (!drm_property_change_valid_get(prop, prop_value, &ref))
1856 return -EINVAL;
1857
1858 switch (obj->type) {
1859 case DRM_MODE_OBJECT_CONNECTOR: {
1860 struct drm_connector *connector = obj_to_connector(obj);
1861 struct drm_connector_state *connector_state;
1862
1863 connector_state = drm_atomic_get_connector_state(state, connector);
1864 if (IS_ERR(connector_state)) {
1865 ret = PTR_ERR(connector_state);
1866 break;
1867 }
1868
1869 ret = drm_atomic_connector_set_property(connector,
1870 connector_state, prop, prop_value);
1871 break;
1872 }
1873 case DRM_MODE_OBJECT_CRTC: {
1874 struct drm_crtc *crtc = obj_to_crtc(obj);
1875 struct drm_crtc_state *crtc_state;
1876
1877 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1878 if (IS_ERR(crtc_state)) {
1879 ret = PTR_ERR(crtc_state);
1880 break;
1881 }
1882
1883 ret = drm_atomic_crtc_set_property(crtc,
1884 crtc_state, prop, prop_value);
1885 break;
1886 }
1887 case DRM_MODE_OBJECT_PLANE: {
1888 struct drm_plane *plane = obj_to_plane(obj);
1889 struct drm_plane_state *plane_state;
1890
1891 plane_state = drm_atomic_get_plane_state(state, plane);
1892 if (IS_ERR(plane_state)) {
1893 ret = PTR_ERR(plane_state);
1894 break;
1895 }
1896
1897 ret = drm_atomic_plane_set_property(plane,
1898 plane_state, prop, prop_value);
1899 break;
1900 }
1901 default:
1902 ret = -EINVAL;
1903 break;
1904 }
1905
1906 drm_property_change_valid_put(prop, ref);
1907 return ret;
1908}
1909
0f45c26f 1910/**
9744bf41 1911 * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
0f45c26f
ML
1912 *
1913 * @dev: drm device to check.
1914 * @plane_mask: plane mask for planes that were updated.
1915 * @ret: return value, can be -EDEADLK for a retry.
1916 *
d574528a
DV
1917 * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
1918 * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
1919 * is a common operation for each atomic update, so this call is split off as a
1920 * helper.
0f45c26f
ML
1921 */
1922void drm_atomic_clean_old_fb(struct drm_device *dev,
1923 unsigned plane_mask,
1924 int ret)
1925{
1926 struct drm_plane *plane;
1927
1928 /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
1929 * locks (ie. while it is still safe to deref plane->state). We
1930 * need to do this here because the driver entry points cannot
1931 * distinguish between legacy and atomic ioctls.
1932 */
1933 drm_for_each_plane_mask(plane, dev, plane_mask) {
1934 if (ret == 0) {
1935 struct drm_framebuffer *new_fb = plane->state->fb;
1936 if (new_fb)
a4a69da0 1937 drm_framebuffer_get(new_fb);
0f45c26f
ML
1938 plane->fb = new_fb;
1939 plane->crtc = plane->state->crtc;
1940
1941 if (plane->old_fb)
a4a69da0 1942 drm_framebuffer_put(plane->old_fb);
0f45c26f
ML
1943 }
1944 plane->old_fb = NULL;
1945 }
1946}
1947EXPORT_SYMBOL(drm_atomic_clean_old_fb);
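
/*
 * Illustrative sketch (not part of this file): how a legacy/ioctl path uses
 * the helper above. The old fb is remembered and the plane added to the mask
 * before the commit attempt, and the helper is called once with the final
 * return value, exactly as in the atomic ioctl further down in this file.
 * foo_legacy_update_plane() is hypothetical.
 */
#if 0
static int foo_legacy_update_plane(struct drm_device *dev,
				   struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	unsigned plane_mask = 0;
	int ret;

	/* Remember the old fb so the helper can drop its reference later. */
	plane->old_fb = plane->fb;
	plane_mask |= 1 << drm_plane_index(plane);

	ret = drm_atomic_commit(state);

	/* Fixes up plane->fb/->crtc and clears old_fb, whatever ret says. */
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	return ret;
}
#endif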
1948
9a83a71a
GP
1949/**
1950 * DOC: explicit fencing properties
1951 *
1952 * Explicit fencing allows userspace to control the buffer synchronization
1953 * between devices. A fence or a group of fences is transferred to/from
1954 * userspace using Sync File fds and there are two DRM properties for that.
1955 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
1956 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
1957 *
1958 * As a contrast, with implicit fencing the kernel keeps track of any
1959 * ongoing rendering, and automatically ensures that the atomic update waits
1960 * for any pending rendering to complete. For shared buffers represented with
d574528a 1961 * a &struct dma_buf this is tracked in &struct reservation_object.
9a83a71a
GP
1962 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
1963 * whereas explicit fencing is what Android wants.
1964 *
1965 * "IN_FENCE_FD”:
1966 * Use this property to pass a fence that DRM should wait on before
1967 * proceeding with the Atomic Commit request and show the framebuffer for
1968 * the plane on the screen. The fence can be either a normal fence or a
1969 * merged one, the sync_file framework will handle both cases and use a
1970 * fence_array if a merged fence is received. Passing -1 here means no
1971 * fences to wait on.
1972 *
1973 * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
1974 * it will only check if the Sync File is a valid one.
1975 *
1976 * On the driver side the fence is stored on the @fence parameter of
ea0dd85a 1977 * &struct drm_plane_state. Drivers which also support implicit fencing
9a83a71a
GP
1978 * should set the implicit fence using drm_atomic_set_fence_for_plane(),
1979 * to make sure there's consistent behaviour between drivers in precedence
1980 * of implicit vs. explicit fencing.
1981 *
1982 * "OUT_FENCE_PTR”:
1983 * Use this property to pass a file descriptor pointer to DRM. Once the
1984 * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
1985 * the file descriptor number of a Sync File. This Sync File contains the
1986 * CRTC fence that will be signaled when all framebuffers present on the
1987 * Atomic Commit request for that given CRTC are scanned out on the
1988 * screen.
1989 *
1990 * The Atomic Commit request fails if an invalid pointer is passed. If the
1991 * Atomic Commit request fails for any other reason the out fence fd
1992 * returned will be -1. On an Atomic Commit with the
1993 * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
1994 *
1995 * Note that out-fences don't have a special interface to drivers and are
ea0dd85a 1996 * internally represented by a &struct drm_pending_vblank_event in struct
9a83a71a
GP
1997 * &drm_crtc_state, which is also used by the nonblocking atomic commit
1998 * helpers and for the DRM event handling for existing userspace.
1999 */
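
/*
 * Illustrative sketch (not part of this file): a driver's plane ->prepare_fb
 * hook following the precedence rule described above. struct foo_bo, its
 * ->resv member and foo_fb_to_bo() are hypothetical driver internals; the
 * reservation and drm_atomic_set_fence_for_plane() calls are real (the latter
 * keeps an explicit IN_FENCE_FD fence if userspace already attached one).
 */
#if 0
static int foo_plane_prepare_fb(struct drm_plane *plane,
				struct drm_plane_state *new_state)
{
	struct foo_bo *bo;		/* hypothetical driver buffer object */
	struct dma_fence *fence;

	if (!new_state->fb)
		return 0;

	bo = foo_fb_to_bo(new_state->fb);	/* hypothetical helper */
	fence = reservation_object_get_excl_rcu(bo->resv);

	/* Attaches the implicit fence unless an explicit one is already set. */
	drm_atomic_set_fence_for_plane(new_state, fence);

	return 0;
}
#endif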
2000
beaf5af4 2001struct drm_out_fence_state {
7e9081c5 2002 s32 __user *out_fence_ptr;
beaf5af4
GP
2003 struct sync_file *sync_file;
2004 int fd;
2005};
2006
2007static int setup_out_fence(struct drm_out_fence_state *fence_state,
2008 struct dma_fence *fence)
2009{
2010 fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
2011 if (fence_state->fd < 0)
2012 return fence_state->fd;
2013
2014 if (put_user(fence_state->fd, fence_state->out_fence_ptr))
2015 return -EFAULT;
2016
2017 fence_state->sync_file = sync_file_create(fence);
2018 if (!fence_state->sync_file)
2019 return -ENOMEM;
2020
2021 return 0;
2022}
2023
2024static int prepare_crtc_signaling(struct drm_device *dev,
2025 struct drm_atomic_state *state,
2026 struct drm_mode_atomic *arg,
2027 struct drm_file *file_priv,
2028 struct drm_out_fence_state **fence_state,
2029 unsigned int *num_fences)
2030{
2031 struct drm_crtc *crtc;
2032 struct drm_crtc_state *crtc_state;
2033 int i, ret;
2034
2035 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
2036 return 0;
2037
5721a380 2038 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
7e9081c5 2039 s32 __user *fence_ptr;
beaf5af4
GP
2040
2041 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
2042
2043 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
2044 struct drm_pending_vblank_event *e;
2045
2046 e = create_vblank_event(dev, arg->user_data);
2047 if (!e)
2048 return -ENOMEM;
2049
2050 crtc_state->event = e;
2051 }
2052
2053 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
2054 struct drm_pending_vblank_event *e = crtc_state->event;
2055
2056 if (!file_priv)
2057 continue;
2058
2059 ret = drm_event_reserve_init(dev, file_priv, &e->base,
2060 &e->event.base);
2061 if (ret) {
2062 kfree(e);
2063 crtc_state->event = NULL;
2064 return ret;
2065 }
2066 }
2067
2068 if (fence_ptr) {
2069 struct dma_fence *fence;
2070 struct drm_out_fence_state *f;
2071
2072 f = krealloc(*fence_state, sizeof(**fence_state) *
2073 (*num_fences + 1), GFP_KERNEL);
2074 if (!f)
2075 return -ENOMEM;
2076
2077 memset(&f[*num_fences], 0, sizeof(*f));
2078
2079 f[*num_fences].out_fence_ptr = fence_ptr;
2080 *fence_state = f;
2081
35f8cc3b 2082 fence = drm_crtc_create_fence(crtc);
beaf5af4
GP
2083 if (!fence)
2084 return -ENOMEM;
2085
2086 ret = setup_out_fence(&f[(*num_fences)++], fence);
2087 if (ret) {
2088 dma_fence_put(fence);
2089 return ret;
2090 }
2091
2092 crtc_state->event->base.fence = fence;
2093 }
2094 }
2095
2096 return 0;
2097}
2098
2099static void complete_crtc_signaling(struct drm_device *dev,
2100 struct drm_atomic_state *state,
2101 struct drm_out_fence_state *fence_state,
2102 unsigned int num_fences,
2103 bool install_fds)
2104{
2105 struct drm_crtc *crtc;
2106 struct drm_crtc_state *crtc_state;
2107 int i;
2108
2109 if (install_fds) {
2110 for (i = 0; i < num_fences; i++)
2111 fd_install(fence_state[i].fd,
2112 fence_state[i].sync_file->file);
2113
2114 kfree(fence_state);
2115 return;
2116 }
2117
5721a380 2118 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
92c715fc 2119 struct drm_pending_vblank_event *event = crtc_state->event;
beaf5af4 2120 /*
92c715fc
ML
2121 * Free the allocated event. drm_atomic_helper_setup_commit
2122 * can allocate an event too, so only free it if it's ours
2123 * to prevent a double free in drm_atomic_state_clear.
beaf5af4 2124 */
92c715fc
ML
2125 if (event && (event->base.fence || event->base.file_priv)) {
2126 drm_event_cancel_free(dev, &event->base);
2127 crtc_state->event = NULL;
2128 }
beaf5af4
GP
2129 }
2130
2131 if (!fence_state)
2132 return;
2133
2134 for (i = 0; i < num_fences; i++) {
2135 if (fence_state[i].sync_file)
2136 fput(fence_state[i].sync_file->file);
2137 if (fence_state[i].fd >= 0)
2138 put_unused_fd(fence_state[i].fd);
2139
2140 /* If this fails log error to the user */
2141 if (fence_state[i].out_fence_ptr &&
2142 put_user(-1, fence_state[i].out_fence_ptr))
2143 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
2144 }
2145
2146 kfree(fence_state);
2147}
2148
d34f20d6
RC
2149int drm_mode_atomic_ioctl(struct drm_device *dev,
2150 void *data, struct drm_file *file_priv)
2151{
2152 struct drm_mode_atomic *arg = data;
2153 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
2154 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
2155 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
2156 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
2157 unsigned int copied_objs, copied_props;
2158 struct drm_atomic_state *state;
2159 struct drm_modeset_acquire_ctx ctx;
2160 struct drm_plane *plane;
beaf5af4 2161 struct drm_out_fence_state *fence_state = NULL;
45723728 2162 unsigned plane_mask;
d34f20d6 2163 int ret = 0;
beaf5af4 2164 unsigned int i, j, num_fences = 0;
d34f20d6
RC
2165
2166 /* disallow for drivers not supporting atomic: */
2167 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
2168 return -EINVAL;
2169
2170 /* disallow for userspace that has not enabled atomic cap (even
2171 * though this may be a bit overkill, since legacy userspace
2172 * wouldn't know how to call this ioctl)
2173 */
2174 if (!file_priv->atomic)
2175 return -EINVAL;
2176
2177 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
2178 return -EINVAL;
2179
2180 if (arg->reserved)
2181 return -EINVAL;
2182
2183 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
2184 !dev->mode_config.async_page_flip)
2185 return -EINVAL;
2186
2187 /* can't test and expect an event at the same time. */
2188 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
2189 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
2190 return -EINVAL;
2191
2192 drm_modeset_acquire_init(&ctx, 0);
2193
2194 state = drm_atomic_state_alloc(dev);
2195 if (!state)
2196 return -ENOMEM;
2197
2198 state->acquire_ctx = &ctx;
2199 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
2200
2201retry:
45723728 2202 plane_mask = 0;
d34f20d6
RC
2203 copied_objs = 0;
2204 copied_props = 0;
2205
2206 for (i = 0; i < arg->count_objs; i++) {
2207 uint32_t obj_id, count_props;
2208 struct drm_mode_object *obj;
2209
2210 if (get_user(obj_id, objs_ptr + copied_objs)) {
2211 ret = -EFAULT;
ec9f932e 2212 goto out;
d34f20d6
RC
2213 }
2214
2215 obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
b164d31f
DA
2216 if (!obj) {
2217 ret = -ENOENT;
2218 goto out;
2219 }
2220
2221 if (!obj->properties) {
020a218f 2222 drm_mode_object_put(obj);
d34f20d6 2223 ret = -ENOENT;
ec9f932e 2224 goto out;
d34f20d6
RC
2225 }
2226
d34f20d6 2227 if (get_user(count_props, count_props_ptr + copied_objs)) {
020a218f 2228 drm_mode_object_put(obj);
d34f20d6 2229 ret = -EFAULT;
ec9f932e 2230 goto out;
d34f20d6
RC
2231 }
2232
2233 copied_objs++;
2234
2235 for (j = 0; j < count_props; j++) {
2236 uint32_t prop_id;
2237 uint64_t prop_value;
2238 struct drm_property *prop;
2239
2240 if (get_user(prop_id, props_ptr + copied_props)) {
020a218f 2241 drm_mode_object_put(obj);
d34f20d6 2242 ret = -EFAULT;
ec9f932e 2243 goto out;
d34f20d6
RC
2244 }
2245
f92f053b 2246 prop = drm_mode_obj_find_prop_id(obj, prop_id);
d34f20d6 2247 if (!prop) {
020a218f 2248 drm_mode_object_put(obj);
d34f20d6 2249 ret = -ENOENT;
ec9f932e 2250 goto out;
d34f20d6
RC
2251 }
2252
42c5814c
GR
2253 if (copy_from_user(&prop_value,
2254 prop_values_ptr + copied_props,
2255 sizeof(prop_value))) {
020a218f 2256 drm_mode_object_put(obj);
d34f20d6 2257 ret = -EFAULT;
ec9f932e 2258 goto out;
d34f20d6
RC
2259 }
2260
2261 ret = atomic_set_prop(state, obj, prop, prop_value);
b164d31f 2262 if (ret) {
020a218f 2263 drm_mode_object_put(obj);
ec9f932e 2264 goto out;
b164d31f 2265 }
d34f20d6
RC
2266
2267 copied_props++;
2268 }
a9cc54ee 2269
c4749c9a
ML
2270 if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
2271 !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
a9cc54ee
ML
2272 plane = obj_to_plane(obj);
2273 plane_mask |= (1 << drm_plane_index(plane));
2274 plane->old_fb = plane->fb;
2275 }
020a218f 2276 drm_mode_object_put(obj);
d34f20d6
RC
2277 }
2278
beaf5af4
GP
2279 ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
2280 &num_fences);
2281 if (ret)
2282 goto out;
d34f20d6
RC
2283
2284 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
2285 ret = drm_atomic_check_only(state);
d34f20d6 2286 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
b837ba0a 2287 ret = drm_atomic_nonblocking_commit(state);
d34f20d6 2288 } else {
fceffb32
RC
2289 if (unlikely(drm_debug & DRM_UT_STATE))
2290 drm_atomic_print_state(state);
2291
d34f20d6
RC
2292 ret = drm_atomic_commit(state);
2293 }
2294
ec9f932e 2295out:
0f45c26f 2296 drm_atomic_clean_old_fb(dev, plane_mask, ret);
d34f20d6 2297
beaf5af4 2298 complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
c4749c9a 2299
ec9f932e
ML
2300 if (ret == -EDEADLK) {
2301 drm_atomic_state_clear(state);
2302 drm_modeset_backoff(&ctx);
2303 goto retry;
2304 }
d34f20d6 2305
0853695c 2306 drm_atomic_state_put(state);
d34f20d6
RC
2307
2308 drm_modeset_drop_locks(&ctx);
2309 drm_modeset_acquire_fini(&ctx);
2310
2311 return ret;
d34f20d6 2312}
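
/*
 * Illustrative sketch (not part of this file, and not kernel code): how
 * userspace typically drives this ioctl through libdrm's atomic request
 * helpers instead of packing the objs/props/values arrays by hand. The
 * property IDs passed in are placeholders that must be looked up via the
 * object-properties ioctls first; foo_flip() is hypothetical.
 */
#if 0
static int foo_flip(int fd, uint32_t plane_id, uint32_t crtc_id,
		    uint32_t fb_id_prop, uint32_t crtc_id_prop,
		    uint32_t out_fence_prop, uint32_t fb_id)
{
	int32_t out_fence_fd = -1;
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, plane_id, fb_id_prop, fb_id);
	drmModeAtomicAddProperty(req, plane_id, crtc_id_prop, crtc_id);
	drmModeAtomicAddProperty(req, crtc_id, out_fence_prop,
				 (uint64_t)(uintptr_t)&out_fence_fd);

	ret = drmModeAtomicCommit(fd, req,
				  DRM_MODE_ATOMIC_NONBLOCK |
				  DRM_MODE_PAGE_FLIP_EVENT, NULL);

	drmModeAtomicFree(req);
	return ret;
}
#endif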