drm: Add acquire ctx parameter to ->set_config
[linux-2.6-block.git] drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>

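/* Fence callback for a pending flip: drop our fence reference and kick the
 * flip worker so it can move on to the remaining fences.
 */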
static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

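/* Consume one fence from the flip work. Returns true if a callback was
 * installed and the worker must bail out until the fence signals.
 */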
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
				     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

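/* Worker that performs the actual (mmio) flip, once all fences have signaled
 * and the CRTC is past the vblank preceding the targeted one.
 */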
static void amdgpu_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

	if (amdgpu_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
					&vpos, &hpos, NULL, NULL,
					&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, false);
	if (likely(r == 0)) {
		r = amdgpu_bo_unpin(work->old_abo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

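/* Error-path helpers: drop every reference a flip work item still holds and
 * unwind the reserve/pin steps of amdgpu_crtc_prepare_flip() in reverse order.
 */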
static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
{
	int i;

	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);
}

static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
					  struct amdgpu_bo *new_abo)
{
	amdgpu_bo_unreserve(new_abo);
	amdgpu_flip_work_cleanup(work);
}

static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
				      struct amdgpu_bo *new_abo)
{
	if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
		DRM_ERROR("failed to unpin new abo in error path\n");
	amdgpu_flip_cleanup_unreserve(work, new_abo);
}

void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
				  struct amdgpu_bo *new_abo)
{
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		amdgpu_flip_work_cleanup(work);
		return;
	}
	amdgpu_flip_cleanup_unpin(work, new_abo);
}

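/* First half of a page flip: allocate the work item, pin the new buffer and
 * collect its fences. On success the caller owns *work_p and *new_abo_p and
 * must either submit the flip or clean up with amdgpu_crtc_cleanup_flip_ctx().
 */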
int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
			     struct drm_framebuffer *fb,
			     struct drm_pending_vblank_event *event,
			     uint32_t page_flip_flags,
			     uint32_t target,
			     struct amdgpu_flip_work **work_p,
			     struct amdgpu_bo **new_abo_p)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *old_amdgpu_fb;
	struct amdgpu_framebuffer *new_amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	u64 base;
	int r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
	obj = old_amdgpu_fb->obj;

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
	obj = new_amdgpu_fb->obj;
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to pin new abo buffer before flip\n");
		goto unreserve;
	}

	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
					      &work->shared_count,
					      &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	work->base = base;
	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	*work_p = work;
	*new_abo_p = new_abo;

	return 0;

pflip_cleanup:
	amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
	return r;

unpin:
	amdgpu_flip_cleanup_unpin(work, new_abo);
	return r;

unreserve:
	amdgpu_flip_cleanup_unreserve(work, new_abo);
	return r;

cleanup:
	amdgpu_flip_work_cleanup(work);
	return r;
}

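/* Second half of a page flip: publish the prepared work item under the event
 * lock and hand it to the flip worker (or its fence callbacks) from here on.
 */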
void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
			     struct drm_framebuffer *fb,
			     struct amdgpu_flip_work *work,
			     struct amdgpu_bo *new_abo)
{
	unsigned long flags;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER(
		"crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
		amdgpu_crtc->crtc_id, amdgpu_crtc, work);

	amdgpu_flip_work_func(&work->flip_work.work);
}

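/* drm_crtc_funcs.page_flip_target hook: prepare and submit the flip in one
 * go; the acquire ctx is threaded through by the DRM core.
 */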
int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_pending_vblank_event *event,
				 uint32_t page_flip_flags,
				 uint32_t target,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct amdgpu_bo *new_abo;
	struct amdgpu_flip_work *work;
	int r;

	r = amdgpu_crtc_prepare_flip(crtc, fb, event, page_flip_flags,
				     target, &work, &new_abo);
	if (r)
		return r;

	amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);

	return 0;
}

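/* drm_crtc_funcs.set_config hook: wrap the CRTC helper with runtime-PM
 * bookkeeping so the GPU stays powered while any CRTC is enabled.
 */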
int amdgpu_crtc_set_config(struct drm_mode_set *set,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

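/* Dump connector/encoder routing and DDC register details to the kernel log. */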
void amdgpu_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("AMDGPU Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
}

/**
 * amdgpu_ddc_probe
 *
 * Check whether a display is attached by probing the connector's DDC bus:
 * read back the start of the EDID block, over native I2C or over the DP
 * AUX channel when @use_aux is set, and validate the EDID header.
 */
bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
		      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector
		 */
		return false;
	}
	return true;
}

static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(amdgpu_fb);
}

static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = amdgpu_user_framebuffer_destroy,
	.create_handle = amdgpu_user_framebuffer_create_handle,
};

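/* Fill an amdgpu_framebuffer from mode metadata and a backing GEM object,
 * then register it with the DRM core.
 */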
int
amdgpu_framebuffer_init(struct drm_device *dev,
			struct amdgpu_framebuffer *rfb,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;

	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

static struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &amdgpu_fb->base;
}

static void amdgpu_output_poll_changed(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_fb_output_poll_changed(adev);
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

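/* Create the driver-private KMS properties (coherent, load detection,
 * underscan, audio, dither) advertised on amdgpu connectors.
 */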
int amdgpu_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev->ddev);

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev->ddev, 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev->ddev, 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev->ddev, 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	return 0;
}

void amdgpu_update_display_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

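/* CRTC mode fixup: pick an RMX scaling type, apply HDMI underscan borders,
 * and precompute the h/v scaling ratios as 20.12 fixed-point values.
 */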
bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);
		amdgpu_connector = to_amdgpu_connector(connector);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

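/* Map a CRTC index to its vblank interrupt source. */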
int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}