2 * Copyright 2011 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <linux/dma-mapping.h>
26 #include <linux/hdmi.h>
29 #include <drm/drm_atomic.h>
30 #include <drm/drm_atomic_helper.h>
31 #include <drm/drm_crtc_helper.h>
32 #include <drm/drm_dp_helper.h>
33 #include <drm/drm_fb_helper.h>
34 #include <drm/drm_plane_helper.h>
35 #include <drm/drm_edid.h>
37 #include <nvif/class.h>
38 #include <nvif/cl0002.h>
39 #include <nvif/cl5070.h>
40 #include <nvif/cl507a.h>
41 #include <nvif/cl507b.h>
42 #include <nvif/cl507c.h>
43 #include <nvif/cl507d.h>
44 #include <nvif/cl507e.h>
45 #include <nvif/event.h>
47 #include "nouveau_drv.h"
48 #include "nouveau_dma.h"
49 #include "nouveau_gem.h"
50 #include "nouveau_connector.h"
51 #include "nouveau_encoder.h"
52 #include "nouveau_crtc.h"
53 #include "nouveau_fence.h"
54 #include "nouveau_fbcon.h"
55 #include "nv50_display.h"
59 #define EVO_MASTER (0x00)
60 #define EVO_FLIP(c) (0x01 + (c))
61 #define EVO_OVLY(c) (0x05 + (c))
62 #define EVO_OIMM(c) (0x09 + (c))
63 #define EVO_CURS(c) (0x0d + (c))
65 /* offsets in shared sync bo of various structures */
66 #define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
67 #define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
68 #define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
69 #define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
70 #define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
71 #define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
73 /******************************************************************************
75 *****************************************************************************/
76 #define nv50_atom(p) container_of((p), struct nv50_atom, state)
79 struct drm_atomic_state state;
81 struct list_head outp;
86 struct nv50_outp_atom {
87 struct list_head head;
89 struct drm_encoder *encoder;
107 #define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
109 struct nv50_head_atom {
110 struct drm_crtc_state state;
119 struct nv50_head_mode {
220 static inline struct nv50_head_atom *
221 nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
223 struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
225 return (void *)statec;
226 return nv50_head_atom(statec);
229 #define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
231 struct nv50_wndw_atom {
232 struct drm_plane_state state;
294 /******************************************************************************
296 *****************************************************************************/
299 struct nvif_object user;
300 struct nvif_device *device;
304 nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
305 const s32 *oclass, u8 head, void *data, u32 size,
306 struct nv50_chan *chan)
308 struct nvif_sclass *sclass;
311 chan->device = device;
313 ret = n = nvif_object_sclass_get(disp, &sclass);
318 for (i = 0; i < n; i++) {
319 if (sclass[i].oclass == oclass[0]) {
320 ret = nvif_object_init(disp, 0, oclass[0],
321 data, size, &chan->user);
323 nvif_object_map(&chan->user, NULL, 0);
324 nvif_object_sclass_put(&sclass);
331 nvif_object_sclass_put(&sclass);
336 nv50_chan_destroy(struct nv50_chan *chan)
338 nvif_object_fini(&chan->user);
341 /******************************************************************************
343 *****************************************************************************/
346 struct nv50_chan base;
350 nv50_pioc_destroy(struct nv50_pioc *pioc)
352 nv50_chan_destroy(&pioc->base);
356 nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
357 const s32 *oclass, u8 head, void *data, u32 size,
358 struct nv50_pioc *pioc)
360 return nv50_chan_create(device, disp, oclass, head, data, size,
364 /******************************************************************************
366 *****************************************************************************/
369 struct nv50_pioc base;
373 nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
374 int head, struct nv50_oimm *oimm)
376 struct nv50_disp_cursor_v0 args = {
379 static const s32 oclass[] = {
388 return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
392 /******************************************************************************
394 *****************************************************************************/
396 struct nv50_dmac_ctxdma {
397 struct list_head head;
398 struct nvif_object object;
402 struct nv50_chan base;
406 struct nvif_object sync;
407 struct nvif_object vram;
408 struct list_head ctxdma;
410 /* Protects against concurrent pushbuf access to this channel, lock is
411 * grabbed by evo_wait (if the pushbuf reservation is successful) and
412 * dropped again by evo_kick. */
417 nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
419 nvif_object_fini(&ctxdma->object);
420 list_del(&ctxdma->head);
424 static struct nv50_dmac_ctxdma *
425 nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
427 struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
428 struct nv50_dmac_ctxdma *ctxdma;
429 const u8 kind = fb->nvbo->kind;
430 const u32 handle = 0xfb000000 | kind;
432 struct nv_dma_v0 base;
434 struct nv50_dma_v0 nv50;
435 struct gf100_dma_v0 gf100;
436 struct gf119_dma_v0 gf119;
439 u32 argc = sizeof(args.base);
442 list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
443 if (ctxdma->object.handle == handle)
447 if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
448 return ERR_PTR(-ENOMEM);
449 list_add(&ctxdma->head, &dmac->ctxdma);
451 args.base.target = NV_DMA_V0_TARGET_VRAM;
452 args.base.access = NV_DMA_V0_ACCESS_RDWR;
454 args.base.limit = drm->client.device.info.ram_user - 1;
456 if (drm->client.device.info.chipset < 0x80) {
457 args.nv50.part = NV50_DMA_V0_PART_256;
458 argc += sizeof(args.nv50);
460 if (drm->client.device.info.chipset < 0xc0) {
461 args.nv50.part = NV50_DMA_V0_PART_256;
462 args.nv50.kind = kind;
463 argc += sizeof(args.nv50);
465 if (drm->client.device.info.chipset < 0xd0) {
466 args.gf100.kind = kind;
467 argc += sizeof(args.gf100);
469 args.gf119.page = GF119_DMA_V0_PAGE_LP;
470 args.gf119.kind = kind;
471 argc += sizeof(args.gf119);
474 ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
475 &args, argc, &ctxdma->object);
477 nv50_dmac_ctxdma_del(ctxdma);
485 nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
487 struct nvif_device *device = dmac->base.device;
488 struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;
490 list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
491 nv50_dmac_ctxdma_del(ctxdma);
494 nvif_object_fini(&dmac->vram);
495 nvif_object_fini(&dmac->sync);
497 nv50_chan_destroy(&dmac->base);
500 struct device *dev = nvxx_device(device)->dev;
501 dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
506 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
507 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
508 struct nv50_dmac *dmac)
510 struct nv50_disp_core_channel_dma_v0 *args = data;
511 struct nvif_object pushbuf;
514 mutex_init(&dmac->lock);
515 INIT_LIST_HEAD(&dmac->ctxdma);
517 dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
518 &dmac->handle, GFP_KERNEL);
522 ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
523 &(struct nv_dma_v0) {
524 .target = NV_DMA_V0_TARGET_PCI_US,
525 .access = NV_DMA_V0_ACCESS_RD,
526 .start = dmac->handle + 0x0000,
527 .limit = dmac->handle + 0x0fff,
528 }, sizeof(struct nv_dma_v0), &pushbuf);
532 args->pushbuf = nvif_handle(&pushbuf);
534 ret = nv50_chan_create(device, disp, oclass, head, data, size,
536 nvif_object_fini(&pushbuf);
540 ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
541 &(struct nv_dma_v0) {
542 .target = NV_DMA_V0_TARGET_VRAM,
543 .access = NV_DMA_V0_ACCESS_RDWR,
544 .start = syncbuf + 0x0000,
545 .limit = syncbuf + 0x0fff,
546 }, sizeof(struct nv_dma_v0),
551 ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
552 &(struct nv_dma_v0) {
553 .target = NV_DMA_V0_TARGET_VRAM,
554 .access = NV_DMA_V0_ACCESS_RDWR,
556 .limit = device->info.ram_user - 1,
557 }, sizeof(struct nv_dma_v0),
565 /******************************************************************************
567 *****************************************************************************/
570 struct nv50_dmac base;
574 nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
575 u64 syncbuf, struct nv50_mast *core)
577 struct nv50_disp_core_channel_dma_v0 args = {
578 .pushbuf = 0xb0007d00,
580 static const s32 oclass[] = {
581 GP102_DISP_CORE_CHANNEL_DMA,
582 GP100_DISP_CORE_CHANNEL_DMA,
583 GM200_DISP_CORE_CHANNEL_DMA,
584 GM107_DISP_CORE_CHANNEL_DMA,
585 GK110_DISP_CORE_CHANNEL_DMA,
586 GK104_DISP_CORE_CHANNEL_DMA,
587 GF110_DISP_CORE_CHANNEL_DMA,
588 GT214_DISP_CORE_CHANNEL_DMA,
589 GT206_DISP_CORE_CHANNEL_DMA,
590 GT200_DISP_CORE_CHANNEL_DMA,
591 G82_DISP_CORE_CHANNEL_DMA,
592 NV50_DISP_CORE_CHANNEL_DMA,
596 return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
597 syncbuf, &core->base);
600 /******************************************************************************
602 *****************************************************************************/
605 struct nv50_dmac base;
611 nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
612 int head, u64 syncbuf, struct nv50_sync *base)
614 struct nv50_disp_base_channel_dma_v0 args = {
615 .pushbuf = 0xb0007c00 | head,
618 static const s32 oclass[] = {
619 GK110_DISP_BASE_CHANNEL_DMA,
620 GK104_DISP_BASE_CHANNEL_DMA,
621 GF110_DISP_BASE_CHANNEL_DMA,
622 GT214_DISP_BASE_CHANNEL_DMA,
623 GT200_DISP_BASE_CHANNEL_DMA,
624 G82_DISP_BASE_CHANNEL_DMA,
625 NV50_DISP_BASE_CHANNEL_DMA,
629 return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
630 syncbuf, &base->base);
633 /******************************************************************************
635 *****************************************************************************/
638 struct nv50_dmac base;
642 nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
643 int head, u64 syncbuf, struct nv50_ovly *ovly)
645 struct nv50_disp_overlay_channel_dma_v0 args = {
646 .pushbuf = 0xb0007e00 | head,
649 static const s32 oclass[] = {
650 GK104_DISP_OVERLAY_CONTROL_DMA,
651 GF110_DISP_OVERLAY_CONTROL_DMA,
652 GT214_DISP_OVERLAY_CHANNEL_DMA,
653 GT200_DISP_OVERLAY_CHANNEL_DMA,
654 G82_DISP_OVERLAY_CHANNEL_DMA,
655 NV50_DISP_OVERLAY_CHANNEL_DMA,
659 return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
660 syncbuf, &ovly->base);
664 struct nouveau_crtc base;
666 struct nouveau_bo *nvbo[2];
669 struct nv50_ovly ovly;
670 struct nv50_oimm oimm;
673 #define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
674 #define nv50_ovly(c) (&nv50_head(c)->ovly)
675 #define nv50_oimm(c) (&nv50_head(c)->oimm)
676 #define nv50_chan(c) (&(c)->base.base)
677 #define nv50_vers(c) nv50_chan(c)->user.oclass
680 struct nvif_object *disp;
681 struct nv50_mast mast;
683 struct nouveau_bo *sync;
688 static struct nv50_disp *
689 nv50_disp(struct drm_device *dev)
691 return nouveau_display(dev)->priv;
694 #define nv50_mast(d) (&nv50_disp(d)->mast)
696 /******************************************************************************
697 * EVO channel helpers
698 *****************************************************************************/
700 evo_wait(void *evoc, int nr)
702 struct nv50_dmac *dmac = evoc;
703 struct nvif_device *device = dmac->base.device;
704 u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
706 mutex_lock(&dmac->lock);
707 if (put + nr >= (PAGE_SIZE / 4) - 8) {
708 dmac->ptr[put] = 0x20000000;
710 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
711 if (nvif_msec(device, 2000,
712 if (!nvif_rd32(&dmac->base.user, 0x0004))
715 mutex_unlock(&dmac->lock);
716 pr_err("nouveau: evo channel stalled\n");
723 return dmac->ptr + put;
727 evo_kick(u32 *push, void *evoc)
729 struct nv50_dmac *dmac = evoc;
730 nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
731 mutex_unlock(&dmac->lock);
734 #define evo_mthd(p, m, s) do { \
735 const u32 _m = (m), _s = (s); \
736 if (drm_debug & DRM_UT_KMS) \
737 pr_err("%04x %d %s\n", _m, _s, __func__); \
738 *((p)++) = ((_s << 18) | _m); \
741 #define evo_data(p, d) do { \
742 const u32 _d = (d); \
743 if (drm_debug & DRM_UT_KMS) \
744 pr_err("\t%08x\n", _d); \
748 /******************************************************************************
750 *****************************************************************************/
751 #define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
754 const struct nv50_wndw_func *func;
755 struct nv50_dmac *dmac;
757 struct drm_plane plane;
759 struct nvif_notify notify;
765 struct nv50_wndw_func {
766 void *(*dtor)(struct nv50_wndw *);
767 int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
768 struct nv50_head_atom *asyh);
769 void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
770 struct nv50_head_atom *asyh);
771 void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
772 struct nv50_wndw_atom *asyw);
774 void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
775 void (*sema_clr)(struct nv50_wndw *);
776 void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
777 void (*ntfy_clr)(struct nv50_wndw *);
778 int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
779 void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
780 void (*image_clr)(struct nv50_wndw *);
781 void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
782 void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
784 u32 (*update)(struct nv50_wndw *, u32 interlock);
788 nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
791 return wndw->func->ntfy_wait_begun(wndw, asyw);
796 nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
797 struct nv50_wndw_atom *asyw)
799 if (asyw->clr.sema && (!asyw->set.sema || flush))
800 wndw->func->sema_clr(wndw);
801 if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
802 wndw->func->ntfy_clr(wndw);
803 if (asyw->clr.image && (!asyw->set.image || flush))
804 wndw->func->image_clr(wndw);
806 return flush ? wndw->func->update(wndw, interlock) : 0;
810 nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
811 struct nv50_wndw_atom *asyw)
814 asyw->image.mode = 0;
815 asyw->image.interval = 1;
818 if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
819 if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
820 if (asyw->set.image) wndw->func->image_set(wndw, asyw);
821 if (asyw->set.lut ) wndw->func->lut (wndw, asyw);
822 if (asyw->set.point) wndw->func->point (wndw, asyw);
824 return wndw->func->update(wndw, interlock);
828 nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
829 struct nv50_wndw_atom *asyw,
830 struct nv50_head_atom *asyh)
832 struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
833 NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
834 wndw->func->release(wndw, asyw, asyh);
835 asyw->ntfy.handle = 0;
836 asyw->sema.handle = 0;
840 nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
841 struct nv50_wndw_atom *asyw,
842 struct nv50_head_atom *asyh)
844 struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
845 struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
848 NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
850 asyw->image.w = fb->base.width;
851 asyw->image.h = fb->base.height;
852 asyw->image.kind = fb->nvbo->kind;
854 if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
859 if (asyw->image.kind) {
860 asyw->image.layout = 0;
861 if (drm->client.device.info.chipset >= 0xc0)
862 asyw->image.block = fb->nvbo->mode >> 4;
864 asyw->image.block = fb->nvbo->mode;
865 asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
867 asyw->image.layout = 1;
868 asyw->image.block = 0;
869 asyw->image.pitch = fb->base.pitches[0];
872 ret = wndw->func->acquire(wndw, asyw, asyh);
876 if (asyw->set.image) {
877 if (!(asyw->image.mode = asyw->interval ? 0 : 1))
878 asyw->image.interval = asyw->interval;
880 asyw->image.interval = 0;
887 nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
889 struct nouveau_drm *drm = nouveau_drm(plane->dev);
890 struct nv50_wndw *wndw = nv50_wndw(plane);
891 struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
892 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
893 struct nv50_head_atom *harm = NULL, *asyh = NULL;
894 bool varm = false, asyv = false, asym = false;
897 NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
898 if (asyw->state.crtc) {
899 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
901 return PTR_ERR(asyh);
902 asym = drm_atomic_crtc_needs_modeset(&asyh->state);
903 asyv = asyh->state.active;
906 if (armw->state.crtc) {
907 harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
909 return PTR_ERR(harm);
910 varm = harm->state.crtc->state->active;
914 asyw->point.x = asyw->state.crtc_x;
915 asyw->point.y = asyw->state.crtc_y;
916 if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
917 asyw->set.point = true;
919 ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
924 nv50_wndw_atomic_check_release(wndw, asyw, harm);
930 asyw->clr.ntfy = armw->ntfy.handle != 0;
931 asyw->clr.sema = armw->sema.handle != 0;
932 if (wndw->func->image_clr)
933 asyw->clr.image = armw->image.handle != 0;
934 asyw->set.lut = wndw->func->lut && asyv;
941 nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
943 struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
944 struct nouveau_drm *drm = nouveau_drm(plane->dev);
946 NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
950 nouveau_bo_unpin(fb->nvbo);
954 nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
956 struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
957 struct nouveau_drm *drm = nouveau_drm(plane->dev);
958 struct nv50_wndw *wndw = nv50_wndw(plane);
959 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
960 struct nv50_head_atom *asyh;
961 struct nv50_dmac_ctxdma *ctxdma;
964 NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
968 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
972 ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
973 if (IS_ERR(ctxdma)) {
974 nouveau_bo_unpin(fb->nvbo);
975 return PTR_ERR(ctxdma);
978 asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
979 asyw->image.handle = ctxdma->object.handle;
980 asyw->image.offset = fb->nvbo->bo.offset;
982 if (wndw->func->prepare) {
983 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
985 return PTR_ERR(asyh);
987 wndw->func->prepare(wndw, asyh, asyw);
993 static const struct drm_plane_helper_funcs
995 .prepare_fb = nv50_wndw_prepare_fb,
996 .cleanup_fb = nv50_wndw_cleanup_fb,
997 .atomic_check = nv50_wndw_atomic_check,
1001 nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
1002 struct drm_plane_state *state)
1004 struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
1005 __drm_atomic_helper_plane_destroy_state(&asyw->state);
1009 static struct drm_plane_state *
1010 nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
1012 struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
1013 struct nv50_wndw_atom *asyw;
1014 if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
1016 __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
1018 asyw->sema = armw->sema;
1019 asyw->ntfy = armw->ntfy;
1020 asyw->image = armw->image;
1021 asyw->point = armw->point;
1022 asyw->lut = armw->lut;
1025 return &asyw->state;
1029 nv50_wndw_reset(struct drm_plane *plane)
1031 struct nv50_wndw_atom *asyw;
1033 if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
1037 plane->funcs->atomic_destroy_state(plane, plane->state);
1038 plane->state = &asyw->state;
1039 plane->state->plane = plane;
1040 plane->state->rotation = DRM_MODE_ROTATE_0;
1044 nv50_wndw_destroy(struct drm_plane *plane)
1046 struct nv50_wndw *wndw = nv50_wndw(plane);
1048 nvif_notify_fini(&wndw->notify);
1049 data = wndw->func->dtor(wndw);
1050 drm_plane_cleanup(&wndw->plane);
1054 static const struct drm_plane_funcs
1056 .update_plane = drm_atomic_helper_update_plane,
1057 .disable_plane = drm_atomic_helper_disable_plane,
1058 .destroy = nv50_wndw_destroy,
1059 .reset = nv50_wndw_reset,
1060 .atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
1061 .atomic_destroy_state = nv50_wndw_atomic_destroy_state,
1065 nv50_wndw_fini(struct nv50_wndw *wndw)
1067 nvif_notify_put(&wndw->notify);
1071 nv50_wndw_init(struct nv50_wndw *wndw)
1073 nvif_notify_get(&wndw->notify);
1077 nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
1078 enum drm_plane_type type, const char *name, int index,
1079 struct nv50_dmac *dmac, const u32 *format, int nformat,
1080 struct nv50_wndw *wndw)
1087 ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
1088 format, nformat, NULL,
1089 type, "%s-%d", name, index);
1093 drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
1097 /******************************************************************************
1099 *****************************************************************************/
1100 #define nv50_curs(p) container_of((p), struct nv50_curs, wndw)
1103 struct nv50_wndw wndw;
1104 struct nvif_object chan;
1108 nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
1110 struct nv50_curs *curs = nv50_curs(wndw);
1111 nvif_wr32(&curs->chan, 0x0080, 0x00000000);
1116 nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
1118 struct nv50_curs *curs = nv50_curs(wndw);
1119 nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
1123 nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
1124 struct nv50_wndw_atom *asyw)
1126 u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
1127 u32 offset = asyw->image.offset;
1128 if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
1129 asyh->curs.handle = handle;
1130 asyh->curs.offset = offset;
1131 asyh->set.curs = asyh->curs.visible;
1136 nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
1137 struct nv50_head_atom *asyh)
1139 asyh->curs.visible = false;
1143 nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
1144 struct nv50_head_atom *asyh)
1148 ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
1149 DRM_PLANE_HELPER_NO_SCALING,
1150 DRM_PLANE_HELPER_NO_SCALING,
1152 asyh->curs.visible = asyw->state.visible;
1153 if (ret || !asyh->curs.visible)
1156 switch (asyw->state.fb->width) {
1157 case 32: asyh->curs.layout = 0; break;
1158 case 64: asyh->curs.layout = 1; break;
1163 if (asyw->state.fb->width != asyw->state.fb->height)
1166 switch (asyw->state.fb->format->format) {
1167 case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
1177 nv50_curs_dtor(struct nv50_wndw *wndw)
1179 struct nv50_curs *curs = nv50_curs(wndw);
1180 nvif_object_fini(&curs->chan);
1185 nv50_curs_format[] = {
1186 DRM_FORMAT_ARGB8888,
1189 static const struct nv50_wndw_func
1191 .dtor = nv50_curs_dtor,
1192 .acquire = nv50_curs_acquire,
1193 .release = nv50_curs_release,
1194 .prepare = nv50_curs_prepare,
1195 .point = nv50_curs_point,
1196 .update = nv50_curs_update,
1200 nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
1201 struct nv50_curs **pcurs)
1203 static const struct nvif_mclass curses[] = {
1204 { GK104_DISP_CURSOR, 0 },
1205 { GF110_DISP_CURSOR, 0 },
1206 { GT214_DISP_CURSOR, 0 },
1207 { G82_DISP_CURSOR, 0 },
1208 { NV50_DISP_CURSOR, 0 },
1211 struct nv50_disp_cursor_v0 args = {
1212 .head = head->base.index,
1214 struct nv50_disp *disp = nv50_disp(drm->dev);
1215 struct nv50_curs *curs;
1218 cid = nvif_mclass(disp->disp, curses);
1220 NV_ERROR(drm, "No supported cursor immediate class\n");
1224 if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
1227 ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
1228 "curs", head->base.index, &disp->mast.base,
1229 nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
1236 ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
1237 sizeof(args), &curs->chan);
1239 NV_ERROR(drm, "curs%04x allocation failed: %d\n",
1240 curses[cid].oclass, ret);
1247 /******************************************************************************
1249 *****************************************************************************/
1250 #define nv50_base(p) container_of((p), struct nv50_base, wndw)
1253 struct nv50_wndw wndw;
1254 struct nv50_sync chan;
1259 nv50_base_notify(struct nvif_notify *notify)
1261 return NVIF_NOTIFY_KEEP;
1265 nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
1267 struct nv50_base *base = nv50_base(wndw);
1269 if ((push = evo_wait(&base->chan, 2))) {
1270 evo_mthd(push, 0x00e0, 1);
1271 evo_data(push, asyw->lut.enable << 30);
1272 evo_kick(push, &base->chan);
1277 nv50_base_image_clr(struct nv50_wndw *wndw)
1279 struct nv50_base *base = nv50_base(wndw);
1281 if ((push = evo_wait(&base->chan, 4))) {
1282 evo_mthd(push, 0x0084, 1);
1283 evo_data(push, 0x00000000);
1284 evo_mthd(push, 0x00c0, 1);
1285 evo_data(push, 0x00000000);
1286 evo_kick(push, &base->chan);
1291 nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
1293 struct nv50_base *base = nv50_base(wndw);
1294 const s32 oclass = base->chan.base.base.user.oclass;
1296 if ((push = evo_wait(&base->chan, 10))) {
1297 evo_mthd(push, 0x0084, 1);
1298 evo_data(push, (asyw->image.mode << 8) |
1299 (asyw->image.interval << 4));
1300 evo_mthd(push, 0x00c0, 1);
1301 evo_data(push, asyw->image.handle);
1302 if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
1303 evo_mthd(push, 0x0800, 5);
1304 evo_data(push, asyw->image.offset >> 8);
1305 evo_data(push, 0x00000000);
1306 evo_data(push, (asyw->image.h << 16) | asyw->image.w);
1307 evo_data(push, (asyw->image.layout << 20) |
1310 evo_data(push, (asyw->image.kind << 16) |
1311 (asyw->image.format << 8));
1313 if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
1314 evo_mthd(push, 0x0800, 5);
1315 evo_data(push, asyw->image.offset >> 8);
1316 evo_data(push, 0x00000000);
1317 evo_data(push, (asyw->image.h << 16) | asyw->image.w);
1318 evo_data(push, (asyw->image.layout << 20) |
1321 evo_data(push, asyw->image.format << 8);
1323 evo_mthd(push, 0x0400, 5);
1324 evo_data(push, asyw->image.offset >> 8);
1325 evo_data(push, 0x00000000);
1326 evo_data(push, (asyw->image.h << 16) | asyw->image.w);
1327 evo_data(push, (asyw->image.layout << 24) |
1330 evo_data(push, asyw->image.format << 8);
1332 evo_kick(push, &base->chan);
1337 nv50_base_ntfy_clr(struct nv50_wndw *wndw)
1339 struct nv50_base *base = nv50_base(wndw);
1341 if ((push = evo_wait(&base->chan, 2))) {
1342 evo_mthd(push, 0x00a4, 1);
1343 evo_data(push, 0x00000000);
1344 evo_kick(push, &base->chan);
1349 nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
1351 struct nv50_base *base = nv50_base(wndw);
1353 if ((push = evo_wait(&base->chan, 3))) {
1354 evo_mthd(push, 0x00a0, 2);
1355 evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
1356 evo_data(push, asyw->ntfy.handle);
1357 evo_kick(push, &base->chan);
1362 nv50_base_sema_clr(struct nv50_wndw *wndw)
1364 struct nv50_base *base = nv50_base(wndw);
1366 if ((push = evo_wait(&base->chan, 2))) {
1367 evo_mthd(push, 0x0094, 1);
1368 evo_data(push, 0x00000000);
1369 evo_kick(push, &base->chan);
1374 nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
1376 struct nv50_base *base = nv50_base(wndw);
1378 if ((push = evo_wait(&base->chan, 5))) {
1379 evo_mthd(push, 0x0088, 4);
1380 evo_data(push, asyw->sema.offset);
1381 evo_data(push, asyw->sema.acquire);
1382 evo_data(push, asyw->sema.release);
1383 evo_data(push, asyw->sema.handle);
1384 evo_kick(push, &base->chan);
1389 nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
1391 struct nv50_base *base = nv50_base(wndw);
1394 if (!(push = evo_wait(&base->chan, 2)))
1396 evo_mthd(push, 0x0080, 1);
1397 evo_data(push, interlock);
1398 evo_kick(push, &base->chan);
1400 if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
1401 return interlock ? 2 << (base->id * 8) : 0;
1402 return interlock ? 2 << (base->id * 4) : 0;
1406 nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
1408 struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
1409 struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
1410 if (nvif_msec(&drm->client.device, 2000ULL,
1411 u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
1412 if ((data & 0xc0000000) == 0x40000000)
1421 nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
1422 struct nv50_head_atom *asyh)
1428 nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
1429 struct nv50_head_atom *asyh)
1431 const struct drm_framebuffer *fb = asyw->state.fb;
1434 if (!fb->format->depth)
1437 ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
1438 DRM_PLANE_HELPER_NO_SCALING,
1439 DRM_PLANE_HELPER_NO_SCALING,
1444 asyh->base.depth = fb->format->depth;
1445 asyh->base.cpp = fb->format->cpp[0];
1446 asyh->base.x = asyw->state.src.x1 >> 16;
1447 asyh->base.y = asyw->state.src.y1 >> 16;
1448 asyh->base.w = asyw->state.fb->width;
1449 asyh->base.h = asyw->state.fb->height;
1451 switch (fb->format->format) {
1452 case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
1453 case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
1454 case DRM_FORMAT_XRGB1555 :
1455 case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
1456 case DRM_FORMAT_XRGB8888 :
1457 case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
1458 case DRM_FORMAT_XBGR2101010:
1459 case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
1460 case DRM_FORMAT_XBGR8888 :
1461 case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
1467 asyw->lut.enable = 1;
1468 asyw->set.image = true;
1473 nv50_base_dtor(struct nv50_wndw *wndw)
1475 struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
1476 struct nv50_base *base = nv50_base(wndw);
1477 nv50_dmac_destroy(&base->chan.base, disp->disp);
1482 nv50_base_format[] = {
1485 DRM_FORMAT_XRGB1555,
1486 DRM_FORMAT_ARGB1555,
1487 DRM_FORMAT_XRGB8888,
1488 DRM_FORMAT_ARGB8888,
1489 DRM_FORMAT_XBGR2101010,
1490 DRM_FORMAT_ABGR2101010,
1491 DRM_FORMAT_XBGR8888,
1492 DRM_FORMAT_ABGR8888,
1495 static const struct nv50_wndw_func
1497 .dtor = nv50_base_dtor,
1498 .acquire = nv50_base_acquire,
1499 .release = nv50_base_release,
1500 .sema_set = nv50_base_sema_set,
1501 .sema_clr = nv50_base_sema_clr,
1502 .ntfy_set = nv50_base_ntfy_set,
1503 .ntfy_clr = nv50_base_ntfy_clr,
1504 .ntfy_wait_begun = nv50_base_ntfy_wait_begun,
1505 .image_set = nv50_base_image_set,
1506 .image_clr = nv50_base_image_clr,
1507 .lut = nv50_base_lut,
1508 .update = nv50_base_update,
1512 nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
1513 struct nv50_base **pbase)
1515 struct nv50_disp *disp = nv50_disp(drm->dev);
1516 struct nv50_base *base;
1519 if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
1521 base->id = head->base.index;
1522 base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
1523 base->wndw.sema = EVO_FLIP_SEM0(base->id);
1524 base->wndw.data = 0x00000000;
1526 ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
1527 "base", base->id, &base->chan.base,
1528 nv50_base_format, ARRAY_SIZE(nv50_base_format),
1535 ret = nv50_base_create(&drm->client.device, disp->disp, base->id,
1536 disp->sync->bo.offset, &base->chan);
1540 return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
1542 NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
1543 &(struct nvif_notify_uevent_req) {},
1544 sizeof(struct nvif_notify_uevent_req),
1545 sizeof(struct nvif_notify_uevent_rep),
1546 &base->wndw.notify);
1549 /******************************************************************************
1551 *****************************************************************************/
1553 nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
1555 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1557 if ((push = evo_wait(core, 2))) {
1558 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
1559 evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
1561 evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
1562 evo_data(push, (asyh->procamp.sat.sin << 20) |
1563 (asyh->procamp.sat.cos << 8));
1564 evo_kick(push, core);
1569 nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
1571 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1573 if ((push = evo_wait(core, 2))) {
1574 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
1575 evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
1577 if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
1578 evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
1580 evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
1581 evo_data(push, (asyh->dither.mode << 3) |
1582 (asyh->dither.bits << 1) |
1583 asyh->dither.enable);
1584 evo_kick(push, core);
1589 nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
1591 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1595 if (asyh->base.cpp) {
1596 switch (asyh->base.cpp) {
1597 case 8: bounds |= 0x00000500; break;
1598 case 4: bounds |= 0x00000300; break;
1599 case 2: bounds |= 0x00000100; break;
1604 bounds |= 0x00000001;
1607 if ((push = evo_wait(core, 2))) {
1608 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
1609 evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
1611 evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
1612 evo_data(push, bounds);
1613 evo_kick(push, core);
1618 nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
1620 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1624 if (asyh->base.cpp) {
1625 switch (asyh->base.cpp) {
1626 case 8: bounds |= 0x00000500; break;
1627 case 4: bounds |= 0x00000300; break;
1628 case 2: bounds |= 0x00000100; break;
1629 case 1: bounds |= 0x00000000; break;
1634 bounds |= 0x00000001;
1637 if ((push = evo_wait(core, 2))) {
1638 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
1639 evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
1641 evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
1642 evo_data(push, bounds);
1643 evo_kick(push, core);
1648 nv50_head_curs_clr(struct nv50_head *head)
1650 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1652 if ((push = evo_wait(core, 4))) {
1653 if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
1654 evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
1655 evo_data(push, 0x05000000);
1657 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
1658 evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
1659 evo_data(push, 0x05000000);
1660 evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
1661 evo_data(push, 0x00000000);
1663 evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
1664 evo_data(push, 0x05000000);
1665 evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
1666 evo_data(push, 0x00000000);
1668 evo_kick(push, core);
1673 nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1675 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1677 if ((push = evo_wait(core, 5))) {
1678 if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
1679 evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
1680 evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1681 (asyh->curs.format << 24));
1682 evo_data(push, asyh->curs.offset >> 8);
1684 if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
1685 evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
1686 evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1687 (asyh->curs.format << 24));
1688 evo_data(push, asyh->curs.offset >> 8);
1689 evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
1690 evo_data(push, asyh->curs.handle);
1692 evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
1693 evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1694 (asyh->curs.format << 24));
1695 evo_data(push, asyh->curs.offset >> 8);
1696 evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
1697 evo_data(push, asyh->curs.handle);
1699 evo_kick(push, core);
1704 nv50_head_core_clr(struct nv50_head *head)
1706 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1708 if ((push = evo_wait(core, 2))) {
1709 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
1710 evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
1712 evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
1713 evo_data(push, 0x00000000);
1714 evo_kick(push, core);
1719 nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1721 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1723 if ((push = evo_wait(core, 9))) {
1724 if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
1725 evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
1726 evo_data(push, asyh->core.offset >> 8);
1727 evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
1728 evo_data(push, (asyh->core.h << 16) | asyh->core.w);
1729 evo_data(push, asyh->core.layout << 20 |
1730 (asyh->core.pitch >> 8) << 8 |
1732 evo_data(push, asyh->core.kind << 16 |
1733 asyh->core.format << 8);
1734 evo_data(push, asyh->core.handle);
1735 evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
1736 evo_data(push, (asyh->core.y << 16) | asyh->core.x);
1737 /* EVO will complain with INVALID_STATE if we have an
1738 * active cursor and (re)specify HeadSetContextDmaIso
1739 * without also updating HeadSetOffsetCursor.
1741 asyh->set.curs = asyh->curs.visible;
1743 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
1744 evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
1745 evo_data(push, asyh->core.offset >> 8);
1746 evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
1747 evo_data(push, (asyh->core.h << 16) | asyh->core.w);
1748 evo_data(push, asyh->core.layout << 20 |
1749 (asyh->core.pitch >> 8) << 8 |
1751 evo_data(push, asyh->core.format << 8);
1752 evo_data(push, asyh->core.handle);
1753 evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
1754 evo_data(push, (asyh->core.y << 16) | asyh->core.x);
1756 evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
1757 evo_data(push, asyh->core.offset >> 8);
1758 evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
1759 evo_data(push, (asyh->core.h << 16) | asyh->core.w);
1760 evo_data(push, asyh->core.layout << 24 |
1761 (asyh->core.pitch >> 8) << 8 |
1763 evo_data(push, asyh->core.format << 8);
1764 evo_data(push, asyh->core.handle);
1765 evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
1766 evo_data(push, (asyh->core.y << 16) | asyh->core.x);
1768 evo_kick(push, core);
1773 nv50_head_lut_clr(struct nv50_head *head)
1775 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1777 if ((push = evo_wait(core, 4))) {
1778 if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
1779 evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
1780 evo_data(push, 0x40000000);
1782 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
1783 evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
1784 evo_data(push, 0x40000000);
1785 evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
1786 evo_data(push, 0x00000000);
1788 evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
1789 evo_data(push, 0x03000000);
1790 evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
1791 evo_data(push, 0x00000000);
1793 evo_kick(push, core);
1798 nv50_head_lut_load(struct drm_property_blob *blob, int mode,
1799 struct nouveau_bo *nvbo)
1801 struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
1802 void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
1803 const int size = blob->length / sizeof(*in);
1807 /* This can't happen.. But it shuts the compiler up. */
1808 if (WARN_ON(size != 256))
1812 case 0: /* LORES. */
1813 case 1: /* HIRES. */
1818 case 7: /* INTERPOLATE_257_UNITY_RANGE. */
1828 for (i = 0; i < size; i++) {
1829 r = (drm_color_lut_extract(in[i]. red, bits) + zero) << shift;
1830 g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
1831 b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
1832 writew(r, lut + (i * 0x08) + 0);
1833 writew(g, lut + (i * 0x08) + 2);
1834 writew(b, lut + (i * 0x08) + 4);
1837 /* INTERPOLATE modes require a "next" entry to interpolate with,
1838 * so we replicate the last entry to deal with this for now.
1840 writew(r, lut + (i * 0x08) + 0);
1841 writew(g, lut + (i * 0x08) + 2);
1842 writew(b, lut + (i * 0x08) + 4);
1846 nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1848 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1850 if ((push = evo_wait(core, 7))) {
1851 if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
1852 evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
1853 evo_data(push, 0x80000000 | asyh->lut.mode << 30);
1854 evo_data(push, asyh->lut.offset >> 8);
1856 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
1857 evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
1858 evo_data(push, 0x80000000 | asyh->lut.mode << 30);
1859 evo_data(push, asyh->lut.offset >> 8);
1860 evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
1861 evo_data(push, asyh->lut.handle);
1863 evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
1864 evo_data(push, 0x80000000 | asyh->lut.mode << 24);
1865 evo_data(push, asyh->lut.offset >> 8);
1866 evo_data(push, 0x00000000);
1867 evo_data(push, 0x00000000);
1868 evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
1869 evo_data(push, asyh->lut.handle);
1871 evo_kick(push, core);
1876 nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
1878 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1879 struct nv50_head_mode *m = &asyh->mode;
1881 if ((push = evo_wait(core, 14))) {
1882 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
1883 evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
1884 evo_data(push, 0x00800000 | m->clock);
1885 evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
1886 evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
1887 evo_data(push, 0x00000000);
1888 evo_data(push, (m->v.active << 16) | m->h.active );
1889 evo_data(push, (m->v.synce << 16) | m->h.synce );
1890 evo_data(push, (m->v.blanke << 16) | m->h.blanke );
1891 evo_data(push, (m->v.blanks << 16) | m->h.blanks );
1892 evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
1893 evo_data(push, asyh->mode.v.blankus);
1894 evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
1895 evo_data(push, 0x00000000);
1897 evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
1898 evo_data(push, 0x00000000);
1899 evo_data(push, (m->v.active << 16) | m->h.active );
1900 evo_data(push, (m->v.synce << 16) | m->h.synce );
1901 evo_data(push, (m->v.blanke << 16) | m->h.blanke );
1902 evo_data(push, (m->v.blanks << 16) | m->h.blanks );
1903 evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
1904 evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
1905 evo_data(push, 0x00000000); /* ??? */
1906 evo_data(push, 0xffffff00);
1907 evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
1908 evo_data(push, m->clock * 1000);
1909 evo_data(push, 0x00200000); /* ??? */
1910 evo_data(push, m->clock * 1000);
1912 evo_kick(push, core);
1917 nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
1919 struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1921 if ((push = evo_wait(core, 10))) {
1922 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
1923 evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
1924 evo_data(push, 0x00000000);
1925 evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
1926 evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
1927 evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
1928 evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
1929 evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
1931 evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
1932 evo_data(push, 0x00000000);
1933 evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
1934 evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
1935 evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
1936 evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
1937 evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
1938 evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
1940 evo_kick(push, core);
1945 nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
1947 if (asyh->clr.ilut && (!asyh->set.ilut || y))
1948 nv50_head_lut_clr(head);
1949 if (asyh->clr.core && (!asyh->set.core || y))
1950 nv50_head_core_clr(head);
1951 if (asyh->clr.curs && (!asyh->set.curs || y))
1952 nv50_head_curs_clr(head);
1956 nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1958 if (asyh->set.view ) nv50_head_view (head, asyh);
1959 if (asyh->set.mode ) nv50_head_mode (head, asyh);
1960 if (asyh->set.ilut ) {
1961 struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
1962 struct drm_property_blob *blob = asyh->state.gamma_lut;
1964 nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
1965 asyh->lut.offset = nvbo->bo.offset;
1966 head->lut.next ^= 1;
1967 nv50_head_lut_set(head, asyh);
1969 if (asyh->set.core ) nv50_head_core_set(head, asyh);
1970 if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
1971 if (asyh->set.base ) nv50_head_base (head, asyh);
1972 if (asyh->set.ovly ) nv50_head_ovly (head, asyh);
1973 if (asyh->set.dither ) nv50_head_dither (head, asyh);
1974 if (asyh->set.procamp) nv50_head_procamp (head, asyh);
1978 nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
1979 struct nv50_head_atom *asyh,
1980 struct nouveau_conn_atom *asyc)
1982 const int vib = asyc->procamp.color_vibrance - 100;
1983 const int hue = asyc->procamp.vibrant_hue - 90;
1984 const int adj = (vib > 0) ? 50 : 0;
1985 asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
1986 asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
1987 asyh->set.procamp = true;
1991 nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
1992 struct nv50_head_atom *asyh,
1993 struct nouveau_conn_atom *asyc)
1995 struct drm_connector *connector = asyc->state.connector;
1998 if (asyc->dither.mode == DITHERING_MODE_AUTO) {
1999 if (asyh->base.depth > connector->display_info.bpc * 3)
2000 mode = DITHERING_MODE_DYNAMIC2X2;
2002 mode = asyc->dither.mode;
2005 if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
2006 if (connector->display_info.bpc >= 8)
2007 mode |= DITHERING_DEPTH_8BPC;
2009 mode |= asyc->dither.depth;
2012 asyh->dither.enable = mode;
2013 asyh->dither.bits = mode >> 1;
2014 asyh->dither.mode = mode >> 3;
2015 asyh->set.dither = true;
2019 nv50_head_atomic_check_view(struct nv50_head_atom *armh,
2020 struct nv50_head_atom *asyh,
2021 struct nouveau_conn_atom *asyc)
2023 struct drm_connector *connector = asyc->state.connector;
2024 struct drm_display_mode *omode = &asyh->state.adjusted_mode;
2025 struct drm_display_mode *umode = &asyh->state.mode;
2026 int mode = asyc->scaler.mode;
2028 int umode_vdisplay, omode_hdisplay, omode_vdisplay;
2030 if (connector->edid_blob_ptr)
2031 edid = (struct edid *)connector->edid_blob_ptr->data;
2035 if (!asyc->scaler.full) {
2036 if (mode == DRM_MODE_SCALE_NONE)
2039 /* Non-EDID LVDS/eDP mode. */
2040 mode = DRM_MODE_SCALE_FULLSCREEN;
2043 /* For the user-specified mode, we must ignore doublescan and
2044 * the like, but honor frame packing.
2046 umode_vdisplay = umode->vdisplay;
2047 if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
2048 umode_vdisplay += umode->vtotal;
2049 asyh->view.iW = umode->hdisplay;
2050 asyh->view.iH = umode_vdisplay;
2051 /* For the output mode, we can just use the stock helper. */
2052 drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
2053 asyh->view.oW = omode_hdisplay;
2054 asyh->view.oH = omode_vdisplay;
2056 /* Add overscan compensation if necessary, will keep the aspect
2057 * ratio the same as the backend mode unless overridden by the
2058 * user setting both hborder and vborder properties.
2060 if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
2061 (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
2062 drm_detect_hdmi_monitor(edid)))) {
2063 u32 bX = asyc->scaler.underscan.hborder;
2064 u32 bY = asyc->scaler.underscan.vborder;
2065 u32 r = (asyh->view.oH << 19) / asyh->view.oW;
2068 asyh->view.oW -= (bX * 2);
2069 if (bY) asyh->view.oH -= (bY * 2);
2070 else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
2072 asyh->view.oW -= (asyh->view.oW >> 4) + 32;
2073 if (bY) asyh->view.oH -= (bY * 2);
2074 else asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
2078 /* Handle CENTER/ASPECT scaling, taking into account the areas
2079 * removed already for overscan compensation.
2082 case DRM_MODE_SCALE_CENTER:
2083 asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
2084 asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
2086 case DRM_MODE_SCALE_ASPECT:
2087 if (asyh->view.oH < asyh->view.oW) {
2088 u32 r = (asyh->view.iW << 19) / asyh->view.iH;
2089 asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
2091 u32 r = (asyh->view.iH << 19) / asyh->view.iW;
2092 asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
2099 asyh->set.view = true;
2103 nv50_head_atomic_check_lut(struct nv50_head *head,
2104 struct nv50_head_atom *armh,
2105 struct nv50_head_atom *asyh)
2107 struct nv50_disp *disp = nv50_disp(head->base.base.dev);
2109 /* An I8 surface without an input LUT makes no sense, and
2110 * EVO will throw an error if you try.
2112 * Legacy clients actually cause this due to the order in
2113 * which they call ioctls, so we will enable the LUT with
2114 * whatever contents the buffer already contains to avoid
2115 * triggering the error check.
2117 if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
2118 asyh->lut.handle = 0;
2119 asyh->clr.ilut = armh->lut.visible;
2123 if (disp->disp->oclass < GF110_DISP) {
2124 asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
2125 asyh->set.ilut = true;
2128 asyh->set.ilut = asyh->state.color_mgmt_changed;
2130 asyh->lut.handle = disp->mast.base.vram.handle;
2134 nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2136 struct drm_display_mode *mode = &asyh->state.adjusted_mode;
2137 struct nv50_head_mode *m = &asyh->mode;
2140 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);
2143 * DRM modes are defined in terms of a repeating interval
2144 * starting with the active display area. The hardware modes
2145 * are defined in terms of a repeating interval starting one
2146 * unit (pixel or line) into the sync pulse. So, add bias.
2149 m->h.active = mode->crtc_htotal;
2150 m->h.synce = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
2151 m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
2152 m->h.blanks = m->h.blanke + mode->crtc_hdisplay;
2154 m->v.active = mode->crtc_vtotal;
2155 m->v.synce = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
2156 m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
2157 m->v.blanks = m->v.blanke + mode->crtc_vdisplay;
2159 /*XXX: Safe underestimate, even "0" works */
2160 blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
2162 blankus /= mode->crtc_clock;
2163 m->v.blankus = blankus;
2165 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
2166 m->v.blank2e = m->v.active + m->v.blanke;
2167 m->v.blank2s = m->v.blank2e + mode->crtc_vdisplay;
2168 m->v.active = (m->v.active * 2) + 1;
2169 m->interlace = true;
2173 m->interlace = false;
2175 m->clock = mode->crtc_clock;
2177 asyh->set.mode = true;
2181 nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2183 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
2184 struct nv50_disp *disp = nv50_disp(crtc->dev);
2185 struct nv50_head *head = nv50_head(crtc);
2186 struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
2187 struct nv50_head_atom *asyh = nv50_head_atom(state);
2188 struct nouveau_conn_atom *asyc = NULL;
2189 struct drm_connector_state *conns;
2190 struct drm_connector *conn;
2193 NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
2194 if (asyh->state.active) {
2195 for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
2196 if (conns->crtc == crtc) {
2197 asyc = nouveau_conn_atom(conns);
2202 if (armh->state.active) {
2204 if (asyh->state.mode_changed)
2205 asyc->set.scaler = true;
2206 if (armh->base.depth != asyh->base.depth)
2207 asyc->set.dither = true;
2211 asyc->set.mask = ~0;
2212 asyh->set.mask = ~0;
2215 if (asyh->state.mode_changed)
2216 nv50_head_atomic_check_mode(head, asyh);
2218 if (asyh->state.color_mgmt_changed ||
2219 asyh->base.cpp != armh->base.cpp)
2220 nv50_head_atomic_check_lut(head, armh, asyh);
2221 asyh->lut.visible = asyh->lut.handle != 0;
2224 if (asyc->set.scaler)
2225 nv50_head_atomic_check_view(armh, asyh, asyc);
2226 if (asyc->set.dither)
2227 nv50_head_atomic_check_dither(armh, asyh, asyc);
2228 if (asyc->set.procamp)
2229 nv50_head_atomic_check_procamp(armh, asyh, asyc);
2232 if ((asyh->core.visible = (asyh->base.cpp != 0))) {
2233 asyh->core.x = asyh->base.x;
2234 asyh->core.y = asyh->base.y;
2235 asyh->core.w = asyh->base.w;
2236 asyh->core.h = asyh->base.h;
2238 if ((asyh->core.visible = asyh->curs.visible) ||
2239 (asyh->core.visible = asyh->lut.visible)) {
2240 /*XXX: We need to either find some way of having the
2241 * primary base layer appear black, while still
2242 * being able to display the other layers, or we
2243 * need to allocate a dummy black surface here.
2247 asyh->core.w = asyh->state.mode.hdisplay;
2248 asyh->core.h = asyh->state.mode.vdisplay;
2250 asyh->core.handle = disp->mast.base.vram.handle;
2251 asyh->core.offset = 0;
2252 asyh->core.format = 0xcf;
2253 asyh->core.kind = 0;
2254 asyh->core.layout = 1;
2255 asyh->core.block = 0;
2256 asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
2257 asyh->set.base = armh->base.cpp != asyh->base.cpp;
2258 asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
2260 asyh->lut.visible = false;
2261 asyh->core.visible = false;
2262 asyh->curs.visible = false;
2267 if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
2268 if (asyh->core.visible) {
2269 if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
2270 asyh->set.core = true;
2272 if (armh->core.visible) {
2273 asyh->clr.core = true;
2276 if (asyh->curs.visible) {
2277 if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
2278 asyh->set.curs = true;
2280 if (armh->curs.visible) {
2281 asyh->clr.curs = true;
2284 asyh->clr.ilut = armh->lut.visible;
2285 asyh->clr.core = armh->core.visible;
2286 asyh->clr.curs = armh->curs.visible;
2287 asyh->set.ilut = asyh->lut.visible;
2288 asyh->set.core = asyh->core.visible;
2289 asyh->set.curs = asyh->curs.visible;
2292 if (asyh->clr.mask || asyh->set.mask)
2293 nv50_atom(asyh->state.state)->lock_core = true;
2297 static const struct drm_crtc_helper_funcs
2299 .atomic_check = nv50_head_atomic_check,
2303 nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
2304 struct drm_crtc_state *state)
2306 struct nv50_head_atom *asyh = nv50_head_atom(state);
2307 __drm_atomic_helper_crtc_destroy_state(&asyh->state);
2311 static struct drm_crtc_state *
2312 nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
2314 struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
2315 struct nv50_head_atom *asyh;
2316 if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
2318 __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
2319 asyh->view = armh->view;
2320 asyh->mode = armh->mode;
2321 asyh->lut = armh->lut;
2322 asyh->core = armh->core;
2323 asyh->curs = armh->curs;
2324 asyh->base = armh->base;
2325 asyh->ovly = armh->ovly;
2326 asyh->dither = armh->dither;
2327 asyh->procamp = armh->procamp;
2330 return &asyh->state;
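/* Open-coded CRTC reset helper: destroy whatever software state the CRTC
 * currently holds and install the supplied state in its place.
 */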
2334 __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
2335 struct drm_crtc_state *state)
2338 crtc->funcs->atomic_destroy_state(crtc, crtc->state);
2339 crtc->state = state;
2340 crtc->state->crtc = crtc;
2344 nv50_head_reset(struct drm_crtc *crtc)
2346 struct nv50_head_atom *asyh;
2348 if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
2351 __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
2355 nv50_head_destroy(struct drm_crtc *crtc)
2357 struct nv50_disp *disp = nv50_disp(crtc->dev);
2358 struct nv50_head *head = nv50_head(crtc);
2361 nv50_dmac_destroy(&head->ovly.base, disp->disp);
2362 nv50_pioc_destroy(&head->oimm.base);
2364 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
2365 nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
2367 drm_crtc_cleanup(crtc);
2371 static const struct drm_crtc_funcs
2372 nv50_head_func = {
2373 .reset = nv50_head_reset,
2374 .gamma_set = drm_atomic_helper_legacy_gamma_set,
2375 .destroy = nv50_head_destroy,
2376 .set_config = drm_atomic_helper_set_config,
2377 .page_flip = drm_atomic_helper_page_flip,
2378 .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
2379 .atomic_destroy_state = nv50_head_atomic_destroy_state,
2383 nv50_head_create(struct drm_device *dev, int index)
2385 struct nouveau_drm *drm = nouveau_drm(dev);
2386 struct nvif_device *device = &drm->client.device;
2387 struct nv50_disp *disp = nv50_disp(dev);
2388 struct nv50_head *head;
2389 struct nv50_base *base;
2390 struct nv50_curs *curs;
2391 struct drm_crtc *crtc;
2394 head = kzalloc(sizeof(*head), GFP_KERNEL);
2398 head->base.index = index;
2399 ret = nv50_base_new(drm, head, &base);
2401 ret = nv50_curs_new(drm, head, &curs);
2407 crtc = &head->base.base;
2408 drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
2409 &curs->wndw.plane, &nv50_head_func,
2410 "head-%d", head->base.index);
2411 drm_crtc_helper_add(crtc, &nv50_head_help);
2412 drm_mode_crtc_set_gamma_size(crtc, 256);
2414 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
2415 ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
2417 &head->lut.nvbo[i]);
2422 /* allocate overlay resources */
2423 ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
2427 ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
2434 nv50_head_destroy(crtc);
2438 /******************************************************************************
2439 * Output path helpers
2440 *****************************************************************************/
2442 nv50_outp_release(struct nouveau_encoder *nv_encoder)
2444 struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
2446 struct nv50_disp_mthd_v1 base;
2449 .base.method = NV50_DISP_MTHD_V1_RELEASE,
2450 .base.hasht = nv_encoder->dcb->hasht,
2451 .base.hashm = nv_encoder->dcb->hashm,
2454 nvif_mthd(disp->disp, 0, &args, sizeof(args));
2455 nv_encoder->or = -1;
2456 nv_encoder->link = 0;
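/* Acquiring an output path asks NVKM to assign an OR (output resource) and
 * link to this encoder; releasing hands them back and invalidates the
 * cached values above.
 */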
2460 nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
2462 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
2463 struct nv50_disp *disp = nv50_disp(drm->dev);
2465 struct nv50_disp_mthd_v1 base;
2466 struct nv50_disp_acquire_v0 info;
2469 .base.method = NV50_DISP_MTHD_V1_ACQUIRE,
2470 .base.hasht = nv_encoder->dcb->hasht,
2471 .base.hashm = nv_encoder->dcb->hashm,
2475 ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
2477 NV_ERROR(drm, "error acquiring output path: %d\n", ret);
2481 nv_encoder->or = args.info.or;
2482 nv_encoder->link = args.info.link;
2487 nv50_outp_atomic_check_view(struct drm_encoder *encoder,
2488 struct drm_crtc_state *crtc_state,
2489 struct drm_connector_state *conn_state,
2490 struct drm_display_mode *native_mode)
2492 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
2493 struct drm_display_mode *mode = &crtc_state->mode;
2494 struct drm_connector *connector = conn_state->connector;
2495 struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
2496 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
2498 NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
2499 asyc->scaler.full = false;
2503 if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
2504 switch (connector->connector_type) {
2505 case DRM_MODE_CONNECTOR_LVDS:
2506 case DRM_MODE_CONNECTOR_eDP:
2507 /* Force use of scaler for non-EDID modes. */
2508 if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
2511 asyc->scaler.full = true;
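/* 'mode' may have been re-pointed at the panel's native mode by the scaler
 * logic above; if it no longer matches adjusted_mode, adopt it and force a
 * full modeset.
 */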
2520 if (!drm_mode_equal(adjusted_mode, mode)) {
2521 drm_mode_copy(adjusted_mode, mode);
2522 crtc_state->mode_changed = true;
2529 nv50_outp_atomic_check(struct drm_encoder *encoder,
2530 struct drm_crtc_state *crtc_state,
2531 struct drm_connector_state *conn_state)
2533 struct nouveau_connector *nv_connector =
2534 nouveau_connector(conn_state->connector);
2535 return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
2536 nv_connector->native_mode);
2539 /******************************************************************************
2540 * DAC
2541 *****************************************************************************/
2543 nv50_dac_disable(struct drm_encoder *encoder)
2545 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2546 struct nv50_mast *mast = nv50_mast(encoder->dev);
2547 const int or = nv_encoder->or;
2550 if (nv_encoder->crtc) {
2551 push = evo_wait(mast, 4);
2553 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
2554 evo_mthd(push, 0x0400 + (or * 0x080), 1);
2555 evo_data(push, 0x00000000);
2557 evo_mthd(push, 0x0180 + (or * 0x020), 1);
2558 evo_data(push, 0x00000000);
2560 evo_kick(push, mast);
2564 nv_encoder->crtc = NULL;
2565 nv50_outp_release(nv_encoder);
2569 nv50_dac_enable(struct drm_encoder *encoder)
2571 struct nv50_mast *mast = nv50_mast(encoder->dev);
2572 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2573 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2574 struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
2577 nv50_outp_acquire(nv_encoder);
2579 push = evo_wait(mast, 8);
2581 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
2582 u32 syncs = 0x00000000;
2584 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
2585 syncs |= 0x00000001;
2586 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
2587 syncs |= 0x00000002;
2589 evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
2590 evo_data(push, 1 << nv_crtc->index);
2591 evo_data(push, syncs);
2593 u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
2594 u32 syncs = 0x00000001;
2596 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
2597 syncs |= 0x00000008;
2598 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
2599 syncs |= 0x00000010;
2601 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2602 magic |= 0x00000001;
2604 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
2605 evo_data(push, syncs);
2606 evo_data(push, magic);
2607 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
2608 evo_data(push, 1 << nv_crtc->index);
2611 evo_kick(push, mast);
2614 nv_encoder->crtc = encoder->crtc;
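/* Load-detect for analogue connectors: program the DAC test voltage from
 * the VBIOS (falling back to 340) and ask NVKM whether a load was sensed.
 */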
2617 static enum drm_connector_status
2618 nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
2620 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2621 struct nv50_disp *disp = nv50_disp(encoder->dev);
2623 struct nv50_disp_mthd_v1 base;
2624 struct nv50_disp_dac_load_v0 load;
2627 .base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
2628 .base.hasht = nv_encoder->dcb->hasht,
2629 .base.hashm = nv_encoder->dcb->hashm,
2633 args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
2634 if (args.load.data == 0)
2635 args.load.data = 340;
2637 ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
2638 if (ret || !args.load.load)
2639 return connector_status_disconnected;
2641 return connector_status_connected;
2644 static const struct drm_encoder_helper_funcs
2645 nv50_dac_help = {
2646 .atomic_check = nv50_outp_atomic_check,
2647 .enable = nv50_dac_enable,
2648 .disable = nv50_dac_disable,
2649 .detect = nv50_dac_detect
2653 nv50_dac_destroy(struct drm_encoder *encoder)
2655 drm_encoder_cleanup(encoder);
2659 static const struct drm_encoder_funcs
2660 nv50_dac_func = {
2661 .destroy = nv50_dac_destroy,
2665 nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
2667 struct nouveau_drm *drm = nouveau_drm(connector->dev);
2668 struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
2669 struct nvkm_i2c_bus *bus;
2670 struct nouveau_encoder *nv_encoder;
2671 struct drm_encoder *encoder;
2672 int type = DRM_MODE_ENCODER_DAC;
2674 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
2677 nv_encoder->dcb = dcbe;
2679 bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
2681 nv_encoder->i2c = &bus->i2c;
2683 encoder = to_drm_encoder(nv_encoder);
2684 encoder->possible_crtcs = dcbe->heads;
2685 encoder->possible_clones = 0;
2686 drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
2687 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
2688 drm_encoder_helper_add(encoder, &nv50_dac_help);
2690 drm_mode_connector_attach_encoder(connector, encoder);
2694 /******************************************************************************
2695 * Audio
2696 *****************************************************************************/
2698 nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
2700 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2701 struct nv50_disp *disp = nv50_disp(encoder->dev);
2703 struct nv50_disp_mthd_v1 base;
2704 struct nv50_disp_sor_hda_eld_v0 eld;
2707 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
2708 .base.hasht = nv_encoder->dcb->hasht,
2709 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2710 (0x0100 << nv_crtc->index),
2713 nvif_mthd(disp->disp, 0, &args, sizeof(args));
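/* Push the connector's cached ELD to the SOR so the HDA codec can expose
 * the monitor's audio capabilities; skipped when the EDID reports no audio
 * support.
 */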
2717 nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
2719 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2720 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2721 struct nouveau_connector *nv_connector;
2722 struct nv50_disp *disp = nv50_disp(encoder->dev);
2725 struct nv50_disp_mthd_v1 mthd;
2726 struct nv50_disp_sor_hda_eld_v0 eld;
2728 u8 data[sizeof(nv_connector->base.eld)];
2730 .base.mthd.version = 1,
2731 .base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
2732 .base.mthd.hasht = nv_encoder->dcb->hasht,
2733 .base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2734 (0x0100 << nv_crtc->index),
2737 nv_connector = nouveau_encoder_connector_get(nv_encoder);
2738 if (!drm_detect_monitor_audio(nv_connector->edid))
2741 memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
2743 nvif_mthd(disp->disp, 0, &args,
2744 sizeof(args.base) + drm_eld_size(args.data));
2747 /******************************************************************************
2748 * HDMI
2749 *****************************************************************************/
2751 nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
2753 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2754 struct nv50_disp *disp = nv50_disp(encoder->dev);
2756 struct nv50_disp_mthd_v1 base;
2757 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
2760 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
2761 .base.hasht = nv_encoder->dcb->hasht,
2762 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2763 (0x0100 << nv_crtc->index),
2766 nvif_mthd(disp->disp, 0, &args, sizeof(args));
2770 nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
2772 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2773 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2774 struct nv50_disp *disp = nv50_disp(encoder->dev);
2776 struct nv50_disp_mthd_v1 base;
2777 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
2778 u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
2781 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
2782 .base.hasht = nv_encoder->dcb->hasht,
2783 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
2784 (0x0100 << nv_crtc->index),
2786 .pwr.rekey = 56, /* binary driver, and tegra, constant */
2788 struct nouveau_connector *nv_connector;
2790 union hdmi_infoframe avi_frame;
2791 union hdmi_infoframe vendor_frame;
2795 nv_connector = nouveau_encoder_connector_get(nv_encoder);
2796 if (!drm_detect_hdmi_monitor(nv_connector->edid))
2799 ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
2802 /* We have an AVI InfoFrame, populate it to the display */
2803 args.pwr.avi_infoframe_length
2804 = hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
2807 ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
2808 &nv_connector->base, mode);
2810 /* We have a Vendor InfoFrame, populate it to the display */
2811 args.pwr.vendor_infoframe_length
2812 = hdmi_infoframe_pack(&vendor_frame,
2814 + args.pwr.avi_infoframe_length,
2818 max_ac_packet = mode->htotal - mode->hdisplay;
2819 max_ac_packet -= args.pwr.rekey;
2820 max_ac_packet -= 18; /* constant from tegra */
2821 args.pwr.max_ac_packet = max_ac_packet / 32;
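/* Audio/auxiliary packets must fit into the horizontal blanking period,
 * less the rekey window and an 18-pixel fudge factor, in units of 32
 * pixels, e.g. 1920x1080 with htotal 2200: (2200 - 1920 - 56 - 18) / 32 = 6.
 */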
2823 size = sizeof(args.base)
2825 + args.pwr.avi_infoframe_length
2826 + args.pwr.vendor_infoframe_length;
2827 nvif_mthd(disp->disp, 0, &args, size);
2828 nv50_audio_enable(encoder, mode);
2831 /******************************************************************************
2832 * MST
2833 *****************************************************************************/
2834 #define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
2835 #define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
2836 #define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
2839 struct nouveau_encoder *outp;
2841 struct drm_dp_mst_topology_mgr mgr;
2842 struct nv50_msto *msto[4];
2850 struct nv50_mstm *mstm;
2851 struct drm_dp_mst_port *port;
2852 struct drm_connector connector;
2854 struct drm_display_mode *native;
2861 struct drm_encoder encoder;
2863 struct nv50_head *head;
2864 struct nv50_mstc *mstc;
2868 static struct drm_dp_payload *
2869 nv50_msto_payload(struct nv50_msto *msto)
2871 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2872 struct nv50_mstc *mstc = msto->mstc;
2873 struct nv50_mstm *mstm = mstc->mstm;
2874 int vcpi = mstc->port->vcpi.vcpi, i;
2876 NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
2877 for (i = 0; i < mstm->mgr.max_payloads; i++) {
2878 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
2879 NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
2880 mstm->outp->base.base.name, i, payload->vcpi,
2881 payload->start_slot, payload->num_slots);
2884 for (i = 0; i < mstm->mgr.max_payloads; i++) {
2885 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
2886 if (payload->vcpi == vcpi)
2894 nv50_msto_cleanup(struct nv50_msto *msto)
2896 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2897 struct nv50_mstc *mstc = msto->mstc;
2898 struct nv50_mstm *mstm = mstc->mstm;
2900 NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
2901 if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
2902 drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
2903 if (msto->disabled) {
2906 msto->disabled = false;
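/* Program this head's VCPI allocation (start slot, slot count, PBN) into
 * the SOR via the DP_MST_VCPI method; with no active payload the args stay
 * zeroed, which removes the head from the payload table.
 */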
2911 nv50_msto_prepare(struct nv50_msto *msto)
2913 struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2914 struct nv50_mstc *mstc = msto->mstc;
2915 struct nv50_mstm *mstm = mstc->mstm;
2917 struct nv50_disp_mthd_v1 base;
2918 struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
2921 .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
2922 .base.hasht = mstm->outp->dcb->hasht,
2923 .base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
2924 (0x0100 << msto->head->base.index),
2927 NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
2928 if (mstc->port && mstc->port->vcpi.vcpi > 0) {
2929 struct drm_dp_payload *payload = nv50_msto_payload(msto);
2931 args.vcpi.start_slot = payload->start_slot;
2932 args.vcpi.num_slots = payload->num_slots;
2933 args.vcpi.pbn = mstc->port->vcpi.pbn;
2934 args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
2938 NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
2939 msto->encoder.name, msto->head->base.base.name,
2940 args.vcpi.start_slot, args.vcpi.num_slots,
2941 args.vcpi.pbn, args.vcpi.aligned_pbn);
2942 nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
2946 nv50_msto_atomic_check(struct drm_encoder *encoder,
2947 struct drm_crtc_state *crtc_state,
2948 struct drm_connector_state *conn_state)
2950 struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
2951 struct nv50_mstm *mstm = mstc->mstm;
2952 int bpp = conn_state->connector->display_info.bpc * 3;
2955 mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
2957 slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
2961 return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
2966 nv50_msto_enable(struct drm_encoder *encoder)
2968 struct nv50_head *head = nv50_head(encoder->crtc);
2969 struct nv50_msto *msto = nv50_msto(encoder);
2970 struct nv50_mstc *mstc = NULL;
2971 struct nv50_mstm *mstm = NULL;
2972 struct drm_connector *connector;
2973 struct drm_connector_list_iter conn_iter;
2978 drm_connector_list_iter_begin(encoder->dev, &conn_iter);
2979 drm_for_each_connector_iter(connector, &conn_iter) {
2980 if (connector->state->best_encoder == &msto->encoder) {
2981 mstc = nv50_mstc(connector);
2986 drm_connector_list_iter_end(&conn_iter);
2991 slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
2992 r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
2996 nv50_outp_acquire(mstm->outp);
2998 if (mstm->outp->link & 1)
3003 switch (mstc->connector.display_info.bpc) {
3004 case 6: depth = 0x2; break;
3005 case 8: depth = 0x5; break;
3007 default: depth = 0x6; break;
3010 mstm->outp->update(mstm->outp, head->base.index,
3011 &head->base.base.state->adjusted_mode, proto, depth);
3015 mstm->modified = true;
3019 nv50_msto_disable(struct drm_encoder *encoder)
3021 struct nv50_msto *msto = nv50_msto(encoder);
3022 struct nv50_mstc *mstc = msto->mstc;
3023 struct nv50_mstm *mstm = mstc->mstm;
3026 drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
3028 mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
3029 mstm->modified = true;
3031 mstm->disabled = true;
3032 msto->disabled = true;
3035 static const struct drm_encoder_helper_funcs
3036 nv50_msto_help = {
3037 .disable = nv50_msto_disable,
3038 .enable = nv50_msto_enable,
3039 .atomic_check = nv50_msto_atomic_check,
3043 nv50_msto_destroy(struct drm_encoder *encoder)
3045 struct nv50_msto *msto = nv50_msto(encoder);
3046 drm_encoder_cleanup(&msto->encoder);
3050 static const struct drm_encoder_funcs
3051 nv50_msto = {
3052 .destroy = nv50_msto_destroy,
3056 nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
3057 struct nv50_msto **pmsto)
3059 struct nv50_msto *msto;
3062 if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
3065 ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
3066 DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
3073 drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
3074 msto->encoder.possible_crtcs = heads;
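/* Each MST topology owns one fake encoder per head (mstm->msto[]); the
 * atomic hook below simply returns the encoder matching the CRTC's head
 * index, keeping stream-to-encoder assignment stable.
 */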
3078 static struct drm_encoder *
3079 nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
3080 struct drm_connector_state *connector_state)
3082 struct nv50_head *head = nv50_head(connector_state->crtc);
3083 struct nv50_mstc *mstc = nv50_mstc(connector);
3085 struct nv50_mstm *mstm = mstc->mstm;
3086 return &mstm->msto[head->base.index]->encoder;
3091 static struct drm_encoder *
3092 nv50_mstc_best_encoder(struct drm_connector *connector)
3094 struct nv50_mstc *mstc = nv50_mstc(connector);
3096 struct nv50_mstm *mstm = mstc->mstm;
3097 return &mstm->msto[0]->encoder;
3102 static enum drm_mode_status
3103 nv50_mstc_mode_valid(struct drm_connector *connector,
3104 struct drm_display_mode *mode)
3110 nv50_mstc_get_modes(struct drm_connector *connector)
3112 struct nv50_mstc *mstc = nv50_mstc(connector);
3115 mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
3116 drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
3118 ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
3120 if (!mstc->connector.display_info.bpc)
3121 mstc->connector.display_info.bpc = 8;
3124 drm_mode_destroy(mstc->connector.dev, mstc->native);
3125 mstc->native = nouveau_conn_native_mode(&mstc->connector);
3129 static const struct drm_connector_helper_funcs
3130 nv50_mstc_help = {
3131 .get_modes = nv50_mstc_get_modes,
3132 .mode_valid = nv50_mstc_mode_valid,
3133 .best_encoder = nv50_mstc_best_encoder,
3134 .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
3137 static enum drm_connector_status
3138 nv50_mstc_detect(struct drm_connector *connector, bool force)
3140 struct nv50_mstc *mstc = nv50_mstc(connector);
3142 return connector_status_disconnected;
3143 return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
3147 nv50_mstc_destroy(struct drm_connector *connector)
3149 struct nv50_mstc *mstc = nv50_mstc(connector);
3150 drm_connector_cleanup(&mstc->connector);
3154 static const struct drm_connector_funcs
3155 nv50_mstc = {
3156 .reset = nouveau_conn_reset,
3157 .detect = nv50_mstc_detect,
3158 .fill_modes = drm_helper_probe_single_connector_modes,
3159 .destroy = nv50_mstc_destroy,
3160 .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
3161 .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
3162 .atomic_set_property = nouveau_conn_atomic_set_property,
3163 .atomic_get_property = nouveau_conn_atomic_get_property,
3167 nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
3168 const char *path, struct nv50_mstc **pmstc)
3170 struct drm_device *dev = mstm->outp->base.base.dev;
3171 struct nv50_mstc *mstc;
3174 if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
3179 ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
3180 DRM_MODE_CONNECTOR_DisplayPort);
3187 drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
3189 mstc->connector.funcs->reset(&mstc->connector);
3190 nouveau_conn_attach_properties(&mstc->connector);
3192 for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
3193 drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
3195 drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
3196 drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
3197 drm_mode_connector_set_path_property(&mstc->connector, path);
3202 nv50_mstm_cleanup(struct nv50_mstm *mstm)
3204 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3205 struct drm_encoder *encoder;
3208 NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
3209 ret = drm_dp_check_act_status(&mstm->mgr);
3211 ret = drm_dp_update_payload_part2(&mstm->mgr);
3213 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3214 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3215 struct nv50_msto *msto = nv50_msto(encoder);
3216 struct nv50_mstc *mstc = msto->mstc;
3217 if (mstc && mstc->mstm == mstm)
3218 nv50_msto_cleanup(msto);
3222 mstm->modified = false;
3226 nv50_mstm_prepare(struct nv50_mstm *mstm)
3228 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3229 struct drm_encoder *encoder;
3232 NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
3233 ret = drm_dp_update_payload_part1(&mstm->mgr);
3235 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3236 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3237 struct nv50_msto *msto = nv50_msto(encoder);
3238 struct nv50_mstc *mstc = msto->mstc;
3239 if (mstc && mstc->mstm == mstm)
3240 nv50_msto_prepare(msto);
3244 if (mstm->disabled) {
3246 nv50_outp_release(mstm->outp);
3247 mstm->disabled = false;
3252 nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
3254 struct nv50_mstm *mstm = nv50_mstm(mgr);
3255 drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
3259 nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
3260 struct drm_connector *connector)
3262 struct nouveau_drm *drm = nouveau_drm(connector->dev);
3263 struct nv50_mstc *mstc = nv50_mstc(connector);
3265 drm_connector_unregister(&mstc->connector);
3267 drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
3269 drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
3271 drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
3273 drm_connector_unreference(&mstc->connector);
3277 nv50_mstm_register_connector(struct drm_connector *connector)
3279 struct nouveau_drm *drm = nouveau_drm(connector->dev);
3281 drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
3283 drm_connector_register(connector);
3286 static struct drm_connector *
3287 nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
3288 struct drm_dp_mst_port *port, const char *path)
3290 struct nv50_mstm *mstm = nv50_mstm(mgr);
3291 struct nv50_mstc *mstc;
3294 ret = nv50_mstc_new(mstm, port, path, &mstc);
3297 mstc->connector.funcs->destroy(&mstc->connector);
3301 return &mstc->connector;
3304 static const struct drm_dp_mst_topology_cbs
3305 nv50_mstm = {
3306 .add_connector = nv50_mstm_add_connector,
3307 .register_connector = nv50_mstm_register_connector,
3308 .destroy_connector = nv50_mstm_destroy_connector,
3309 .hotplug = nv50_mstm_hotplug,
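/* Service an MST interrupt: read the sink's ESI block, let the topology
 * manager handle any hotplug or connection-status change, then ack the
 * handled event bits back to the sink.
 */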
3313 nv50_mstm_service(struct nv50_mstm *mstm)
3315 struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
3316 bool handled = true;
3324 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
3326 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
3330 drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
3334 drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
3339 nv50_mstm_remove(struct nv50_mstm *mstm)
3342 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
3346 nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
3348 struct nouveau_encoder *outp = mstm->outp;
3350 struct nv50_disp_mthd_v1 base;
3351 struct nv50_disp_sor_dp_mst_link_v0 mst;
3354 .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
3355 .base.hasht = outp->dcb->hasht,
3356 .base.hashm = outp->dcb->hashm,
3359 struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
3360 struct nvif_object *disp = &drm->display->disp;
3364 ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
3372 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
3377 return nvif_mthd(disp, 0, &args, sizeof(args));
3381 nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
3388 if (dpcd[0] >= 0x12) {
3389 ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
3393 if (!(dpcd[1] & DP_MST_CAP))
3399 ret = nv50_mstm_enable(mstm, dpcd[0], state);
3403 ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
3405 return nv50_mstm_enable(mstm, dpcd[0], 0);
3407 return mstm->mgr.mst_state;
3411 nv50_mstm_fini(struct nv50_mstm *mstm)
3413 if (mstm && mstm->mgr.mst_state)
3414 drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
3418 nv50_mstm_init(struct nv50_mstm *mstm)
3420 if (mstm && mstm->mgr.mst_state)
3421 drm_dp_mst_topology_mgr_resume(&mstm->mgr);
3425 nv50_mstm_del(struct nv50_mstm **pmstm)
3427 struct nv50_mstm *mstm = *pmstm;
3435 nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
3436 int conn_base_id, struct nv50_mstm **pmstm)
3438 const int max_payloads = hweight8(outp->dcb->heads);
3439 struct drm_device *dev = outp->base.base.dev;
3440 struct nv50_mstm *mstm;
3444 /* This is a workaround for some monitors not functioning
3445 * correctly in MST mode on initial module load. I think
3446 * some bad interaction with the VBIOS may be responsible.
3448 * A good ol' off and on again seems to work here ;)
3450 ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
3451 if (ret >= 0 && dpcd >= 0x12)
3452 drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
3454 if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
3457 mstm->mgr.cbs = &nv50_mstm;
3459 ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
3460 max_payloads, conn_base_id);
3464 for (i = 0; i < max_payloads; i++) {
3465 ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
3474 /******************************************************************************
3475 * SOR
3476 *****************************************************************************/
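/* An SOR's control word packs the heads it drives into the low bits and
 * the protocol into bits 8+; pre-GF110 also carries sync polarity and
 * depth here, while GF110+ moved those into a per-head method.
 */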
3478 nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
3479 struct drm_display_mode *mode, u8 proto, u8 depth)
3481 struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
3485 nv_encoder->ctrl &= ~BIT(head);
3486 if (!(nv_encoder->ctrl & 0x0000000f))
3487 nv_encoder->ctrl = 0;
3489 nv_encoder->ctrl |= proto << 8;
3490 nv_encoder->ctrl |= BIT(head);
3493 if ((push = evo_wait(core, 6))) {
3494 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
3496 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
3497 nv_encoder->ctrl |= 0x00001000;
3498 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
3499 nv_encoder->ctrl |= 0x00002000;
3500 nv_encoder->ctrl |= depth << 16;
3502 evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
3505 u32 magic = 0x31ec6000 | (head << 25);
3506 u32 syncs = 0x00000001;
3507 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
3508 syncs |= 0x00000008;
3509 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
3510 syncs |= 0x00000010;
3511 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
3512 magic |= 0x00000001;
3514 evo_mthd(push, 0x0404 + (head * 0x300), 2);
3515 evo_data(push, syncs | (depth << 6));
3516 evo_data(push, magic);
3518 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
3520 evo_data(push, nv_encoder->ctrl);
3521 evo_kick(push, core);
3526 nv50_sor_disable(struct drm_encoder *encoder)
3528 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3529 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
3531 nv_encoder->crtc = NULL;
3534 struct nvkm_i2c_aux *aux = nv_encoder->aux;
3538 int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
3540 pwr &= ~DP_SET_POWER_MASK;
3541 pwr |= DP_SET_POWER_D3;
3542 nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
3546 nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
3547 nv50_audio_disable(encoder, nv_crtc);
3548 nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
3549 nv50_outp_release(nv_encoder);
3554 nv50_sor_enable(struct drm_encoder *encoder)
3556 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3557 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
3558 struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
3560 struct nv50_disp_mthd_v1 base;
3561 struct nv50_disp_sor_lvds_script_v0 lvds;
3564 .base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
3565 .base.hasht = nv_encoder->dcb->hasht,
3566 .base.hashm = nv_encoder->dcb->hashm,
3568 struct nv50_disp *disp = nv50_disp(encoder->dev);
3569 struct drm_device *dev = encoder->dev;
3570 struct nouveau_drm *drm = nouveau_drm(dev);
3571 struct nouveau_connector *nv_connector;
3572 struct nvbios *bios = &drm->vbios;
3576 nv_connector = nouveau_encoder_connector_get(nv_encoder);
3577 nv_encoder->crtc = encoder->crtc;
3578 nv50_outp_acquire(nv_encoder);
3580 switch (nv_encoder->dcb->type) {
3581 case DCB_OUTPUT_TMDS:
3582 if (nv_encoder->link & 1) {
3584 /* Only enable dual-link if:
3585 * - Need to (i.e. rate > 165MHz)
3586 * - DCB says we can
3587 * - Not an HDMI monitor, since there's no dual-link
3588 * on HDMI.
3589 */
3590 if (mode->clock >= 165000 &&
3591 nv_encoder->dcb->duallink_possible &&
3592 !drm_detect_hdmi_monitor(nv_connector->edid))
3598 nv50_hdmi_enable(&nv_encoder->base.base, mode);
3600 case DCB_OUTPUT_LVDS:
3603 if (bios->fp_no_ddc) {
3604 if (bios->fp.dual_link)
3605 lvds.lvds.script |= 0x0100;
3606 if (bios->fp.if_is_24bit)
3607 lvds.lvds.script |= 0x0200;
3609 if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
3610 if (((u8 *)nv_connector->edid)[121] == 2)
3611 lvds.lvds.script |= 0x0100;
3613 if (mode->clock >= bios->fp.duallink_transition_clk) {
3614 lvds.lvds.script |= 0x0100;
3617 if (lvds.lvds.script & 0x0100) {
3618 if (bios->fp.strapless_is_24bit & 2)
3619 lvds.lvds.script |= 0x0200;
3621 if (bios->fp.strapless_is_24bit & 1)
3622 lvds.lvds.script |= 0x0200;
3625 if (nv_connector->base.display_info.bpc == 8)
3626 lvds.lvds.script |= 0x0200;
3629 nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
3632 if (nv_connector->base.display_info.bpc == 6)
3635 if (nv_connector->base.display_info.bpc == 8)
3640 if (nv_encoder->link & 1)
3645 nv50_audio_enable(encoder, mode);
3652 nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
3655 static const struct drm_encoder_helper_funcs
3656 nv50_sor_help = {
3657 .atomic_check = nv50_outp_atomic_check,
3658 .enable = nv50_sor_enable,
3659 .disable = nv50_sor_disable,
3663 nv50_sor_destroy(struct drm_encoder *encoder)
3665 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3666 nv50_mstm_del(&nv_encoder->dp.mstm);
3667 drm_encoder_cleanup(encoder);
3671 static const struct drm_encoder_funcs
3672 nv50_sor_func = {
3673 .destroy = nv50_sor_destroy,
3677 nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
3679 struct nouveau_connector *nv_connector = nouveau_connector(connector);
3680 struct nouveau_drm *drm = nouveau_drm(connector->dev);
3681 struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
3682 struct nouveau_encoder *nv_encoder;
3683 struct drm_encoder *encoder;
3686 switch (dcbe->type) {
3687 case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
3688 case DCB_OUTPUT_TMDS:
3691 type = DRM_MODE_ENCODER_TMDS;
3695 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
3698 nv_encoder->dcb = dcbe;
3699 nv_encoder->update = nv50_sor_update;
3701 encoder = to_drm_encoder(nv_encoder);
3702 encoder->possible_crtcs = dcbe->heads;
3703 encoder->possible_clones = 0;
3704 drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
3705 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
3706 drm_encoder_helper_add(encoder, &nv50_sor_help);
3708 drm_mode_connector_attach_encoder(connector, encoder);
3710 if (dcbe->type == DCB_OUTPUT_DP) {
3711 struct nv50_disp *disp = nv50_disp(encoder->dev);
3712 struct nvkm_i2c_aux *aux =
3713 nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
3715 if (disp->disp->oclass < GF110_DISP) {
3716 /* HW has no support for address-only
3717 * transactions, so we're required to
3718 * use custom I2C-over-AUX code.
3720 nv_encoder->i2c = &aux->i2c;
3722 nv_encoder->i2c = &nv_connector->aux.ddc;
3724 nv_encoder->aux = aux;
3727 /*TODO: Use DP Info Table to check for support. */
3728 if (disp->disp->oclass >= GF110_DISP) {
3729 ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
3730 nv_connector->base.base.id,
3731 &nv_encoder->dp.mstm);
3736 struct nvkm_i2c_bus *bus =
3737 nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
3739 nv_encoder->i2c = &bus->i2c;
3745 /******************************************************************************
3746 * PIOR
3747 *****************************************************************************/
3749 nv50_pior_atomic_check(struct drm_encoder *encoder,
3750 struct drm_crtc_state *crtc_state,
3751 struct drm_connector_state *conn_state)
3753 int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
3756 crtc_state->adjusted_mode.clock *= 2;
3761 nv50_pior_disable(struct drm_encoder *encoder)
3763 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3764 struct nv50_mast *mast = nv50_mast(encoder->dev);
3765 const int or = nv_encoder->or;
3768 if (nv_encoder->crtc) {
3769 push = evo_wait(mast, 4);
3771 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
3772 evo_mthd(push, 0x0700 + (or * 0x040), 1);
3773 evo_data(push, 0x00000000);
3775 evo_kick(push, mast);
3779 nv_encoder->crtc = NULL;
3780 nv50_outp_release(nv_encoder);
3784 nv50_pior_enable(struct drm_encoder *encoder)
3786 struct nv50_mast *mast = nv50_mast(encoder->dev);
3787 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3788 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
3789 struct nouveau_connector *nv_connector;
3790 struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
3791 u8 owner = 1 << nv_crtc->index;
3795 nv50_outp_acquire(nv_encoder);
3797 nv_connector = nouveau_encoder_connector_get(nv_encoder);
3798 switch (nv_connector->base.display_info.bpc) {
3799 case 10: depth = 0x6; break;
3800 case 8: depth = 0x5; break;
3801 case 6: depth = 0x2; break;
3802 default: depth = 0x0; break;
3805 switch (nv_encoder->dcb->type) {
3806 case DCB_OUTPUT_TMDS:
3815 push = evo_wait(mast, 8);
3817 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
3818 u32 ctrl = (depth << 16) | (proto << 8) | owner;
3819 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
3821 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
3823 evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
3824 evo_data(push, ctrl);
3827 evo_kick(push, mast);
3830 nv_encoder->crtc = encoder->crtc;
3833 static const struct drm_encoder_helper_funcs
3834 nv50_pior_help = {
3835 .atomic_check = nv50_pior_atomic_check,
3836 .enable = nv50_pior_enable,
3837 .disable = nv50_pior_disable,
3841 nv50_pior_destroy(struct drm_encoder *encoder)
3843 drm_encoder_cleanup(encoder);
3847 static const struct drm_encoder_funcs
3848 nv50_pior_func = {
3849 .destroy = nv50_pior_destroy,
3853 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
3855 struct nouveau_connector *nv_connector = nouveau_connector(connector);
3856 struct nouveau_drm *drm = nouveau_drm(connector->dev);
3857 struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
3858 struct nvkm_i2c_bus *bus = NULL;
3859 struct nvkm_i2c_aux *aux = NULL;
3860 struct i2c_adapter *ddc;
3861 struct nouveau_encoder *nv_encoder;
3862 struct drm_encoder *encoder;
3865 switch (dcbe->type) {
3866 case DCB_OUTPUT_TMDS:
3867 bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
3868 ddc = bus ? &bus->i2c : NULL;
3869 type = DRM_MODE_ENCODER_TMDS;
3872 aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
3873 ddc = aux ? &nv_connector->aux.ddc : NULL;
3874 type = DRM_MODE_ENCODER_TMDS;
3880 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
3883 nv_encoder->dcb = dcbe;
3884 nv_encoder->i2c = ddc;
3885 nv_encoder->aux = aux;
3887 encoder = to_drm_encoder(nv_encoder);
3888 encoder->possible_crtcs = dcbe->heads;
3889 encoder->possible_clones = 0;
3890 drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
3891 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
3892 drm_encoder_helper_add(encoder, &nv50_pior_help);
3894 drm_mode_connector_attach_encoder(connector, encoder);
3898 /******************************************************************************
3899 * Atomic
3900 *****************************************************************************/
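/* Kick a core channel update: prepare any modified MST topologies, submit
 * the update method with the caller's interlock mask, then poll the shared
 * sync buffer (up to 2s) for the completion notifier before letting the
 * MST code clean up.
 */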
3903 nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
3905 struct nv50_disp *disp = nv50_disp(drm->dev);
3906 struct nv50_dmac *core = &disp->mast.base;
3907 struct nv50_mstm *mstm;
3908 struct drm_encoder *encoder;
3911 NV_ATOMIC(drm, "commit core %08x\n", interlock);
3913 drm_for_each_encoder(encoder, drm->dev) {
3914 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
3915 mstm = nouveau_encoder(encoder)->dp.mstm;
3916 if (mstm && mstm->modified)
3917 nv50_mstm_prepare(mstm);
3921 if ((push = evo_wait(core, 5))) {
3922 evo_mthd(push, 0x0084, 1);
3923 evo_data(push, 0x80000000);
3924 evo_mthd(push, 0x0080, 2);
3925 evo_data(push, interlock);
3926 evo_data(push, 0x00000000);
3927 nouveau_bo_wr32(disp->sync, 0, 0x00000000);
3928 evo_kick(push, core);
3929 if (nvif_msec(&drm->client.device, 2000ULL,
3930 if (nouveau_bo_rd32(disp->sync, 0))
3934 NV_ERROR(drm, "EVO timeout\n");
3937 drm_for_each_encoder(encoder, drm->dev) {
3938 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
3939 mstm = nouveau_encoder(encoder)->dp.mstm;
3940 if (mstm && mstm->modified)
3941 nv50_mstm_cleanup(mstm);
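/* Commits apply in a fixed order: disable heads, planes and output paths
 * first (flushing early if an output path requires it), then enable output
 * paths, heads and planes, and finally kick the core channel once if
 * anything needed interlocking.
 */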
3947 nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
3949 struct drm_device *dev = state->dev;
3950 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
3951 struct drm_crtc *crtc;
3952 struct drm_plane_state *new_plane_state;
3953 struct drm_plane *plane;
3954 struct nouveau_drm *drm = nouveau_drm(dev);
3955 struct nv50_disp *disp = nv50_disp(dev);
3956 struct nv50_atom *atom = nv50_atom(state);
3957 struct nv50_outp_atom *outp, *outt;
3958 u32 interlock_core = 0;
3959 u32 interlock_chan = 0;
3962 NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
3963 drm_atomic_helper_wait_for_fences(dev, state, false);
3964 drm_atomic_helper_wait_for_dependencies(state);
3965 drm_atomic_helper_update_legacy_modeset_state(dev, state);
3967 if (atom->lock_core)
3968 mutex_lock(&disp->mutex);
3970 /* Disable head(s). */
3971 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3972 struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
3973 struct nv50_head *head = nv50_head(crtc);
3975 NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
3976 asyh->clr.mask, asyh->set.mask);
3977 if (old_crtc_state->active && !new_crtc_state->active)
3978 drm_crtc_vblank_off(crtc);
3980 if (asyh->clr.mask) {
3981 nv50_head_flush_clr(head, asyh, atom->flush_disable);
3982 interlock_core |= 1;
3986 /* Disable plane(s). */
3987 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
3988 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
3989 struct nv50_wndw *wndw = nv50_wndw(plane);
3991 NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
3992 asyw->clr.mask, asyw->set.mask);
3993 if (!asyw->clr.mask)
3996 interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
3997 atom->flush_disable,
4001 /* Disable output path(s). */
4002 list_for_each_entry(outp, &atom->outp, head) {
4003 const struct drm_encoder_helper_funcs *help;
4004 struct drm_encoder *encoder;
4006 encoder = outp->encoder;
4007 help = encoder->helper_private;
4009 NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
4010 outp->clr.mask, outp->set.mask);
4012 if (outp->clr.mask) {
4013 help->disable(encoder);
4014 interlock_core |= 1;
4015 if (outp->flush_disable) {
4016 nv50_disp_atomic_commit_core(drm, interlock_chan);
4023 /* Flush disable. */
4024 if (interlock_core) {
4025 if (atom->flush_disable) {
4026 nv50_disp_atomic_commit_core(drm, interlock_chan);
4032 /* Update output path(s). */
4033 list_for_each_entry_safe(outp, outt, &atom->outp, head) {
4034 const struct drm_encoder_helper_funcs *help;
4035 struct drm_encoder *encoder;
4037 encoder = outp->encoder;
4038 help = encoder->helper_private;
4040 NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
4041 outp->set.mask, outp->clr.mask);
4043 if (outp->set.mask) {
4044 help->enable(encoder);
4048 list_del(&outp->head);
4052 /* Update head(s). */
4053 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4054 struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
4055 struct nv50_head *head = nv50_head(crtc);
4057 NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
4058 asyh->set.mask, asyh->clr.mask);
4060 if (asyh->set.mask) {
4061 nv50_head_flush_set(head, asyh);
4065 if (new_crtc_state->active) {
4066 if (!old_crtc_state->active)
4067 drm_crtc_vblank_on(crtc);
4068 if (new_crtc_state->event)
4069 drm_crtc_vblank_get(crtc);
4073 /* Update plane(s). */
4074 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
4075 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
4076 struct nv50_wndw *wndw = nv50_wndw(plane);
4078 NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
4079 asyw->set.mask, asyw->clr.mask);
4080 if ( !asyw->set.mask &&
4081 (!asyw->clr.mask || atom->flush_disable))
4084 interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
4088 if (interlock_core) {
4089 if (!interlock_chan && atom->state.legacy_cursor_update) {
4090 u32 *push = evo_wait(&disp->mast, 2);
4092 evo_mthd(push, 0x0080, 1);
4093 evo_data(push, 0x00000000);
4094 evo_kick(push, &disp->mast);
4097 nv50_disp_atomic_commit_core(drm, interlock_chan);
4101 if (atom->lock_core)
4102 mutex_unlock(&disp->mutex);
4104 /* Wait for HW to signal completion. */
4105 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
4106 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
4107 struct nv50_wndw *wndw = nv50_wndw(plane);
4108 int ret = nv50_wndw_wait_armed(wndw, asyw);
4110 NV_ERROR(drm, "%s: timeout\n", plane->name);
4113 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4114 if (new_crtc_state->event) {
4115 unsigned long flags;
4116 /* Get correct count/ts if racing with vblank irq */
4117 if (new_crtc_state->active)
4118 drm_crtc_accurate_vblank_count(crtc);
4119 spin_lock_irqsave(&crtc->dev->event_lock, flags);
4120 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
4121 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4123 new_crtc_state->event = NULL;
4124 if (new_crtc_state->active)
4125 drm_crtc_vblank_put(crtc);
4129 drm_atomic_helper_commit_hw_done(state);
4130 drm_atomic_helper_cleanup_planes(dev, state);
4131 drm_atomic_helper_commit_cleanup_done(state);
4132 drm_atomic_state_put(state);
4136 nv50_disp_atomic_commit_work(struct work_struct *work)
4138 struct drm_atomic_state *state =
4139 container_of(work, typeof(*state), commit_work);
4140 nv50_disp_atomic_commit_tail(state);
4144 nv50_disp_atomic_commit(struct drm_device *dev,
4145 struct drm_atomic_state *state, bool nonblock)
4147 struct nouveau_drm *drm = nouveau_drm(dev);
4148 struct nv50_disp *disp = nv50_disp(dev);
4149 struct drm_plane_state *new_plane_state;
4150 struct drm_plane *plane;
4151 struct drm_crtc *crtc;
4152 bool active = false;
4155 ret = pm_runtime_get_sync(dev->dev);
4156 if (ret < 0 && ret != -EACCES)
4159 ret = drm_atomic_helper_setup_commit(state, nonblock);
4163 INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
4165 ret = drm_atomic_helper_prepare_planes(dev, state);
4170 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
4175 ret = drm_atomic_helper_swap_state(state, true);
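/* Arm a completion notifier on every plane taking a new image so the tail
 * worker can wait for the hardware to signal that it latched the flip.
 */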
4179 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
4180 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
4181 struct nv50_wndw *wndw = nv50_wndw(plane);
4183 if (asyw->set.image) {
4184 asyw->ntfy.handle = wndw->dmac->sync.handle;
4185 asyw->ntfy.offset = wndw->ntfy;
4186 asyw->ntfy.awaken = false;
4187 asyw->set.ntfy = true;
4188 nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
4193 drm_atomic_state_get(state);
4196 queue_work(system_unbound_wq, &state->commit_work);
4198 nv50_disp_atomic_commit_tail(state);
4200 drm_for_each_crtc(crtc, dev) {
4201 if (crtc->state->enable) {
4202 if (!drm->have_disp_power_ref) {
4203 drm->have_disp_power_ref = true;
4211 if (!active && drm->have_disp_power_ref) {
4212 pm_runtime_put_autosuspend(dev->dev);
4213 drm->have_disp_power_ref = false;
4218 drm_atomic_helper_cleanup_planes(dev, state);
4220 pm_runtime_put_autosuspend(dev->dev);
4224 static struct nv50_outp_atom *
4225 nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
4227 struct nv50_outp_atom *outp;
4229 list_for_each_entry(outp, &atom->outp, head) {
4230 if (outp->encoder == encoder)
4234 outp = kzalloc(sizeof(*outp), GFP_KERNEL);
4236 return ERR_PTR(-ENOMEM);
4238 list_add(&outp->head, &atom->outp);
4239 outp->encoder = encoder;
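/* nv50_disp_outp_atomic_add() returns the existing entry when an encoder
 * is already tracked, so each encoder appears on the atom's outp list at
 * most once per commit.
 */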
4244 nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
4245 struct drm_connector_state *old_connector_state)
4247 struct drm_encoder *encoder = old_connector_state->best_encoder;
4248 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4249 struct drm_crtc *crtc;
4250 struct nv50_outp_atom *outp;
4252 if (!(crtc = old_connector_state->crtc))
4255 old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
4256 new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
4257 if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
4258 outp = nv50_disp_outp_atomic_add(atom, encoder);
4260 return PTR_ERR(outp);
4262 if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
4263 outp->flush_disable = true;
4264 atom->flush_disable = true;
4266 outp->clr.ctrl = true;
4267 atom->lock_core = true;
4274 nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
4275 struct drm_connector_state *connector_state)
4277 struct drm_encoder *encoder = connector_state->best_encoder;
4278 struct drm_crtc_state *new_crtc_state;
4279 struct drm_crtc *crtc;
4280 struct nv50_outp_atom *outp;
4282 if (!(crtc = connector_state->crtc))
4285 new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
4286 if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
4287 outp = nv50_disp_outp_atomic_add(atom, encoder);
4289 return PTR_ERR(outp);
4291 outp->set.ctrl = true;
4292 atom->lock_core = true;
4299 nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
4301 struct nv50_atom *atom = nv50_atom(state);
4302 struct drm_connector_state *old_connector_state, *new_connector_state;
4303 struct drm_connector *connector;
4306 ret = drm_atomic_helper_check(dev, state);
4310 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
4311 ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
4315 ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
4324 nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
4326 struct nv50_atom *atom = nv50_atom(state);
4327 struct nv50_outp_atom *outp, *outt;
4329 list_for_each_entry_safe(outp, outt, &atom->outp, head) {
4330 list_del(&outp->head);
4334 drm_atomic_state_default_clear(state);
4338 nv50_disp_atomic_state_free(struct drm_atomic_state *state)
4340 struct nv50_atom *atom = nv50_atom(state);
4341 drm_atomic_state_default_release(&atom->state);
4345 static struct drm_atomic_state *
4346 nv50_disp_atomic_state_alloc(struct drm_device *dev)
4348 struct nv50_atom *atom;
4349 if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
4350 drm_atomic_state_init(dev, &atom->state) < 0) {
4354 INIT_LIST_HEAD(&atom->outp);
4355 return &atom->state;
4358 static const struct drm_mode_config_funcs
4359 nv50_disp_func = {
4360 .fb_create = nouveau_user_framebuffer_create,
4361 .output_poll_changed = drm_fb_helper_output_poll_changed,
4362 .atomic_check = nv50_disp_atomic_check,
4363 .atomic_commit = nv50_disp_atomic_commit,
4364 .atomic_state_alloc = nv50_disp_atomic_state_alloc,
4365 .atomic_state_clear = nv50_disp_atomic_state_clear,
4366 .atomic_state_free = nv50_disp_atomic_state_free,
4369 /******************************************************************************
4370 * Init
4371 *****************************************************************************/
4374 nv50_display_fini(struct drm_device *dev)
4376 struct nouveau_encoder *nv_encoder;
4377 struct drm_encoder *encoder;
4378 struct drm_plane *plane;
4380 drm_for_each_plane(plane, dev) {
4381 struct nv50_wndw *wndw = nv50_wndw(plane);
4382 if (plane->funcs != &nv50_wndw)
4384 nv50_wndw_fini(wndw);
4387 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4388 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
4389 nv_encoder = nouveau_encoder(encoder);
4390 nv50_mstm_fini(nv_encoder->dp.mstm);
4396 nv50_display_init(struct drm_device *dev)
4398 struct drm_encoder *encoder;
4399 struct drm_plane *plane;
4402 push = evo_wait(nv50_mast(dev), 32);
4406 evo_mthd(push, 0x0088, 1);
4407 evo_data(push, nv50_mast(dev)->base.sync.handle);
4408 evo_kick(push, nv50_mast(dev));
4410 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4411 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
4412 struct nouveau_encoder *nv_encoder =
4413 nouveau_encoder(encoder);
4414 nv50_mstm_init(nv_encoder->dp.mstm);
4418 drm_for_each_plane(plane, dev) {
4419 struct nv50_wndw *wndw = nv50_wndw(plane);
4420 if (plane->funcs != &nv50_wndw)
4422 nv50_wndw_init(wndw);
4429 nv50_display_destroy(struct drm_device *dev)
4431 struct nv50_disp *disp = nv50_disp(dev);
4433 nv50_dmac_destroy(&disp->mast.base, disp->disp);
4435 nouveau_bo_unmap(disp->sync);
4437 nouveau_bo_unpin(disp->sync);
4438 nouveau_bo_ref(NULL, &disp->sync);
4440 nouveau_display(dev)->priv = NULL;
4444 MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
4445 static int nouveau_atomic = 0;
4446 module_param_named(atomic, nouveau_atomic, int, 0400);
4449 nv50_display_create(struct drm_device *dev)
4451 struct nvif_device *device = &nouveau_drm(dev)->client.device;
4452 struct nouveau_drm *drm = nouveau_drm(dev);
4453 struct dcb_table *dcb = &drm->vbios.dcb;
4454 struct drm_connector *connector, *tmp;
4455 struct nv50_disp *disp;
4456 struct dcb_output *dcbe;
4459 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
4463 mutex_init(&disp->mutex);
4465 nouveau_display(dev)->priv = disp;
4466 nouveau_display(dev)->dtor = nv50_display_destroy;
4467 nouveau_display(dev)->init = nv50_display_init;
4468 nouveau_display(dev)->fini = nv50_display_fini;
4469 disp->disp = &nouveau_display(dev)->disp;
4470 dev->mode_config.funcs = &nv50_disp_func;
4471 dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
4473 dev->driver->driver_features |= DRIVER_ATOMIC;
4475 /* small shared memory area we use for notifiers and semaphores */
4476 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
4477 0, 0x0000, NULL, NULL, &disp->sync);
4479 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
4481 ret = nouveau_bo_map(disp->sync);
4483 nouveau_bo_unpin(disp->sync);
4486 nouveau_bo_ref(NULL, &disp->sync);
4492 /* allocate master evo channel */
4493 ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
4498 /* create crtc objects to represent the hw heads */
4499 if (disp->disp->oclass >= GF110_DISP)
4500 crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
4501 else
4502 crtcs = 0x3;
4504 for (i = 0; i < fls(crtcs); i++) {
4505 if (!(crtcs & (1 << i)))
4507 ret = nv50_head_create(dev, i);
4512 /* create encoder/connector objects based on VBIOS DCB table */
4513 for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
4514 connector = nouveau_connector_create(dev, dcbe->connector);
4515 if (IS_ERR(connector))
4518 if (dcbe->location == DCB_LOC_ON_CHIP) {
4519 switch (dcbe->type) {
4520 case DCB_OUTPUT_TMDS:
4521 case DCB_OUTPUT_LVDS:
4523 ret = nv50_sor_create(connector, dcbe);
4525 case DCB_OUTPUT_ANALOG:
4526 ret = nv50_dac_create(connector, dcbe);
4533 ret = nv50_pior_create(connector, dcbe);
4537 NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
4538 dcbe->location, dcbe->type,
4539 ffs(dcbe->or) - 1, ret);
4544 /* cull any connectors we created that don't have an encoder */
4545 list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
4546 if (connector->encoder_ids[0])
4549 NV_WARN(drm, "%s has no encoders, removing\n",
4551 connector->funcs->destroy(connector);
4556 nv50_display_destroy(dev);