/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
#include <nvif/cl507a.h>
#include <nvif/cl507b.h>
#include <nvif/cl507c.h>
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>
#include <nvif/event.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
#include "nouveau_fbcon.h"
#include "nv50_display.h"
#define EVO_MASTER	(0x00)
#define EVO_FLIP(c)	(0x01 + (c))
#define EVO_OVLY(c)	(0x05 + (c))
#define EVO_OIMM(c)	(0x09 + (c))
#define EVO_CURS(c)	(0x0d + (c))

/* offsets in shared sync bo of various structures */
#define EVO_SYNC(c, o)    ((c) * 0x0100 + (o))
#define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
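
/* For example, head 0's flip semaphore lives at EVO_FLIP_SEM0(0) ==
 * EVO_SYNC(1, 0x00) == offset 0x0100 within the sync buffer, and its
 * first flip notifier at EVO_FLIP_NTFY0(0) == 0x0120.
 */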
/******************************************************************************
 * Atomic state
 *****************************************************************************/
#define nv50_atom(p) container_of((p), struct nv50_atom, state)

struct nv50_atom {
	struct drm_atomic_state state;

	struct list_head outp;
	bool lock_core;
	bool flush_disable;
};

struct nv50_outp_atom {
	struct list_head head;

	struct drm_encoder *encoder;
	bool flush_disable;

	union {
		struct {
			bool ctrl:1;
		};
		u8 mask;
	} set, clr;
};
#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)

struct nv50_head_atom {
	struct drm_crtc_state state;

	struct {
		u16 iW;
		u16 iH;
		u16 oW;
		u16 oH;
	} view;

	struct nv50_head_mode {
		bool interlace;
		u32 clock;
		struct {
			u16 active;
			u16 synce;
			u16 blanke;
			u16 blanks;
		} h;
		struct {
			u16 active;
			u16 synce;
			u16 blanke;
			u16 blanks;
			u16 blank2s;
			u16 blank2e;
			u16 blankus;
		} v;
	} mode;
static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(statec))
		return (void *)statec;
	return nv50_head_atom(statec);
}
#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)

struct nv50_wndw_atom {
	struct drm_plane_state state;
/******************************************************************************
 * EVO channel
 *****************************************************************************/

struct nv50_chan {
	struct nvif_object user;
	struct nvif_device *device;
};
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_init(disp, 0, oclass[0],
						       data, size, &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user, NULL, 0);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}
static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_fini(&chan->user);
}
/******************************************************************************
 * PIO EVO channel
 *****************************************************************************/

struct nv50_pioc {
	struct nv50_chan base;
};

static void
nv50_pioc_destroy(struct nv50_pioc *pioc)
{
	nv50_chan_destroy(&pioc->base);
}
static int
nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_pioc *pioc)
{
	return nv50_chan_create(device, disp, oclass, head, data, size,
				&pioc->base);
}
/******************************************************************************
 * Overlay Immediate
 *****************************************************************************/

struct nv50_oimm {
	struct nv50_pioc base;
};

static int
nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, struct nv50_oimm *oimm)
{
	struct nv50_disp_cursor_v0 args = {
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_OVERLAY,
		GF110_DISP_OVERLAY,
		GT214_DISP_OVERLAY,
		G82_DISP_OVERLAY,
		NV50_DISP_OVERLAY,
		0
	};

	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
				&oimm->base);
}
/******************************************************************************
 * DMA EVO channel
 *****************************************************************************/

struct nv50_dmac_ctxdma {
	struct list_head head;
	struct nvif_object object;
};

struct nv50_dmac {
	struct nv50_chan base;
	dma_addr_t handle;
	u32 *ptr;

	struct nvif_object sync;
	struct nvif_object vram;
	struct list_head ctxdma;

	/* Protects against concurrent pushbuf access to this channel, lock is
	 * grabbed by evo_wait (if the pushbuf reservation is successful) and
	 * dropped again by evo_kick. */
	struct mutex lock;
};
static void
nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}
static struct nv50_dmac_ctxdma *
nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_dmac_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
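	/* Ctxdma objects are cached on the channel and keyed by the
	 * framebuffer's memory kind, so the lookup below lets every
	 * framebuffer of the same kind share a single DMA object.
	 */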
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &dmac->ctxdma);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_dmac_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}
static void
nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
	struct nvif_device *device = dmac->base.device;
	struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
		nv50_dmac_ctxdma_del(ctxdma);
	}

	nvif_object_fini(&dmac->vram);
	nvif_object_fini(&dmac->sync);

	nv50_chan_destroy(&dmac->base);

	if (dmac->ptr) {
		struct device *dev = nvxx_device(device)->dev;
		dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
	}
}
static int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
		 struct nv50_dmac *dmac)
{
	struct nv50_disp_core_channel_dma_v0 *args = data;
	struct nvif_object pushbuf;
	int ret;

	mutex_init(&dmac->lock);
	INIT_LIST_HEAD(&dmac->ctxdma);

	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
				       &dmac->handle, GFP_KERNEL);
	if (!dmac->ptr)
		return -ENOMEM;

	ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_PCI_US,
					.access = NV_DMA_V0_ACCESS_RD,
					.start = dmac->handle + 0x0000,
					.limit = dmac->handle + 0x0fff,
			       }, sizeof(struct nv_dma_v0), &pushbuf);
	if (ret)
		return ret;

	args->pushbuf = nvif_handle(&pushbuf);

	ret = nv50_chan_create(device, disp, oclass, head, data, size,
			       &dmac->base);
	nvif_object_fini(&pushbuf);
	if (ret)
		return ret;

	ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = syncbuf + 0x0000,
					.limit = syncbuf + 0x0fff,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->sync);
	if (ret)
		return ret;

	ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = 0,
					.limit = device->info.ram_user - 1,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->vram);
	if (ret)
		return ret;

	return ret;
}
/******************************************************************************
 * Core
 *****************************************************************************/

struct nv50_mast {
	struct nv50_dmac base;
};

static int
nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
		 u64 syncbuf, struct nv50_mast *core)
{
	struct nv50_disp_core_channel_dma_v0 args = {
		.pushbuf = 0xb0007d00,
	};
	static const s32 oclass[] = {
		GP102_DISP_CORE_CHANNEL_DMA,
		GP100_DISP_CORE_CHANNEL_DMA,
		GM200_DISP_CORE_CHANNEL_DMA,
		GM107_DISP_CORE_CHANNEL_DMA,
		GK110_DISP_CORE_CHANNEL_DMA,
		GK104_DISP_CORE_CHANNEL_DMA,
		GF110_DISP_CORE_CHANNEL_DMA,
		GT214_DISP_CORE_CHANNEL_DMA,
		GT206_DISP_CORE_CHANNEL_DMA,
		GT200_DISP_CORE_CHANNEL_DMA,
		G82_DISP_CORE_CHANNEL_DMA,
		NV50_DISP_CORE_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
				syncbuf, &core->base);
}
/******************************************************************************
 * Base
 *****************************************************************************/

struct nv50_sync {
	struct nv50_dmac base;
	u32 addr;
	u32 data;
};

static int
nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, u64 syncbuf, struct nv50_sync *base)
{
	struct nv50_disp_base_channel_dma_v0 args = {
		.pushbuf = 0xb0007c00 | head,
		.head = head,
	};
	static const s32 oclass[] = {
		GK110_DISP_BASE_CHANNEL_DMA,
		GK104_DISP_BASE_CHANNEL_DMA,
		GF110_DISP_BASE_CHANNEL_DMA,
		GT214_DISP_BASE_CHANNEL_DMA,
		GT200_DISP_BASE_CHANNEL_DMA,
		G82_DISP_BASE_CHANNEL_DMA,
		NV50_DISP_BASE_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
				syncbuf, &base->base);
}
/******************************************************************************
 * Overlay
 *****************************************************************************/

struct nv50_ovly {
	struct nv50_dmac base;
};

static int
nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, u64 syncbuf, struct nv50_ovly *ovly)
{
	struct nv50_disp_overlay_channel_dma_v0 args = {
		.pushbuf = 0xb0007e00 | head,
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_OVERLAY_CONTROL_DMA,
		GF110_DISP_OVERLAY_CONTROL_DMA,
		GT214_DISP_OVERLAY_CHANNEL_DMA,
		GT200_DISP_OVERLAY_CHANNEL_DMA,
		G82_DISP_OVERLAY_CHANNEL_DMA,
		NV50_DISP_OVERLAY_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
				syncbuf, &ovly->base);
}
struct nv50_head {
	struct nouveau_crtc base;
	struct {
		struct nouveau_bo *nvbo[2];
		int next;
	} lut;
	struct nv50_ovly ovly;
	struct nv50_oimm oimm;
};

#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
#define nv50_vers(c) nv50_chan(c)->user.oclass
struct nv50_disp {
	struct nvif_object *disp;
	struct nv50_mast mast;

	struct nouveau_bo *sync;

	struct mutex mutex;
};

static struct nv50_disp *
nv50_disp(struct drm_device *dev)
{
	return nouveau_display(dev)->priv;
}

#define nv50_mast(d) (&nv50_disp(d)->mast)
/******************************************************************************
 * EVO channel helpers
 *****************************************************************************/
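/* Reserve space for 'nr' dwords in the channel's push buffer.  If too
 * little room remains before the end of the page, a jump back to the
 * start is written instead and we wait for the hardware to catch up
 * (the read pointer at offset 0x0004 returning to zero) before reusing
 * the buffer from the beginning.
 */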
static u32 *
evo_wait(void *evoc, int nr)
{
	struct nv50_dmac *dmac = evoc;
	struct nvif_device *device = dmac->base.device;
	u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;

	mutex_lock(&dmac->lock);
	if (put + nr >= (PAGE_SIZE / 4) - 8) {
		dmac->ptr[put] = 0x20000000;

		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
		if (nvif_msec(device, 2000,
			if (!nvif_rd32(&dmac->base.user, 0x0004))
				break;
		) < 0) {
			mutex_unlock(&dmac->lock);
			pr_err("nouveau: evo channel stalled\n");
			return NULL;
		}

		put = 0;
	}

	return dmac->ptr + put;
}
static void
evo_kick(u32 *push, void *evoc)
{
	struct nv50_dmac *dmac = evoc;
	nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
	mutex_unlock(&dmac->lock);
}
#define evo_mthd(p, m, s) do {						\
	const u32 _m = (m), _s = (s);					\
	if (drm_debug & DRM_UT_KMS)					\
		pr_err("%04x %d %s\n", _m, _s, __func__);		\
	*((p)++) = ((_s << 18) | _m);					\
} while(0)

#define evo_data(p, d) do {						\
	const u32 _d = (d);						\
	if (drm_debug & DRM_UT_KMS)					\
		pr_err("\t%08x\n", _d);					\
	*((p)++) = _d;							\
} while(0)
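
/* A minimal illustrative push sequence using the helpers above (the
 * channel name is assumed; real callers pass one of the DMA channel
 * objects defined in this file):
 *
 *	u32 *push = evo_wait(&dmac, 2);
 *	if (push) {
 *		evo_mthd(push, 0x0080, 1);
 *		evo_data(push, 0x00000000);
 *		evo_kick(push, &dmac);
 *	}
 */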
/******************************************************************************
 * Plane
 *****************************************************************************/
#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)

struct nv50_wndw {
	const struct nv50_wndw_func *func;
	struct nv50_dmac *dmac;

	struct drm_plane plane;

	struct nvif_notify notify;
	u16 ntfy;
	u16 sema;
	u32 data;
};
struct nv50_wndw_func {
	void *(*dtor)(struct nv50_wndw *);
	int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
		       struct nv50_head_atom *asyh);
	void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
			struct nv50_head_atom *asyh);
	void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
			struct nv50_wndw_atom *asyw);

	void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*sema_clr)(struct nv50_wndw *);
	void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*ntfy_clr)(struct nv50_wndw *);
	int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*image_clr)(struct nv50_wndw *);
	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);

	u32 (*update)(struct nv50_wndw *, u32 interlock);
};
static int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	if (asyw->set.ntfy)
		return wndw->func->ntfy_wait_begun(wndw, asyw);
	return 0;
}
static u32
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	if (asyw->clr.sema && (!asyw->set.sema || flush))
		wndw->func->sema_clr(wndw);
	if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
		wndw->func->ntfy_clr(wndw);
	if (asyw->clr.image && (!asyw->set.image || flush))
		wndw->func->image_clr(wndw);

	return flush ? wndw->func->update(wndw, interlock) : 0;
}
static u32
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}
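	/* When an interlock with the core channel is requested, the image
	 * update is forced back to a vsynced, interval-one flip above,
	 * presumably so it cannot complete ahead of the interlocked core
	 * channel update.
	 */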
	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);
	if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
	if (asyw->set.point) wndw->func->point    (wndw, asyw);

	return wndw->func->update(wndw, interlock);
}
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	asyw->image.w = fb->base.width;
	asyw->image.h = fb->base.height;
	asyw->image.kind = fb->nvbo->kind;

	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
		asyw->interval = 0;
	else
		asyw->interval = 1;

	if (asyw->image.kind) {
		asyw->image.layout = 0;
		if (drm->client.device.info.chipset >= 0xc0)
			asyw->image.block = fb->nvbo->mode >> 4;
		else
			asyw->image.block = fb->nvbo->mode;
		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
	} else {
		asyw->image.layout = 1;
		asyw->image.block  = 0;
		asyw->image.pitch  = fb->base.pitches[0];
	}

	ret = wndw->func->acquire(wndw, asyw, asyh);
	if (ret)
		return ret;

	if (asyw->set.image) {
		if (!(asyw->image.mode = asyw->interval ? 0 : 1))
			asyw->image.interval = asyw->interval;
		else
			asyw->image.interval = 0;
	}

	return 0;
}
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool varm = false, asyv = false, asym = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		asym = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyv = asyh->state.active;
	}

	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
		varm = harm->state.crtc->state->active;
	}

	if (asyv) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;

		ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
		if (ret)
			return ret;
	} else
	if (varm) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
	} else {
		return 0;
	}

	if (!asyv || asym) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle != 0;
		asyw->set.lut = wndw->func->lut && asyv;
	}

	return 0;
}
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_dmac_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
	if (IS_ERR(ctxdma)) {
		nouveau_bo_unpin(fb->nvbo);
		return PTR_ERR(ctxdma);
	}

	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
	asyw->image.handle = ctxdma->object.handle;
	asyw->image.offset = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}
static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};
static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}
static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->interval = 1;
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->lut = armw->lut;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}
static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}
static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	void *data;
	nvif_notify_fini(&wndw->notify);
	data = wndw->func->dtor(wndw);
	drm_plane_cleanup(&wndw->plane);
	kfree(data);
}
static const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};
static void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}

static void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}
static int
nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       struct nv50_dmac *dmac, const u32 *format, int nformat,
	       struct nv50_wndw *wndw)
{
	int ret;

	wndw->func = func;
	wndw->dmac = dmac;

	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret)
		return ret;

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
	return 0;
}
/******************************************************************************
 * Cursor plane
 *****************************************************************************/
#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)

struct nv50_curs {
	struct nv50_wndw wndw;
	struct nvif_object chan;
};
static u32
nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_wr32(&curs->chan, 0x0080, 0x00000000);
	return 0;
}

static void
nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
}
static void
nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
		  struct nv50_wndw_atom *asyw)
{
	u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
	u32 offset = asyw->image.offset;
	if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
		asyh->curs.handle = handle;
		asyh->curs.offset = offset;
		asyh->set.curs = asyh->curs.visible;
	}
}
static void
nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	asyh->curs.visible = false;
}
static int
nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	struct drm_rect clip = {};
	int ret;

	if (asyh->state.enable)
		drm_mode_get_hv_timing(&asyh->state.mode,
				       &clip.x2, &clip.y2);

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  &clip,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	asyh->curs.visible = asyw->state.visible;
	if (ret || !asyh->curs.visible)
		return ret;

	switch (asyw->state.fb->width) {
	case 32: asyh->curs.layout = 0; break;
	case 64: asyh->curs.layout = 1; break;
	default:
		return -EINVAL;
	}

	if (asyw->state.fb->width != asyw->state.fb->height)
		return -EINVAL;

	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
static void *
nv50_curs_dtor(struct nv50_wndw *wndw)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_object_fini(&curs->chan);
	return curs;
}

static const u32
nv50_curs_format[] = {
	DRM_FORMAT_ARGB8888,
};

static const struct nv50_wndw_func
nv50_curs = {
	.dtor = nv50_curs_dtor,
	.acquire = nv50_curs_acquire,
	.release = nv50_curs_release,
	.prepare = nv50_curs_prepare,
	.point = nv50_curs_point,
	.update = nv50_curs_update,
};
static int
nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
	      struct nv50_curs **pcurs)
{
	static const struct nvif_mclass curses[] = {
		{ GK104_DISP_CURSOR, 0 },
		{ GF110_DISP_CURSOR, 0 },
		{ GT214_DISP_CURSOR, 0 },
		{   G82_DISP_CURSOR, 0 },
		{  NV50_DISP_CURSOR, 0 },
		{}
	};
	struct nv50_disp_cursor_v0 args = {
		.head = head->base.index,
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_curs *curs;
	int cid, ret;

	cid = nvif_mclass(disp->disp, curses);
	if (cid < 0) {
		NV_ERROR(drm, "No supported cursor immediate class\n");
		return cid;
	}

	if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
		return -ENOMEM;

	ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
			     "curs", head->base.index, &disp->mast.base,
			     nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
			     &curs->wndw);
	if (ret) {
		kfree(curs);
		return ret;
	}

	ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
			       sizeof(args), &curs->chan);
	if (ret) {
		NV_ERROR(drm, "curs%04x allocation failed: %d\n",
			 curses[cid].oclass, ret);
		return ret;
	}

	return 0;
}
/******************************************************************************
 * Primary plane
 *****************************************************************************/
#define nv50_base(p) container_of((p), struct nv50_base, wndw)

struct nv50_base {
	struct nv50_wndw wndw;
	struct nv50_sync chan;
	int id;
};

static int
nv50_base_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}
static void
nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x00e0, 1);
		evo_data(push, asyw->lut.enable << 30);
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_image_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 4))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	const s32 oclass = base->chan.base.base.user.oclass;
	u32 *push;
	if ((push = evo_wait(&base->chan, 10))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, (asyw->image.mode << 8) |
			       (asyw->image.interval << 4));
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, asyw->image.handle);
		if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, (asyw->image.kind << 16) |
				       (asyw->image.format << 8));
		} else
		if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		} else {
			evo_mthd(push, 0x0400, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 24) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		}
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_ntfy_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x00a4, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 3))) {
		evo_mthd(push, 0x00a0, 2);
		evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
		evo_data(push, asyw->ntfy.handle);
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_sema_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x0094, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
static void
nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 5))) {
		evo_mthd(push, 0x0088, 4);
		evo_data(push, asyw->sema.offset);
		evo_data(push, asyw->sema.acquire);
		evo_data(push, asyw->sema.release);
		evo_data(push, asyw->sema.handle);
		evo_kick(push, &base->chan);
	}
}
static u32
nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;

	if (!(push = evo_wait(&base->chan, 2)))
		return 0;
	evo_mthd(push, 0x0080, 1);
	evo_data(push, interlock);
	evo_kick(push, &base->chan);
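
	/* The value returned below is this base channel's interlock mask
	 * for a core channel update: the per-head interlock flags sit
	 * eight bits apart on pre-GF110 cores and four bits apart on
	 * GF110 and later, hence the different shifts.
	 */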
	if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
		return interlock ? 2 << (base->id * 8) : 0;
	return interlock ? 2 << (base->id * 4) : 0;
}
static int
nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (nvif_msec(&drm->client.device, 2000ULL,
		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
		if ((data & 0xc0000000) == 0x40000000)
			break;
		usleep_range(1, 2);
	) < 0)
		return -ETIMEDOUT;
	return 0;
}
static void
nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	asyh->base.cpp = 0;
}
static int
nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	const struct drm_framebuffer *fb = asyw->state.fb;
	struct drm_rect clip = {};
	int ret;

	if (!fb->format->depth)
		return -EINVAL;

	if (asyh->state.enable)
		drm_mode_get_hv_timing(&asyh->state.mode,
				       &clip.x2, &clip.y2);

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  &clip,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	asyh->base.depth = fb->format->depth;
	asyh->base.cpp = fb->format->cpp[0];
	asyh->base.x = asyw->state.src.x1 >> 16;
	asyh->base.y = asyw->state.src.y1 >> 16;
	asyh->base.w = asyw->state.fb->width;
	asyh->base.h = asyw->state.fb->height;

	switch (fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	asyw->lut.enable = 1;
	asyw->set.image = true;
	return 0;
}
static void *
nv50_base_dtor(struct nv50_wndw *wndw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	struct nv50_base *base = nv50_base(wndw);
	nv50_dmac_destroy(&base->chan.base, disp->disp);
	return base;
}

static const u32
nv50_base_format[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
static const struct nv50_wndw_func
nv50_base = {
	.dtor = nv50_base_dtor,
	.acquire = nv50_base_acquire,
	.release = nv50_base_release,
	.sema_set = nv50_base_sema_set,
	.sema_clr = nv50_base_sema_clr,
	.ntfy_set = nv50_base_ntfy_set,
	.ntfy_clr = nv50_base_ntfy_clr,
	.ntfy_wait_begun = nv50_base_ntfy_wait_begun,
	.image_set = nv50_base_image_set,
	.image_clr = nv50_base_image_clr,
	.lut = nv50_base_lut,
	.update = nv50_base_update,
};
static int
nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
	      struct nv50_base **pbase)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_base *base;
	int ret;

	if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
		return -ENOMEM;
	base->id = head->base.index;
	base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
	base->wndw.sema = EVO_FLIP_SEM0(base->id);
	base->wndw.data = 0x00000000;

	ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
			     "base", base->id, &base->chan.base,
			     nv50_base_format, ARRAY_SIZE(nv50_base_format),
			     &base->wndw);
	if (ret) {
		kfree(base);
		return ret;
	}

	ret = nv50_base_create(&drm->client.device, disp->disp, base->id,
			       disp->sync->bo.offset, &base->chan);
	if (ret)
		return ret;

	return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
				false,
				NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
				&(struct nvif_notify_uevent_req) {},
				sizeof(struct nvif_notify_uevent_req),
				sizeof(struct nvif_notify_uevent_rep),
				&base->wndw.notify);
}
/******************************************************************************
 * Head
 *****************************************************************************/
static void
nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
		else
			evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
		evo_data(push, (asyh->procamp.sat.sin << 20) |
			       (asyh->procamp.sat.cos << 8));
		evo_kick(push, core);
	}
}
static void
nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
		else
		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
		else
			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
		evo_data(push, (asyh->dither.mode << 3) |
			       (asyh->dither.bits << 1) |
				asyh->dither.enable);
		evo_kick(push, core);
	}
}
static void
nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
static void
nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
static void
nv50_head_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 5))) {
		if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
		} else
		if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, asyh->curs.handle);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, asyh->curs.handle);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_core_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
static void
nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.kind << 16 |
				       asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
			/* EVO will complain with INVALID_STATE if we have an
			 * active cursor and (re)specify HeadSetContextDmaIso
			 * without also updating HeadSetOffsetCursor.
			 */
			asyh->set.curs = asyh->curs.visible;
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		} else {
			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 24 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_lut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
			evo_data(push, 0x03000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
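
/* Load a 256-entry gamma LUT into one of the head's LUT buffers.  Each
 * hardware entry occupies eight bytes: R, G and B as 16-bit words at
 * offsets 0, 2 and 4, with the final word unused.
 */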
static void
nv50_head_lut_load(struct drm_property_blob *blob, int mode,
		   struct nouveau_bo *nvbo)
{
	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
	void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
	const int size = blob->length / sizeof(*in);
	int bits, shift, i;
	u16 zero, r, g, b;

	/* This can't happen.. But it shuts the compiler up. */
	if (WARN_ON(size != 256))
		return;

	switch (mode) {
	case 0: /* LORES. */
	case 1: /* HIRES. */
		bits = 11;
		shift = 3;
		zero = 0x0000;
		break;
	case 7: /* INTERPOLATE_257_UNITY_RANGE. */
		bits = 14;
		shift = 0;
		zero = 0x6000;
		break;
	default:
		WARN_ON(1);
		return;
	}

	for (i = 0; i < size; i++) {
		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
		writew(r, lut + (i * 0x08) + 0);
		writew(g, lut + (i * 0x08) + 2);
		writew(b, lut + (i * 0x08) + 4);
	}

	/* INTERPOLATE modes require a "next" entry to interpolate with,
	 * so we replicate the last entry to deal with this for now.
	 */
	writew(r, lut + (i * 0x08) + 0);
	writew(g, lut + (i * 0x08) + 2);
	writew(b, lut + (i * 0x08) + 4);
}
static void
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 7))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
			evo_data(push, asyh->lut.offset >> 8);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
			evo_data(push, asyh->lut.offset >> 8);
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, asyh->lut.handle);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
			evo_data(push, 0x80000000 | asyh->lut.mode << 24);
			evo_data(push, asyh->lut.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, asyh->lut.handle);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 14))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
			evo_data(push, 0x00800000 | m->clock);
			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_data(push, asyh->mode.v.blankus);
			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
			evo_data(push, 0x00000000); /* ??? */
			evo_data(push, 0xffffff00);
			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
			evo_data(push, m->clock * 1000);
			evo_data(push, 0x00200000); /* ??? */
			evo_data(push, m->clock * 1000);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 10))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		} else {
			evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		}
		evo_kick(push, core);
	}
}
static void
nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
{
	if (asyh->clr.ilut && (!asyh->set.ilut || y))
		nv50_head_lut_clr(head);
	if (asyh->clr.core && (!asyh->set.core || y))
		nv50_head_core_clr(head);
	if (asyh->clr.curs && (!asyh->set.curs || y))
		nv50_head_curs_clr(head);
}
static void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	if (asyh->set.view   ) nv50_head_view    (head, asyh);
	if (asyh->set.mode   ) nv50_head_mode    (head, asyh);
	if (asyh->set.ilut   ) {
		struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
		struct drm_property_blob *blob = asyh->state.gamma_lut;
		if (blob)
			nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
		asyh->lut.offset = nvbo->bo.offset;
		head->lut.next ^= 1;
		nv50_head_lut_set(head, asyh);
	}
	if (asyh->set.core   ) nv50_head_core_set(head, asyh);
	if (asyh->set.curs   ) nv50_head_curs_set(head, asyh);
	if (asyh->set.base   ) nv50_head_base    (head, asyh);
	if (asyh->set.ovly   ) nv50_head_ovly    (head, asyh);
	if (asyh->set.dither ) nv50_head_dither  (head, asyh);
	if (asyh->set.procamp) nv50_head_procamp (head, asyh);
}
static void
nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
			       struct nv50_head_atom *asyh,
			       struct nouveau_conn_atom *asyc)
{
	const int vib = asyc->procamp.color_vibrance - 100;
	const int hue = asyc->procamp.vibrant_hue - 90;
	const int adj = (vib > 0) ? 50 : 0;
	asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
	asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
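	/* Worked example: color_vibrance == 150 gives vib = 50, so
	 * sat.cos = (50 * 2047 + 50) / 100 = 1024 in the 12-bit field.
	 */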
	asyh->set.procamp = true;
}
static void
nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
			      struct nv50_head_atom *asyh,
			      struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	u32 mode = 0x00;

	if (asyc->dither.mode == DITHERING_MODE_AUTO) {
		if (asyh->base.depth > connector->display_info.bpc * 3)
			mode = DITHERING_MODE_DYNAMIC2X2;
	} else {
		mode = asyc->dither.mode;
	}

	if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
		if (connector->display_info.bpc >= 8)
			mode |= DITHERING_DEPTH_8BPC;
	} else {
		mode |= asyc->dither.depth;
	}

	asyh->dither.enable = mode;
	asyh->dither.bits = mode >> 1;
	asyh->dither.mode = mode >> 3;
	asyh->set.dither = true;
}
static void
nv50_head_atomic_check_view(struct nv50_head_atom *armh,
			    struct nv50_head_atom *asyh,
			    struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
	struct drm_display_mode *umode = &asyh->state.mode;
	int mode = asyc->scaler.mode;
	struct edid *edid;
	int umode_vdisplay, omode_hdisplay, omode_vdisplay;

	if (connector->edid_blob_ptr)
		edid = (struct edid *)connector->edid_blob_ptr->data;
	else
		edid = NULL;

	if (!asyc->scaler.full) {
		if (mode == DRM_MODE_SCALE_NONE)
			omode = umode;
	} else {
		/* Non-EDID LVDS/eDP mode. */
		mode = DRM_MODE_SCALE_FULLSCREEN;
	}

	/* For the user-specified mode, we must ignore doublescan and
	 * the like, but honor frame packing.
	 */
	umode_vdisplay = umode->vdisplay;
	if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
		umode_vdisplay += umode->vtotal;
	asyh->view.iW = umode->hdisplay;
	asyh->view.iH = umode_vdisplay;
	/* For the output mode, we can just use the stock helper. */
	drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
	asyh->view.oW = omode_hdisplay;
	asyh->view.oH = omode_vdisplay;

	/* Add overscan compensation if necessary, will keep the aspect
	 * ratio the same as the backend mode unless overridden by the
	 * user setting both hborder and vborder properties.
	 */
	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
	     drm_detect_hdmi_monitor(edid)))) {
		u32 bX = asyc->scaler.underscan.hborder;
		u32 bY = asyc->scaler.underscan.vborder;
		u32 r = (asyh->view.oH << 19) / asyh->view.oW;

		if (bX) {
			asyh->view.oW -= (bX * 2);
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		} else {
			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
	}

	/* Handle CENTER/ASPECT scaling, taking into account the areas
	 * removed already for overscan compensation.
	 */
	switch (mode) {
	case DRM_MODE_SCALE_CENTER:
		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
		/* fall-through */
	case DRM_MODE_SCALE_ASPECT:
		if (asyh->view.oH < asyh->view.oW) {
			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
		} else {
			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
		break;
	default:
		break;
	}

	asyh->set.view = true;
}
static void
nv50_head_atomic_check_lut(struct nv50_head *head,
			   struct nv50_head_atom *armh,
			   struct nv50_head_atom *asyh)
{
	struct nv50_disp *disp = nv50_disp(head->base.base.dev);

	/* An I8 surface without an input LUT makes no sense, and
	 * EVO will throw an error if you try.
	 *
	 * Legacy clients actually cause this due to the order in
	 * which they call ioctls, so we will enable the LUT with
	 * whatever contents the buffer already contains to avoid
	 * triggering the error check.
	 */
	if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
		asyh->lut.handle = 0;
		asyh->clr.ilut = armh->lut.visible;
		return;
	}

	if (disp->disp->oclass < GF110_DISP) {
		asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
		asyh->set.ilut = true;
	} else {
		asyh->lut.mode = 7;
		asyh->set.ilut = asyh->state.color_mgmt_changed;
	}

	asyh->lut.handle = disp->mast.base.vram.handle;
}
static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct nv50_head_mode *m = &asyh->mode;
	u32 blankus;

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);

	/*
	 * DRM modes are defined in terms of a repeating interval
	 * starting with the active display area.  The hardware modes
	 * are defined in terms of a repeating interval starting one
	 * unit (pixel or line) into the sync pulse.  So, add bias.
	 */
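
	/* Illustrative example: the standard CEA 1920x1080@60 mode has
	 * crtc_hsync_start = 2008, crtc_hsync_end = 2052 and
	 * crtc_htotal = 2200, so below h.active = 2200,
	 * h.synce = 2052 - 2008 - 1 = 43 and
	 * h.blanke = 2200 - 2008 - 1 = 191.
	 */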
	m->h.active = mode->crtc_htotal;
	m->h.synce  = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
	m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
	m->h.blanks = m->h.blanke + mode->crtc_hdisplay;

	m->v.active = mode->crtc_vtotal;
	m->v.synce  = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
	m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
	m->v.blanks = m->v.blanke + mode->crtc_vdisplay;

	/*XXX: Safe underestimate, even "0" works */
	blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
	blankus *= 1000;
	blankus /= mode->crtc_clock;
	m->v.blankus = blankus;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		m->v.blank2e =  m->v.active + m->v.blanke;
		m->v.blank2s =  m->v.blank2e + mode->crtc_vdisplay;
		m->v.active  = (m->v.active * 2) + 1;
		m->interlace = true;
	} else {
		m->v.blank2e = 0;
		m->v.blank2s = 1;
		m->interlace = false;
	}
	m->clock = mode->crtc_clock;

	asyh->set.mode = true;
}
static int
nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
	struct nv50_head_atom *asyh = nv50_head_atom(state);
	struct nouveau_conn_atom *asyc = NULL;
	struct drm_connector_state *conns;
	struct drm_connector *conn;
	int i;

	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
	if (asyh->state.active) {
		for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
			if (conns->crtc == crtc) {
				asyc = nouveau_conn_atom(conns);
				break;
			}
		}

		if (armh->state.active) {
			if (asyc) {
				if (asyh->state.mode_changed)
					asyc->set.scaler = true;
				if (armh->base.depth != asyh->base.depth)
					asyc->set.dither = true;
			}
		} else {
			if (asyc)
				asyc->set.mask = ~0;
			asyh->set.mask = ~0;
		}

		if (asyh->state.mode_changed)
			nv50_head_atomic_check_mode(head, asyh);

		if (asyh->state.color_mgmt_changed ||
		    asyh->base.cpp != armh->base.cpp)
			nv50_head_atomic_check_lut(head, armh, asyh);
		asyh->lut.visible = asyh->lut.handle != 0;

		if (asyc) {
			if (asyc->set.scaler)
				nv50_head_atomic_check_view(armh, asyh, asyc);
			if (asyc->set.dither)
				nv50_head_atomic_check_dither(armh, asyh, asyc);
			if (asyc->set.procamp)
				nv50_head_atomic_check_procamp(armh, asyh, asyc);
		}

		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
			asyh->core.x = asyh->base.x;
			asyh->core.y = asyh->base.y;
			asyh->core.w = asyh->base.w;
			asyh->core.h = asyh->base.h;
		} else
		if ((asyh->core.visible = asyh->curs.visible) ||
		    (asyh->core.visible = asyh->lut.visible)) {
			/*XXX: We need to either find some way of having the
			 *     primary base layer appear black, while still
			 *     being able to display the other layers, or we
			 *     need to allocate a dummy black surface here.
			 */
			asyh->core.x = 0;
			asyh->core.y = 0;
			asyh->core.w = asyh->state.mode.hdisplay;
			asyh->core.h = asyh->state.mode.vdisplay;
		}
		asyh->core.handle = disp->mast.base.vram.handle;
		asyh->core.offset = 0;
		asyh->core.format = 0xcf;
		asyh->core.kind = 0;
		asyh->core.layout = 1;
		asyh->core.block = 0;
		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
		asyh->set.base = armh->base.cpp != asyh->base.cpp;
		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
	} else {
		asyh->lut.visible = false;
		asyh->core.visible = false;
		asyh->curs.visible = false;
		asyh->base.cpp = 0;
		asyh->ovly.cpp = 0;
	}

	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
		if (asyh->core.visible) {
			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
				asyh->set.core = true;
		} else
		if (armh->core.visible) {
			asyh->clr.core = true;
		}

		if (asyh->curs.visible) {
			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
				asyh->set.curs = true;
		} else
		if (armh->curs.visible) {
			asyh->clr.curs = true;
		}
	} else {
		asyh->clr.ilut = armh->lut.visible;
		asyh->clr.core = armh->core.visible;
		asyh->clr.curs = armh->curs.visible;
		asyh->set.ilut = asyh->lut.visible;
		asyh->set.core = asyh->core.visible;
		asyh->set.curs = asyh->curs.visible;
	}

	if (asyh->clr.mask || asyh->set.mask)
		nv50_atom(asyh->state.state)->lock_core = true;
	return 0;
}
static const struct drm_crtc_helper_funcs
nv50_head_help = {
	.atomic_check = nv50_head_atomic_check,
};
static void
nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *state)
{
	struct nv50_head_atom *asyh = nv50_head_atom(state);
	__drm_atomic_helper_crtc_destroy_state(&asyh->state);
	kfree(asyh);
}
static struct drm_crtc_state *
nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
	struct nv50_head_atom *asyh;
	if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
	asyh->view = armh->view;
	asyh->mode = armh->mode;
	asyh->lut = armh->lut;
	asyh->core = armh->core;
	asyh->curs = armh->curs;
	asyh->base = armh->base;
	asyh->ovly = armh->ovly;
	asyh->dither = armh->dither;
	asyh->procamp = armh->procamp;
	asyh->clr.mask = 0;
	asyh->set.mask = 0;
	return &asyh->state;
}
2346 __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
2347 struct drm_crtc_state *state)
2350 crtc->funcs->atomic_destroy_state(crtc, crtc->state);
2351 crtc->state = state;
2352 crtc->state->crtc = crtc;
2356 nv50_head_reset(struct drm_crtc *crtc)
2358 struct nv50_head_atom *asyh;
2360 if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
2363 __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
2367 nv50_head_destroy(struct drm_crtc *crtc)
2369 struct nv50_disp *disp = nv50_disp(crtc->dev);
2370 struct nv50_head *head = nv50_head(crtc);
2373 nv50_dmac_destroy(&head->ovly.base, disp->disp);
2374 nv50_pioc_destroy(&head->oimm.base);
2376 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
2377 nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
2379 drm_crtc_cleanup(crtc);
2383 static const struct drm_crtc_funcs
2385 .reset = nv50_head_reset,
2386 .gamma_set = drm_atomic_helper_legacy_gamma_set,
2387 .destroy = nv50_head_destroy,
2388 .set_config = drm_atomic_helper_set_config,
2389 .page_flip = drm_atomic_helper_page_flip,
2390 .atomic_duplicate_state = nv50_head_atomic_duplicate_state,
2391 .atomic_destroy_state = nv50_head_atomic_destroy_state,
2395 nv50_head_create(struct drm_device *dev, int index)
2397 struct nouveau_drm *drm = nouveau_drm(dev);
2398 struct nvif_device *device = &drm->client.device;
2399 struct nv50_disp *disp = nv50_disp(dev);
2400 struct nv50_head *head;
2401 struct nv50_base *base;
2402 struct nv50_curs *curs;
2403 struct drm_crtc *crtc;
2406 head = kzalloc(sizeof(*head), GFP_KERNEL);
2410 head->base.index = index;
2411 ret = nv50_base_new(drm, head, &base);
2413 ret = nv50_curs_new(drm, head, &curs);
2419 crtc = &head->base.base;
2420 drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
2421 &curs->wndw.plane, &nv50_head_func,
2422 "head-%d", head->base.index);
2423 drm_crtc_helper_add(crtc, &nv50_head_help);
2424 drm_mode_crtc_set_gamma_size(crtc, 256);
2426 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
2427 ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
2429 &head->lut.nvbo[i]);
2434 /* allocate overlay resources */
2435 ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
2439 ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
2446 nv50_head_destroy(crtc);
2450 /******************************************************************************
2451 * Output path helpers
2452 *****************************************************************************/
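/* Ask NVKM to release the output resource (OR) that nv50_outp_acquire()
 * below assigned to this encoder; the assignment is tracked in
 * nv_encoder->or/link while the path is in use.
 */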
static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct {
		struct nv50_disp_mthd_v1 base;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_RELEASE,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
	nv_encoder->or = -1;
	nv_encoder->link = 0;
}

static int
nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
{
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_acquire_v0 info;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};
	int ret;

	ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
	if (ret) {
		NV_ERROR(drm, "error acquiring output path: %d\n", ret);
		return ret;
	}

	nv_encoder->or = args.info.or;
	nv_encoder->link = args.info.link;
	return 0;
}
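/* Decide which mode actually gets programmed: panels (LVDS/eDP) with
 * scaling nominally disabled still fall back to the native mode plus the
 * scaler for modes that didn't come from the EDID, since the panel can
 * only drive its native timings.
 */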
static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
			    struct drm_crtc_state *crtc_state,
			    struct drm_connector_state *conn_state,
			    struct drm_display_mode *native_mode)
{
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_display_mode *mode = &crtc_state->mode;
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);

	NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
	asyc->scaler.full = false;
	if (!native_mode)
		return 0;

	if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
		switch (connector->connector_type) {
		case DRM_MODE_CONNECTOR_LVDS:
		case DRM_MODE_CONNECTOR_eDP:
			/* Force use of scaler for non-EDID modes. */
			if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
				break;
			mode = native_mode;
			asyc->scaler.full = true;
			break;
		default:
			break;
		}
	} else {
		mode = native_mode;
	}

	if (!drm_mode_equal(adjusted_mode, mode)) {
		drm_mode_copy(adjusted_mode, mode);
		crtc_state->mode_changed = true;
	}

	return 0;
}

static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct nouveau_connector *nv_connector =
		nouveau_connector(conn_state->connector);
	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					   nv_connector->native_mode);
}

/******************************************************************************
 * DAC
 *****************************************************************************/
static void
nv50_dac_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(mast, 4);
		if (push) {
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0400 + (or * 0x080), 1);
				evo_data(push, 0x00000000);
			} else {
				evo_mthd(push, 0x0180 + (or * 0x020), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}
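/* Two core-channel generations are handled below: pre-GF110 programs the
 * DAC via a single per-OR method (0x0400 + or*0x080), while GF110+ splits
 * this into a per-head sync method and a per-OR owner mask.  0x31ec6000 is
 * an opaque constant used for the GF110+ sync method throughout this file;
 * its individual bitfields aren't documented here.
 */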
static void
nv50_dac_enable(struct drm_encoder *encoder)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	u32 *push;

	nv50_outp_acquire(nv_encoder);

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			u32 syncs = 0x00000000;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000001;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000002;

			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
			evo_data(push, 1 << nv_crtc->index);
			evo_data(push, syncs);
		} else {
			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
			u32 syncs = 0x00000001;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000008;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000010;

			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				magic |= 0x00000001;

			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
			evo_data(push, syncs);
			evo_data(push, magic);
			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
			evo_data(push, 1 << nv_crtc->index);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}
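/* Load-detect an analog monitor: drive the DAC with the VBIOS-provided
 * test value (falling back to a default of 340 when the table has none)
 * and ask NVKM whether a load was sensed.
 */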
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_dac_load_v0 load;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};
	int ret;

	args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (args.load.data == 0)
		args.load.data = 340;

	ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
	if (ret || !args.load.load)
		return connector_status_disconnected;

	return connector_status_connected;
}

static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_dac_enable,
	.disable = nv50_dac_disable,
	.detect = nv50_dac_detect
};

static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};

static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type = DRM_MODE_ENCODER_DAC;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;

	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
	if (bus)
		nv_encoder->i2c = &bus->i2c;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
			 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_dac_help);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}

/******************************************************************************
 * Audio
 *****************************************************************************/
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hda_eld_v0 eld;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
}

static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct __packed {
		struct {
			struct nv50_disp_mthd_v1 mthd;
			struct nv50_disp_sor_hda_eld_v0 eld;
		} base;
		u8 data[sizeof(nv_connector->base.eld)];
	} args = {
		.base.mthd.version = 1,
		.base.mthd.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.mthd.hasht   = nv_encoder->dcb->hasht,
		.base.mthd.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				     (0x0100 << nv_crtc->index),
	};

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));

	nvif_mthd(disp->disp, 0, &args,
		  sizeof(args.base) + drm_eld_size(args.data));
}

/******************************************************************************
 * HDMI
 *****************************************************************************/
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
}

static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
		u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
		.pwr.state = 1,
		.pwr.rekey = 56, /* binary driver, and tegra, constant */
	};
	struct nouveau_connector *nv_connector;
	u32 max_ac_packet;
	union hdmi_infoframe avi_frame;
	union hdmi_infoframe vendor_frame;
	int ret;
	int size;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
						       false);
	if (!ret) {
		/* We have an AVI InfoFrame, populate it to the display */
		args.pwr.avi_infoframe_length
			= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
	}

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
							  &nv_connector->base, mode);
	if (!ret) {
		/* We have a Vendor InfoFrame, populate it to the display */
		args.pwr.vendor_infoframe_length
			= hdmi_infoframe_pack(&vendor_frame,
					      args.infoframes
					      + args.pwr.avi_infoframe_length,
					      17);
	}
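	/* Ancillary (audio etc.) packets have to fit into the horizontal
	 * blanking period: take htotal - hdisplay, subtract the rekey value
	 * and a fixed overhead of 18 (mirroring the tegra driver, per the
	 * comment below), and express the result in units of 32.
	 */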
	max_ac_packet  = mode->htotal - mode->hdisplay;
	max_ac_packet -= args.pwr.rekey;
	max_ac_packet -= 18; /* constant from tegra */
	args.pwr.max_ac_packet = max_ac_packet / 32;

	size = sizeof(args.base)
	     + sizeof(args.pwr)
	     + args.pwr.avi_infoframe_length
	     + args.pwr.vendor_infoframe_length;
	nvif_mthd(disp->disp, 0, &args, size);
	nv50_audio_enable(encoder, mode);
}

/******************************************************************************
 * MST
 *****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
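/* Three objects make up the MST support here: one nv50_mstm per SOR wraps
 * the DP MST topology manager, one nv50_mstc connector is created per
 * branch-device port, and one nv50_msto fake encoder exists per head so
 * that any head can drive any MST stream.
 */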
struct nv50_mstm {
	struct nouveau_encoder *outp;

	struct drm_dp_mst_topology_mgr mgr;
	struct nv50_msto *msto[4];

	bool modified;
	bool disabled;
	int links;
};

struct nv50_mstc {
	struct nv50_mstm *mstm;
	struct drm_dp_mst_port *port;
	struct drm_connector connector;

	struct drm_display_mode *native;
	struct edid *edid;

	int pbn;
};

struct nv50_msto {
	struct drm_encoder encoder;

	struct nv50_head *head;
	struct nv50_mstc *mstc;
	bool disabled;
};

static struct drm_dp_payload *
nv50_msto_payload(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	int vcpi = mstc->port->vcpi.vcpi, i;

	NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
			  mstm->outp->base.base.name, i, payload->vcpi,
			  payload->start_slot, payload->num_slots);
	}

	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		if (payload->vcpi == vcpi)
			return payload;
	}

	return NULL;
}

static void
nv50_msto_cleanup(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
	if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
		drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
	if (msto->disabled) {
		msto->mstc = NULL;
		msto->head = NULL;
		msto->disabled = false;
	}
}
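/* Program this stream's VCPI allocation into the SOR.  The slot range
 * comes from the topology manager's payload table; a zeroed vcpi struct
 * (stream torn down, or no payload found) disables the stream.
 */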
static void
nv50_msto_prepare(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht  = mstm->outp->dcb->hasht,
		.base.hashm  = (0xf0ff & mstm->outp->dcb->hashm) |
			       (0x0100 << msto->head->base.index),
	};

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
	if (mstc->port && mstc->port->vcpi.vcpi > 0) {
		struct drm_dp_payload *payload = nv50_msto_payload(msto);
		if (payload) {
			args.vcpi.start_slot = payload->start_slot;
			args.vcpi.num_slots = payload->num_slots;
			args.vcpi.pbn = mstc->port->vcpi.pbn;
			args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
		}
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);
	nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
}

static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
	struct nv50_mstm *mstm = mstc->mstm;
	int bpp = conn_state->connector->display_info.bpc * 3;
	int slots;
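	/* PBN (payload bandwidth number) is DP MST's unit of link bandwidth:
	 * derived from the adjusted pixel clock and bits-per-pixel (three
	 * colour components of bpc bits each), then checked against the
	 * time slots still available on the link.
	 */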
	mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);

	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	if (slots < 0)
		return slots;

	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					   mstc->native);
}

static void
nv50_msto_enable(struct drm_encoder *encoder)
{
	struct nv50_head *head = nv50_head(encoder->crtc);
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto, depth;
	int slots;
	bool r;

	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
	WARN_ON(!r);

	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp);

	if (mstm->outp->link & 1)
		proto = 0x8;
	else
		proto = 0x9;

	switch (mstc->connector.display_info.bpc) {
	case  6: depth = 0x2; break;
	case  8: depth = 0x5; break;
	case 10:
	default: depth = 0x6; break;
	}

	mstm->outp->update(mstm->outp, head->base.index,
			   &head->base.base.state->adjusted_mode, proto, depth);

	msto->head = head;
	msto->mstc = mstc;
	mstm->modified = true;
}

static void
nv50_msto_disable(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	if (mstc->port)
		drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	if (!--mstm->links)
		mstm->disabled = true;
	msto->disabled = true;
}
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.disable = nv50_msto_disable,
	.enable = nv50_msto_enable,
	.atomic_check = nv50_msto_atomic_check,
};

static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	drm_encoder_cleanup(&msto->encoder);
	kfree(msto);
}

static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};

static int
nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
	      struct nv50_msto **pmsto)
{
	struct nv50_msto *msto;
	int ret;

	if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
		return -ENOMEM;

	ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
			       DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
	if (ret) {
		kfree(*pmsto);
		*pmsto = NULL;
		return ret;
	}

	drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
	msto->encoder.possible_crtcs = heads;
	return 0;
}

static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
			      struct drm_connector_state *connector_state)
{
	struct nv50_head *head = nv50_head(connector_state->crtc);
	struct nv50_mstc *mstc = nv50_mstc(connector);
	if (mstc->port) {
		struct nv50_mstm *mstm = mstc->mstm;
		return &mstm->msto[head->base.index]->encoder;
	}
	return NULL;
}

static struct drm_encoder *
nv50_mstc_best_encoder(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	if (mstc->port) {
		struct nv50_mstm *mstm = mstc->mstm;
		return &mstm->msto[0]->encoder;
	}
	return NULL;
}

static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	return MODE_OK;
}

static int
nv50_mstc_get_modes(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	int ret = 0;

	mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
	drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
	if (mstc->edid)
		ret = drm_add_edid_modes(&mstc->connector, mstc->edid);

	if (!mstc->connector.display_info.bpc)
		mstc->connector.display_info.bpc = 8;

	if (mstc->native)
		drm_mode_destroy(mstc->connector.dev, mstc->native);
	mstc->native = nouveau_conn_native_mode(&mstc->connector);
	return ret;
}

static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.best_encoder = nv50_mstc_best_encoder,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
};

static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	if (!mstc->port)
		return connector_status_disconnected;
	return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
}

static void
nv50_mstc_destroy(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	drm_connector_cleanup(&mstc->connector);
	kfree(mstc);
}

static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.detect = nv50_mstc_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct nv50_mstc *mstc;
	int ret, i;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		kfree(*pmstc);
		*pmstc = NULL;
		return ret;
	}

	drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);

	mstc->connector.funcs->reset(&mstc->connector);
	nouveau_conn_attach_properties(&mstc->connector);

	for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
		drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);

	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
	drm_mode_connector_set_path_property(&mstc->connector, path);
	return 0;
}

static void
nv50_mstm_cleanup(struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;
	int ret;

	NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
	ret = drm_dp_check_act_status(&mstm->mgr);

	ret = drm_dp_update_payload_part2(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_cleanup(msto);
		}
	}

	mstm->modified = false;
}

static void
nv50_mstm_prepare(struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;
	int ret;

	NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
	ret = drm_dp_update_payload_part1(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_prepare(msto);
		}
	}

	if (mstm->disabled) {
		if (!mstm->links)
			nv50_outp_release(mstm->outp);
		mstm->disabled = false;
	}
}

static void
nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
}

static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_unregister(&mstc->connector);

	drm_modeset_lock_all(drm->dev);
	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
	mstc->port = NULL;
	drm_modeset_unlock_all(drm->dev);

	drm_connector_unreference(&mstc->connector);
}

static void
nv50_mstm_register_connector(struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);

	drm_modeset_lock_all(drm->dev);
	drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
	drm_modeset_unlock_all(drm->dev);

	drm_connector_register(connector);
}

static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port, const char *path)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	struct nv50_mstc *mstc;
	int ret;

	ret = nv50_mstc_new(mstm, port, path, &mstc);
	if (ret) {
		if (mstc)
			mstc->connector.funcs->destroy(&mstc->connector);
		return NULL;
	}

	return &mstc->connector;
}

static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
	.register_connector = nv50_mstm_register_connector,
	.destroy_connector = nv50_mstm_destroy_connector,
	.hotplug = nv50_mstm_hotplug,
};
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
	bool handled = true;
	int ret;
	u8 esi[8] = {};

	if (!aux)
		return;

	while (handled) {
		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (ret != 8) {
			drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
			return;
		}

		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
		if (!handled)
			break;

		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}

void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
	if (mstm)
		drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}

static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
	struct nouveau_encoder *outp = mstm->outp;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_link_v0 mst;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
		.base.hasht = outp->dcb->hasht,
		.base.hashm = outp->dcb->hashm,
		.mst.state = state,
	};
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nvif_object *disp = &drm->display->disp;
	int ret;

	if (dpcd >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
		if (ret < 0)
			return ret;

		dpcd &= ~DP_MST_EN;
		if (state)
			dpcd |= DP_MST_EN;

		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
		if (ret < 0)
			return ret;
	}

	return nvif_mthd(disp, 0, &args, sizeof(args));
}
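/* Probe whether MST can and should be used: sinks advertising DPCD 1.2+
 * have their MST_CAP bit checked, the sink-side MSTM_CTRL register and the
 * SOR's MST link state are flipped to match, and the topology manager is
 * started or stopped accordingly.
 */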
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
	int ret, state = 0;

	if (!mstm)
		return 0;

	if (dpcd[0] >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
		if (ret < 0)
			return ret;

		if (!(dpcd[1] & DP_MST_CAP))
			dpcd[0] = 0x11;
		else
			state = allow;
	}

	ret = nv50_mstm_enable(mstm, dpcd[0], state);
	if (ret)
		return ret;

	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
	if (ret)
		return nv50_mstm_enable(mstm, dpcd[0], 0);

	return mstm->mgr.mst_state;
}

static void
nv50_mstm_fini(struct nv50_mstm *mstm)
{
	if (mstm && mstm->mgr.mst_state)
		drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}

static void
nv50_mstm_init(struct nv50_mstm *mstm)
{
	if (mstm && mstm->mgr.mst_state)
		drm_dp_mst_topology_mgr_resume(&mstm->mgr);
}

static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
	struct nv50_mstm *mstm = *pmstm;
	if (mstm) {
		kfree(*pmstm);
		*pmstm = NULL;
	}
}

static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret, i;
	u8 dpcd;

	/* This is a workaround for some monitors not functioning
	 * correctly in MST mode on initial module load.  I think
	 * some bad interaction with the VBIOS may be responsible.
	 *
	 * A good ol' off and on again seems to work here ;)
	 */
	ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
	if (ret >= 0 && dpcd >= 0x12)
		drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	for (i = 0; i < max_payloads; i++) {
		ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
				    i, &mstm->msto[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/******************************************************************************
 * SOR
 *****************************************************************************/
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
		struct drm_display_mode *mode, u8 proto, u8 depth)
{
	struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
	u32 *push;

	if (!mode) {
		nv_encoder->ctrl &= ~BIT(head);
		if (!(nv_encoder->ctrl & 0x0000000f))
			nv_encoder->ctrl = 0;
	} else {
		nv_encoder->ctrl |= proto << 8;
		nv_encoder->ctrl |= BIT(head);
	}

	if ((push = evo_wait(core, 6))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			if (mode) {
				if (mode->flags & DRM_MODE_FLAG_NHSYNC)
					nv_encoder->ctrl |= 0x00001000;
				if (mode->flags & DRM_MODE_FLAG_NVSYNC)
					nv_encoder->ctrl |= 0x00002000;
				nv_encoder->ctrl |= depth << 16;
			}
			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
		} else {
			if (mode) {
				u32 magic = 0x31ec6000 | (head << 25);
				u32 syncs = 0x00000001;
				if (mode->flags & DRM_MODE_FLAG_NHSYNC)
					syncs |= 0x00000008;
				if (mode->flags & DRM_MODE_FLAG_NVSYNC)
					syncs |= 0x00000010;
				if (mode->flags & DRM_MODE_FLAG_INTERLACE)
					magic |= 0x00000001;

				evo_mthd(push, 0x0404 + (head * 0x300), 2);
				evo_data(push, syncs | (depth << 6));
				evo_data(push, magic);
			}
			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
		}
		evo_data(push, nv_encoder->ctrl);
		evo_kick(push, core);
	}
}
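/* When tearing a DP sink down, politely put it into D3 over the AUX
 * channel before the SOR control word for this head is cleared via
 * nv_encoder->update() below.
 */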
static void
nv50_sor_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);

	nv_encoder->crtc = NULL;

	if (nv_crtc) {
		struct nvkm_i2c_aux *aux = nv_encoder->aux;
		u8 pwr;

		if (aux) {
			int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
			if (ret == 0) {
				pwr &= ~DP_SET_POWER_MASK;
				pwr |=  DP_SET_POWER_D3;
				nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
			}
		}

		nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
		nv50_audio_disable(encoder, nv_crtc);
		nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
		nv50_outp_release(nv_encoder);
	}
}
static void
nv50_sor_enable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_lvds_script_v0 lvds;
	} lvds = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
		.base.hasht   = nv_encoder->dcb->hasht,
		.base.hashm   = nv_encoder->dcb->hashm,
	};
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_connector *nv_connector;
	struct nvbios *bios = &drm->vbios;
	u8 proto = 0xf;
	u8 depth = 0x0;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	nv_encoder->crtc = encoder->crtc;
	nv50_outp_acquire(nv_encoder);

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		if (nv_encoder->link & 1) {
			proto = 0x1;
			/* Only enable dual-link if:
			 *  - Need to (i.e. rate > 165MHz)
			 *  - DCB says we can
			 *  - Not an HDMI monitor, since there's no dual-link
			 *    on HDMI.
			 */
			if (mode->clock >= 165000 &&
			    nv_encoder->dcb->duallink_possible &&
			    !drm_detect_hdmi_monitor(nv_connector->edid))
				proto |= 0x4;
		} else {
			proto = 0x2;
		}

		nv50_hdmi_enable(&nv_encoder->base.base, mode);
		break;
	case DCB_OUTPUT_LVDS:
		proto = 0x0;

		if (bios->fp_no_ddc) {
			if (bios->fp.dual_link)
				lvds.lvds.script |= 0x0100;
			if (bios->fp.if_is_24bit)
				lvds.lvds.script |= 0x0200;
		} else {
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				if (((u8 *)nv_connector->edid)[121] == 2)
					lvds.lvds.script |= 0x0100;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				lvds.lvds.script |= 0x0100;
			}

			if (lvds.lvds.script & 0x0100) {
				if (bios->fp.strapless_is_24bit & 2)
					lvds.lvds.script |= 0x0200;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					lvds.lvds.script |= 0x0200;
			}

			if (nv_connector->base.display_info.bpc == 8)
				lvds.lvds.script |= 0x0200;
		}

		nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
		break;
	case DCB_OUTPUT_DP:
		if (nv_connector->base.display_info.bpc == 6)
			depth = 0x2;
		else
		if (nv_connector->base.display_info.bpc == 8)
			depth = 0x5;
		else
			depth = 0x6;

		if (nv_encoder->link & 1)
			proto = 0x8;
		else
			proto = 0x9;

		nv50_audio_enable(encoder, mode);
		break;
	default:
		BUG();
		break;
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
}

static const struct drm_encoder_helper_funcs
nv50_sor_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_sor_enable,
	.disable = nv50_sor_disable,
};

static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	nv50_mstm_del(&nv_encoder->dp.mstm);
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_sor_func = {
	.destroy = nv50_sor_destroy,
};
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type, ret;

	switch (dcbe->type) {
	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
	default:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->update = nv50_sor_update;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_sor_help);

	drm_mode_connector_attach_encoder(connector, encoder);

	if (dcbe->type == DCB_OUTPUT_DP) {
		struct nv50_disp *disp = nv50_disp(encoder->dev);
		struct nvkm_i2c_aux *aux =
			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
		if (aux) {
			if (disp->disp->oclass < GF110_DISP) {
				/* HW has no support for address-only
				 * transactions, so we're required to
				 * use custom I2C-over-AUX code.
				 */
				nv_encoder->i2c = &aux->i2c;
			} else {
				nv_encoder->i2c = &nv_connector->aux.ddc;
			}
			nv_encoder->aux = aux;
		}

		/*TODO: Use DP Info Table to check for support. */
		if (disp->disp->oclass >= GF110_DISP) {
			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
					    nv_connector->base.base.id,
					    &nv_encoder->dp.mstm);
			if (ret)
				return ret;
		}
	} else {
		struct nvkm_i2c_bus *bus =
			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
		if (bus)
			nv_encoder->i2c = &bus->i2c;
	}

	return 0;
}

/******************************************************************************
 * PIOR
 *****************************************************************************/
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
	if (ret)
		return ret;
	crtc_state->adjusted_mode.clock *= 2;
	return 0;
}

static void
nv50_pior_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(mast, 4);
		if (push) {
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0700 + (or * 0x040), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}

static void
nv50_pior_enable(struct drm_encoder *encoder)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	u8 owner = 1 << nv_crtc->index;
	u8 proto, depth;
	u32 *push;

	nv50_outp_acquire(nv_encoder);

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	switch (nv_connector->base.display_info.bpc) {
	case 10: depth = 0x6; break;
	case  8: depth = 0x5; break;
	case  6: depth = 0x2; break;
	default: depth = 0x0; break;
	}

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
		proto = 0x0;
		break;
	default:
		BUG();
		break;
	}

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			u32 ctrl = (depth << 16) | (proto << 8) | owner;
			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				ctrl |= 0x00000001;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				ctrl |= 0x00000002;
			evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
			evo_data(push, ctrl);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
	.atomic_check = nv50_pior_atomic_check,
	.enable = nv50_pior_enable,
	.disable = nv50_pior_disable,
};

static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};

static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus = NULL;
	struct nvkm_i2c_aux *aux = NULL;
	struct i2c_adapter *ddc;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type;

	switch (dcbe->type) {
	case DCB_OUTPUT_TMDS:
		bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
		ddc  = bus ? &bus->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case DCB_OUTPUT_DP:
		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
		ddc  = aux ? &nv_connector->aux.ddc : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -ENODEV;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->i2c = ddc;
	nv_encoder->aux = aux;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_pior_help);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}

/******************************************************************************
 * Atomic
 *****************************************************************************/
static void
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_dmac *core = &disp->mast.base;
	struct nv50_mstm *mstm;
	struct drm_encoder *encoder;
	u32 *push;

	NV_ATOMIC(drm, "commit core %08x\n", interlock);

	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_prepare(mstm);
		}
	}
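	/* Request a completion notifier (0x0084), kick an UPDATE (0x0080)
	 * carrying the interlock mask for the satellite channels, then poll
	 * the shared sync buffer until the hardware signals completion.
	 */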
	if ((push = evo_wait(core, 5))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x80000000);
		evo_mthd(push, 0x0080, 2);
		evo_data(push, interlock);
		evo_data(push, 0x00000000);
		nouveau_bo_wr32(disp->sync, 0, 0x00000000);
		evo_kick(push, core);
		if (nvif_msec(&drm->client.device, 2000ULL,
			if (nouveau_bo_rd32(disp->sync, 0))
				break;
			usleep_range(1, 2);
		) < 0)
			NV_ERROR(drm, "EVO timeout\n");
	}

	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_cleanup(mstm);
		}
	}
}
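/* The commit tail mirrors the hardware's ordering requirements: tear down
 * (heads, planes, output paths), flush the disables when required, then
 * bring the new configuration up in the opposite order and flush again,
 * before finally waiting for the notifiers to confirm the hardware caught
 * up.
 */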
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct drm_crtc *crtc;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;
	u32 interlock_core = 0;
	u32 interlock_chan = 0;
	int i;

	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
	drm_atomic_helper_wait_for_fences(dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);
	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	if (atom->lock_core)
		mutex_lock(&disp->mutex);

	/* Disable head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
			  asyh->clr.mask, asyh->set.mask);
		if (old_crtc_state->active && !new_crtc_state->active)
			drm_crtc_vblank_off(crtc);

		if (asyh->clr.mask) {
			nv50_head_flush_clr(head, asyh, atom->flush_disable);
			interlock_core |= 1;
		}
	}

	/* Disable plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
			  asyw->clr.mask, asyw->set.mask);
		if (!asyw->clr.mask)
			continue;

		interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
						      atom->flush_disable,
						      asyw);
	}

	/* Disable output path(s). */
	list_for_each_entry(outp, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
			  outp->clr.mask, outp->set.mask);

		if (outp->clr.mask) {
			help->disable(encoder);
			interlock_core |= 1;
			if (outp->flush_disable) {
				nv50_disp_atomic_commit_core(drm, interlock_chan);
				interlock_core = 0;
				interlock_chan = 0;
			}
		}
	}

	/* Flush disable. */
	if (interlock_core) {
		if (atom->flush_disable) {
			nv50_disp_atomic_commit_core(drm, interlock_chan);
			interlock_core = 0;
			interlock_chan = 0;
		}
	}
	/* Update output path(s). */
	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
			  outp->set.mask, outp->clr.mask);

		if (outp->set.mask) {
			help->enable(encoder);
			interlock_core = 1;
		}

		list_del(&outp->head);
		kfree(outp);
	}

	/* Update head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set(head, asyh);
			interlock_core = 1;
		}

		if (new_crtc_state->active) {
			if (!old_crtc_state->active)
				drm_crtc_vblank_on(crtc);
			if (new_crtc_state->event)
				drm_crtc_vblank_get(crtc);
		}
	}

	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
			  asyw->set.mask, asyw->clr.mask);
		if ( !asyw->set.mask &&
		    (!asyw->clr.mask || atom->flush_disable))
			continue;

		interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
	}

	/* Flush update. */
	if (interlock_core) {
		if (!interlock_chan && atom->state.legacy_cursor_update) {
			u32 *push = evo_wait(&disp->mast, 2);
			if (push) {
				evo_mthd(push, 0x0080, 1);
				evo_data(push, 0x00000000);
				evo_kick(push, &disp->mast);
			}
		} else {
			nv50_disp_atomic_commit_core(drm, interlock_chan);
		}
	}

	if (atom->lock_core)
		mutex_unlock(&disp->mutex);

	/* Wait for HW to signal completion. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);
		int ret = nv50_wndw_wait_armed(wndw, asyw);
		if (ret)
			NV_ERROR(drm, "%s: timeout\n", plane->name);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event) {
			unsigned long flags;
			/* Get correct count/ts if racing with vblank irq */
			if (new_crtc_state->active)
				drm_crtc_accurate_vblank_count(crtc);
			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

			new_crtc_state->event = NULL;
			if (new_crtc_state->active)
				drm_crtc_vblank_put(crtc);
		}
	}

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);
}
static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, typeof(*state), commit_work);
	nv50_disp_atomic_commit_tail(state);
}

static int
nv50_disp_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state, bool nonblock)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	bool active = false;
	int ret, i;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		goto done;

	INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		goto done;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err_cleanup;
	}

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err_cleanup;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		if (asyw->set.image) {
			asyw->ntfy.handle = wndw->dmac->sync.handle;
			asyw->ntfy.offset = wndw->ntfy;
			asyw->ntfy.awaken = false;
			asyw->set.ntfy = true;
			nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
			wndw->ntfy ^= 0x10;
		}
	}

	drm_atomic_state_get(state);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		nv50_disp_atomic_commit_tail(state);

	drm_for_each_crtc(crtc, dev) {
		if (crtc->state->enable) {
			if (!drm->have_disp_power_ref) {
				drm->have_disp_power_ref = true;
				return 0;
			}
			active = true;
		}
	}

	if (!active && drm->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		drm->have_disp_power_ref = false;
	}

err_cleanup:
	if (ret)
		drm_atomic_helper_cleanup_planes(dev, state);
done:
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static struct nv50_outp_atom *
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
{
	struct nv50_outp_atom *outp;

	list_for_each_entry(outp, &atom->outp, head) {
		if (outp->encoder == encoder)
			return outp;
	}

	outp = kzalloc(sizeof(*outp), GFP_KERNEL);
	if (!outp)
		return ERR_PTR(-ENOMEM);

	list_add(&outp->head, &atom->outp);
	outp->encoder = encoder;
	return outp;
}
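/* The check_clr/check_set pair below record, per encoder, whether an
 * output path gets torn down and/or programmed by this commit; any such
 * change forces lock_core, and disabling an MST encoder additionally
 * forces the disables to be flushed to hardware before re-enabling.
 */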
static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
				struct drm_connector_state *old_connector_state)
{
	struct drm_encoder *encoder = old_connector_state->best_encoder;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	if (!(crtc = old_connector_state->crtc))
		return 0;

	old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			outp->flush_disable = true;
			atom->flush_disable = true;
		}
		outp->clr.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}

static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
				struct drm_connector_state *connector_state)
{
	struct drm_encoder *encoder = connector_state->best_encoder;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	if (!(crtc = connector_state->crtc))
		return 0;

	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		outp->set.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}

static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct drm_connector_state *old_connector_state, *new_connector_state;
	struct drm_connector *connector;
	int ret, i;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
		if (ret)
			return ret;

		ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
		if (ret)
			return ret;
	}

	return 0;
}

static void
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;

	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		list_del(&outp->head);
		kfree(outp);
	}

	drm_atomic_state_default_clear(state);
}

static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	drm_atomic_state_default_release(&atom->state);
	kfree(atom);
}

static struct drm_atomic_state *
nv50_disp_atomic_state_alloc(struct drm_device *dev)
{
	struct nv50_atom *atom;
	if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
	    drm_atomic_state_init(dev, &atom->state) < 0) {
		kfree(atom);
		return NULL;
	}
	INIT_LIST_HEAD(&atom->outp);
	return &atom->state;
}

static const struct drm_mode_config_funcs
nv50_disp_func = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = nv50_disp_atomic_check,
	.atomic_commit = nv50_disp_atomic_commit,
	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
	.atomic_state_clear = nv50_disp_atomic_state_clear,
	.atomic_state_free = nv50_disp_atomic_state_free,
};
/******************************************************************************
 * Init
 *****************************************************************************/

void
nv50_display_fini(struct drm_device *dev)
{
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	struct drm_plane *plane;

	drm_for_each_plane(plane, dev) {
		struct nv50_wndw *wndw = nv50_wndw(plane);
		if (plane->funcs != &nv50_wndw)
			continue;
		nv50_wndw_fini(wndw);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			nv_encoder = nouveau_encoder(encoder);
			nv50_mstm_fini(nv_encoder->dp.mstm);
		}
	}
}

int
nv50_display_init(struct drm_device *dev)
{
	struct drm_encoder *encoder;
	struct drm_plane *plane;
	u32 *push;

	push = evo_wait(nv50_mast(dev), 32);
	if (!push)
		return -EBUSY;

	evo_mthd(push, 0x0088, 1);
	evo_data(push, nv50_mast(dev)->base.sync.handle);
	evo_kick(push, nv50_mast(dev));

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			struct nouveau_encoder *nv_encoder =
				nouveau_encoder(encoder);
			nv50_mstm_init(nv_encoder->dp.mstm);
		}
	}

	drm_for_each_plane(plane, dev) {
		struct nv50_wndw *wndw = nv50_wndw(plane);
		if (plane->funcs != &nv50_wndw)
			continue;
		nv50_wndw_init(wndw);
	}

	return 0;
}

void
nv50_display_destroy(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);

	nv50_dmac_destroy(&disp->mast.base, disp->disp);

	nouveau_bo_unmap(disp->sync);
	if (disp->sync)
		nouveau_bo_unpin(disp->sync);
	nouveau_bo_ref(NULL, &disp->sync);

	nouveau_display(dev)->priv = NULL;
	kfree(disp);
}

MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);
int
nv50_display_create(struct drm_device *dev)
{
	struct nvif_device *device = &nouveau_drm(dev)->client.device;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct nv50_disp *disp;
	struct dcb_output *dcbe;
	int crtcs, ret, i;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	mutex_init(&disp->mutex);

	nouveau_display(dev)->priv = disp;
	nouveau_display(dev)->dtor = nv50_display_destroy;
	nouveau_display(dev)->init = nv50_display_init;
	nouveau_display(dev)->fini = nv50_display_fini;
	disp->disp = &nouveau_display(dev)->disp;
	dev->mode_config.funcs = &nv50_disp_func;
	if (nouveau_atomic)
		dev->driver->driver_features |= DRIVER_ATOMIC;

	/* small shared memory area we use for notifiers and semaphores */
	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, NULL, &disp->sync);
	if (!ret) {
		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
		if (!ret) {
			ret = nouveau_bo_map(disp->sync);
			if (ret)
				nouveau_bo_unpin(disp->sync);
		}
		if (ret)
			nouveau_bo_ref(NULL, &disp->sync);
	}

	if (ret)
		goto out;

	/* allocate master evo channel */
	ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
			       &disp->mast);
	if (ret)
		goto out;

	/* create crtc objects to represent the hw heads */
	if (disp->disp->oclass >= GF110_DISP)
		crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
	else
		crtcs = 0x3;

	for (i = 0; i < fls(crtcs); i++) {
		if (!(crtcs & (1 << i)))
			continue;
		ret = nv50_head_create(dev, i);
		if (ret)
			goto out;
	}

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe->connector);
		if (IS_ERR(connector))
			continue;

		if (dcbe->location == DCB_LOC_ON_CHIP) {
			switch (dcbe->type) {
			case DCB_OUTPUT_TMDS:
			case DCB_OUTPUT_LVDS:
			case DCB_OUTPUT_DP:
				ret = nv50_sor_create(connector, dcbe);
				break;
			case DCB_OUTPUT_ANALOG:
				ret = nv50_dac_create(connector, dcbe);
				break;
			default:
				ret = -ENODEV;
				break;
			}
		} else {
			ret = nv50_pior_create(connector, dcbe);
		}

		if (ret) {
			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
				dcbe->location, dcbe->type,
				ffs(dcbe->or) - 1, ret);
			ret = 0;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->encoder_ids[0])
			continue;

		NV_WARN(drm, "%s has no encoders, removing\n",
			connector->name);
		connector->funcs->destroy(connector);
	}

out:
	if (ret)
		nv50_display_destroy(dev);
	return ret;
}