drm/nouveau/gsp: move subdev/engine impls to subdev/gsp/rm/r535/
author Ben Skeggs <bskeggs@nvidia.com>
Thu, 14 Nov 2024 03:02:37 +0000 (13:02 +1000)
committer Dave Airlie <airlied@redhat.com>
Sun, 18 May 2025 20:29:23 +0000 (06:29 +1000)
Move all the remaining GSP-RM code together underneath a versioned path,
to make the code easier to work with when adding support for a newer RM
version.

Aside from adjusting include paths, no code change is intended.

Signed-off-by: Ben Skeggs <bskeggs@nvidia.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Timur Tabi <ttabi@nvidia.com>
Tested-by: Timur Tabi <ttabi@nvidia.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
37 files changed:
drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c [deleted file]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c [deleted file]
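
The new nvkm/subdev/gsp/rm/r535/Kbuild added by this commit is not reproduced
on this page. As a sketch only (its contents are an assumption inferred from
the twelve source files listed above, and the real file may differ), it would
be expected to hook the relocated objects into the build along these lines:

  # Hypothetical nvkm/subdev/gsp/rm/r535/Kbuild, inferred from the file list
  # above; not the actual contents of the commit.
  nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o
  nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o

Correspondingly, each engine/subdev Kbuild that previously built its own
r535.o drops that entry, as the ce and disp hunks below show.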

diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 165d61fc5d6c55dc056f37b882e65cc77b49527a..8bf1635ffabc093f8c1fca62c86fd70e71a357ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -10,5 +10,3 @@ nvkm-y += nvkm/engine/ce/gv100.o
 nvkm-y += nvkm/engine/ce/tu102.o
 nvkm-y += nvkm/engine/ce/ga100.o
 nvkm-y += nvkm/engine/ce/ga102.o
-
-nvkm-y += nvkm/engine/ce/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
deleted file mode 100644
index bd0d435..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
-
-struct r535_ce_obj {
-       struct nvkm_object object;
-       struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ce_obj_dtor(struct nvkm_object *object)
-{
-       struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
-
-       nvkm_gsp_rm_free(&obj->rm);
-       return obj;
-}
-
-static const struct nvkm_object_func
-r535_ce_obj = {
-       .dtor = r535_ce_obj_dtor,
-};
-
-static int
-r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-                struct nvkm_object **pobject)
-{
-       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
-       struct r535_ce_obj *obj;
-       NVC0B5_ALLOCATION_PARAMETERS *args;
-
-       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-               return -ENOMEM;
-
-       nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
-       *pobject = &obj->object;
-
-       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
-                                    sizeof(*args), &obj->rm);
-       if (WARN_ON(IS_ERR(args)))
-               return PTR_ERR(args);
-
-       args->version = 1;
-       args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
-
-       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ce_dtor(struct nvkm_engine *engine)
-{
-       kfree(engine->func);
-       return engine;
-}
-
-int
-r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
-           enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
-       struct nvkm_engine_func *rm;
-       int nclass, ret;
-
-       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_ce_dtor;
-       for (int i = 0; i < nclass; i++) {
-               rm->sclass[i].minver = hw->sclass[i].minver;
-               rm->sclass[i].maxver = hw->sclass[i].maxver;
-               rm->sclass[i].oclass = hw->sclass[i].oclass;
-               rm->sclass[i].ctor = r535_ce_obj_ctor;
-       }
-
-       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
-       if (ret)
-               kfree(rm);
-
-       return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e346e924fee8bdb14740daa86614d0540e690a64..23a10e081081b9bd0d72003d4bf1f8747ec47ef5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -29,8 +29,6 @@ nvkm-y += nvkm/engine/disp/tu102.o
 nvkm-y += nvkm/engine/disp/ga102.o
 nvkm-y += nvkm/engine/disp/ad102.o
 
-nvkm-y += nvkm/engine/disp/r535.o
-
 nvkm-y += nvkm/engine/disp/udisp.o
 nvkm-y += nvkm/engine/disp/uconn.o
 nvkm-y += nvkm/engine/disp/uoutp.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
deleted file mode 100644
index 99110ab..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ /dev/null
@@ -1,1725 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-#include "chan.h"
-#include "conn.h"
-#include "dp.h"
-#include "head.h"
-#include "ior.h"
-#include "outp.h"
-
-#include <core/ramht.h>
-#include <subdev/bios.h>
-#include <subdev/bios/conn.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu.h>
-#include <subdev/vfn.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
-
-#include <linux/acpi.h>
-
-static u64
-r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
-{
-       switch (chan->object.oclass & 0xff) {
-       case 0x7d: *psize = 0x10000; return 0x680000;
-       case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize);
-       case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize);
-       case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize);
-       default:
-               BUG_ON(1);
-               break;
-       }
-
-       return 0ULL;
-}
-
-static void
-r535_chan_intr(struct nvkm_disp_chan *chan, bool en)
-{
-}
-
-static void
-r535_chan_fini(struct nvkm_disp_chan *chan)
-{
-       nvkm_gsp_rm_free(&chan->rm.object);
-}
-
-static int
-r535_chan_push(struct nvkm_disp_chan *chan)
-{
-       struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
-       NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
-                                   NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
-                                   sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       if (chan->memory) {
-               switch (nvkm_memory_target(chan->memory)) {
-               case NVKM_MEM_TARGET_NCOH:
-                       ctrl->addressSpace = ADDR_SYSMEM;
-                       ctrl->cacheSnoop = 0;
-                       break;
-               case NVKM_MEM_TARGET_HOST:
-                       ctrl->addressSpace = ADDR_SYSMEM;
-                       ctrl->cacheSnoop = 1;
-                       break;
-               case NVKM_MEM_TARGET_VRAM:
-                       ctrl->addressSpace = ADDR_FBMEM;
-                       break;
-               default:
-                       WARN_ON(1);
-                       return -EINVAL;
-               }
-
-               ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
-               ctrl->limit = nvkm_memory_size(chan->memory) - 1;
-       }
-
-       ctrl->hclass = chan->object.oclass;
-       ctrl->channelInstance = chan->head;
-       ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
-
-       return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
-}
-
-static int
-r535_curs_init(struct nvkm_disp_chan *chan)
-{
-       NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
-       int ret;
-
-       ret = r535_chan_push(chan);
-       if (ret)
-               return ret;
-
-       args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
-                                    (chan->object.oclass << 16) | chan->head,
-                                    chan->object.oclass, sizeof(*args), &chan->rm.object);
-       if (IS_ERR(args))
-               return PTR_ERR(args);
-
-       args->channelInstance = chan->head;
-
-       return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
-}
-
-static const struct nvkm_disp_chan_func
-r535_curs_func = {
-       .init = r535_curs_init,
-       .fini = r535_chan_fini,
-       .intr = r535_chan_intr,
-       .user = r535_chan_user,
-};
-
-static const struct nvkm_disp_chan_user
-r535_curs = {
-       .func = &r535_curs_func,
-       .user = 73,
-};
-
-static int
-r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
-{
-       return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
-                                chan->chid.user << 25 |
-                                (chan->disp->rm.client.object.handle & 0x3fff));
-}
-
-static void
-r535_dmac_fini(struct nvkm_disp_chan *chan)
-{
-       struct nvkm_device *device = chan->disp->engine.subdev.device;
-       const u32 uoff = (chan->chid.user - 1) * 0x1000;
-
-       chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
-       r535_chan_fini(chan);
-}
-
-static int
-r535_dmac_init(struct nvkm_disp_chan *chan)
-{
-       NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
-       int ret;
-
-       ret = r535_chan_push(chan);
-       if (ret)
-               return ret;
-
-       args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
-                                    (chan->object.oclass << 16) | chan->head,
-                                    chan->object.oclass, sizeof(*args), &chan->rm.object);
-       if (IS_ERR(args))
-               return PTR_ERR(args);
-
-       args->channelInstance = chan->head;
-       args->offset = chan->suspend_put;
-
-       return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
-}
-
-static int
-r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory)
-{
-       chan->memory = nvkm_umem_search(chan->object.client, memory);
-       if (IS_ERR(chan->memory))
-               return PTR_ERR(chan->memory);
-
-       return 0;
-}
-
-static const struct nvkm_disp_chan_func
-r535_dmac_func = {
-       .push = r535_dmac_push,
-       .init = r535_dmac_init,
-       .fini = r535_dmac_fini,
-       .intr = r535_chan_intr,
-       .user = r535_chan_user,
-       .bind = r535_dmac_bind,
-};
-
-static const struct nvkm_disp_chan_func
-r535_wimm_func = {
-       .push = r535_dmac_push,
-       .init = r535_dmac_init,
-       .fini = r535_dmac_fini,
-       .intr = r535_chan_intr,
-       .user = r535_chan_user,
-};
-
-static const struct nvkm_disp_chan_user
-r535_wimm = {
-       .func = &r535_wimm_func,
-       .user = 33,
-};
-
-static const struct nvkm_disp_chan_user
-r535_wndw = {
-       .func = &r535_dmac_func,
-       .user = 1,
-};
-
-static void
-r535_core_fini(struct nvkm_disp_chan *chan)
-{
-       struct nvkm_device *device = chan->disp->engine.subdev.device;
-
-       chan->suspend_put = nvkm_rd32(device, 0x680000);
-       r535_chan_fini(chan);
-}
-
-static const struct nvkm_disp_chan_func
-r535_core_func = {
-       .push = r535_dmac_push,
-       .init = r535_dmac_init,
-       .fini = r535_core_fini,
-       .intr = r535_chan_intr,
-       .user = r535_chan_user,
-       .bind = r535_dmac_bind,
-};
-
-static const struct nvkm_disp_chan_user
-r535_core = {
-       .func = &r535_core_func,
-       .user = 0,
-};
-
-static int
-r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
-{
-       struct nvkm_disp *disp = sor->disp;
-       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
-                                   sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->displayId = BIT(sor->asy.outp->index);
-       ctrl->brightness = lvl;
-
-       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
-}
-
-static int
-r535_sor_bl_get(struct nvkm_ior *sor)
-{
-       struct nvkm_disp *disp = sor->disp;
-       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
-       int ret, lvl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
-                                   sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->displayId = BIT(sor->asy.outp->index);
-
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ret;
-       }
-
-       lvl = ctrl->brightness;
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-       return lvl;
-}
-
-static const struct nvkm_ior_func_bl
-r535_sor_bl = {
-       .get = r535_sor_bl_get,
-       .set = r535_sor_bl_set,
-};
-
-static void
-r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size)
-{
-       struct nvkm_disp *disp = sor->disp;
-       NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
-
-       if (WARN_ON(size > sizeof(ctrl->bufferELD)))
-               return;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->displayId = BIT(sor->asy.outp->index);
-       ctrl->numELDSize = size;
-       memcpy(ctrl->bufferELD, data, size);
-       ctrl->maxFreqSupported = 0; //XXX
-       ctrl->ctrl  = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE);
-       ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE);
-       ctrl->deviceEntry = head;
-
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-}
-
-static void
-r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present)
-{
-       struct nvkm_disp *disp = sor->disp;
-       NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
-
-       if (present)
-               return;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->displayId = BIT(sor->asy.outp->index);
-       ctrl->deviceEntry = head;
-
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-}
-
-static const struct nvkm_ior_func_hda
-r535_sor_hda = {
-       .hpd = r535_sor_hda_hpd,
-       .eld = r535_sor_hda_eld,
-};
-
-static void
-r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute)
-{
-       struct nvkm_disp *disp = sor->disp;
-       NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->displayId = BIT(sor->asy.outp->index);
-       ctrl->mute = mute;
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-}
-
-static void
-r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
-{
-       struct nvkm_disp *disp = sor->disp;
-       NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl;
-
-       if (!enable)
-               r535_sor_dp_audio_mute(sor, true);
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->displayId = BIT(sor->asy.outp->index);
-       ctrl->enable = enable;
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-
-       if (enable)
-               r535_sor_dp_audio_mute(sor, false);
-}
-
-static void
-r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn)
-{
-       struct nvkm_disp *disp = sor->disp;
-       struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->head = head;
-       ctrl->sorIndex = sor->id;
-       ctrl->dpLink = sor->asy.link == 2;
-       ctrl->bEnableOverride = 1;
-       ctrl->bMST = 1;
-       ctrl->hBlankSym = 0;
-       ctrl->vBlankSym = 0;
-       ctrl->colorFormat = 0;
-       ctrl->bEnableTwoHeadOneOr = 0;
-       ctrl->singleHeadMultistreamMode = 0;
-       ctrl->MST.slotStart = slot;
-       ctrl->MST.slotEnd = slot + slot_nr - 1;
-       ctrl->MST.PBN = pbn;
-       ctrl->MST.Timeslice = aligned_pbn;
-       ctrl->MST.sendACT = 0;
-       ctrl->MST.singleHeadMSTPipeline = 0;
-       ctrl->MST.bEnableAudioOverRightPanel = 0;
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-}
-
-static int
-r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef,
-               u32 watermark, u32 hblanksym, u32 vblanksym)
-{
-       struct nvkm_disp *disp = sor->disp;
-       struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->head = head;
-       ctrl->sorIndex = sor->id;
-       ctrl->dpLink = sor->asy.link == 2;
-       ctrl->bEnableOverride = 1;
-       ctrl->bMST = 0;
-       ctrl->hBlankSym = hblanksym;
-       ctrl->vBlankSym = vblanksym;
-       ctrl->colorFormat = 0;
-       ctrl->bEnableTwoHeadOneOr = 0;
-       ctrl->SST.bEnhancedFraming = ef;
-       ctrl->SST.tuSize = 64;
-       ctrl->SST.waterMark = watermark;
-       ctrl->SST.bEnableAudioOverRightPanel = 0;
-       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
-}
-
-static const struct nvkm_ior_func_dp
-r535_sor_dp = {
-       .sst = r535_sor_dp_sst,
-       .vcpi = r535_sor_dp_vcpi,
-       .audio = r535_sor_dp_audio,
-};
-
-static void
-r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling,
-                  bool scrambling_low_rates)
-{
-       struct nvkm_outp *outp = sor->asy.outp;
-       struct nvkm_disp *disp = outp->disp;
-       NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->displayId = BIT(outp->index);
-       ctrl->caps = 0;
-       if (support)
-               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE);
-       if (scrambling)
-               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE);
-       if (scrambling_low_rates)
-               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE);
-
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-}
-
-static void
-r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute)
-{
-       struct nvkm_disp *disp = outp->disp;
-       NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->displayId = BIT(outp->index);
-       ctrl->mute = mute;
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-}
-
-static void
-r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable)
-{
-       struct nvkm_disp *disp = outp->disp;
-       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->displayId = BIT(outp->index);
-       ctrl->transmitControl =
-               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) |
-               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) |
-               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) |
-               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) |
-               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) |
-               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO);
-       ctrl->packetSize = 10;
-       ctrl->aPacket[0] = 0x03;
-       ctrl->aPacket[1] = 0x00;
-       ctrl->aPacket[2] = 0x00;
-       ctrl->aPacket[3] = enable ? 0x10 : 0x01;
-       ctrl->aPacket[4] = 0x00;
-       ctrl->aPacket[5] = 0x00;
-       ctrl->aPacket[6] = 0x00;
-       ctrl->aPacket[7] = 0x00;
-       ctrl->aPacket[8] = 0x00;
-       ctrl->aPacket[9] = 0x00;
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-}
-
-static void
-r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable)
-{
-       struct nvkm_device *device = sor->disp->engine.subdev.device;
-       const u32 hdmi = head * 0x400;
-
-       r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable);
-       r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable);
-
-       /* General Control (GCP). */
-       nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
-       nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010);
-       nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
-}
-
-static void
-r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey)
-{
-       struct nvkm_disp *disp = sor->disp;
-       NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl;
-
-       if (!enable)
-               return;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return;
-
-       ctrl->displayId = BIT(sor->asy.outp->index);
-       ctrl->enable = enable;
-
-       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
-}
-
-static const struct nvkm_ior_func_hdmi
-r535_sor_hdmi = {
-       .ctrl = r535_sor_hdmi_ctrl,
-       .scdc = r535_sor_hdmi_scdc,
-       /*TODO: SF_USER -> KMS. */
-       .infoframe_avi = gv100_sor_hdmi_infoframe_avi,
-       .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi,
-       .audio = r535_sor_hdmi_audio,
-};
-
-static const struct nvkm_ior_func
-r535_sor = {
-       .hdmi = &r535_sor_hdmi,
-       .dp = &r535_sor_dp,
-       .hda = &r535_sor_hda,
-       .bl = &r535_sor_bl,
-};
-
-static int
-r535_sor_new(struct nvkm_disp *disp, int id)
-{
-       return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/);
-}
-
-static int
-r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
-{
-       *pmask = 0xf;
-       return 4;
-}
-
-static void
-r535_head_vblank_put(struct nvkm_head *head)
-{
-       struct nvkm_device *device = head->disp->engine.subdev.device;
-
-       nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000);
-}
-
-static void
-r535_head_vblank_get(struct nvkm_head *head)
-{
-       struct nvkm_device *device = head->disp->engine.subdev.device;
-
-       nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002);
-       nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002);
-}
-
-static void
-r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
-{
-}
-
-static const struct nvkm_head_func
-r535_head = {
-       .state = r535_head_state,
-       .vblank_get = r535_head_vblank_get,
-       .vblank_put = r535_head_vblank_put,
-};
-
-static struct nvkm_conn *
-r535_conn_new(struct nvkm_disp *disp, u32 id)
-{
-       NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl;
-       struct nvbios_connE dcbE = {};
-       struct nvkm_conn *conn;
-       int ret, index;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return (void *)ctrl;
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->displayId = BIT(id);
-
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ERR_PTR(ret);
-       }
-
-       list_for_each_entry(conn, &disp->conns, head) {
-               if (conn->index == ctrl->data[0].index) {
-                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-                       return conn;
-               }
-       }
-
-       dcbE.type = ctrl->data[0].type;
-       index = ctrl->data[0].index;
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-
-       ret = nvkm_conn_new(disp, index, &dcbE, &conn);
-       if (ret)
-               return ERR_PTR(ret);
-
-       list_add_tail(&conn->head, &disp->conns);
-       return conn;
-}
-
-static void
-r535_outp_release(struct nvkm_outp *outp)
-{
-       outp->disp->rm.assigned_sors &= ~BIT(outp->ior->id);
-       outp->ior->asy.outp = NULL;
-       outp->ior = NULL;
-}
-
-static int
-r535_outp_acquire(struct nvkm_outp *outp, bool hda)
-{
-       struct nvkm_disp *disp = outp->disp;
-       struct nvkm_ior *ior;
-       NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
-       int ret, or;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->displayId = BIT(outp->index);
-       ctrl->sorExcludeMask = disp->rm.assigned_sors;
-       if (hda)
-               ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
-
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ret;
-       }
-
-       for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
-               if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
-                       disp->rm.assigned_sors |= BIT(or);
-                       break;
-               }
-       }
-
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-
-       if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag)))
-               return -EINVAL;
-
-       ior = nvkm_ior_find(disp, SOR, or);
-       if (WARN_ON(!ior))
-               return -EINVAL;
-
-       nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior);
-       return 0;
-}
-
-static int
-r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
-{
-       NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
-       int ret;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->head = head;
-
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ret;
-       }
-
-       *displayid = ctrl->displayId;
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-       return 0;
-}
-
-static struct nvkm_ior *
-r535_outp_inherit(struct nvkm_outp *outp)
-{
-       struct nvkm_disp *disp = outp->disp;
-       struct nvkm_head *head;
-       u32 displayid;
-       int ret;
-
-       list_for_each_entry(head, &disp->heads, head) {
-               ret = r535_disp_head_displayid(disp, head->id, &displayid);
-               if (WARN_ON(ret))
-                       return NULL;
-
-               if (displayid == BIT(outp->index)) {
-                       NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
-                       u32 id, proto;
-                       struct nvkm_ior *ior;
-
-                       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                                   NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
-                                                   sizeof(*ctrl));
-                       if (IS_ERR(ctrl))
-                               return NULL;
-
-                       ctrl->subDeviceInstance = 0;
-                       ctrl->displayId = displayid;
-
-                       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-                       if (ret) {
-                               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-                               return NULL;
-                       }
-
-                       id = ctrl->index;
-                       proto = ctrl->protocol;
-                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-
-                       ior = nvkm_ior_find(disp, SOR, id);
-                       if (WARN_ON(!ior))
-                               return NULL;
-
-                       switch (proto) {
-                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
-                               ior->arm.proto = TMDS;
-                               ior->arm.link = 1;
-                               break;
-                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
-                               ior->arm.proto = TMDS;
-                               ior->arm.link = 2;
-                               break;
-                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
-                               ior->arm.proto = TMDS;
-                               ior->arm.link = 3;
-                               break;
-                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
-                               ior->arm.proto = DP;
-                               ior->arm.link = 1;
-                               break;
-                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
-                               ior->arm.proto = DP;
-                               ior->arm.link = 2;
-                               break;
-                       default:
-                               WARN_ON(1);
-                               return NULL;
-                       }
-
-                       ior->arm.proto_evo = proto;
-                       ior->arm.head = BIT(head->id);
-                       disp->rm.assigned_sors |= BIT(ior->id);
-                       return ior;
-               }
-       }
-
-       return NULL;
-}
-
-static int
-r535_outp_dfp_get_info(struct nvkm_outp *outp)
-{
-       NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
-       struct nvkm_disp *disp = outp->disp;
-       int ret;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->displayId = BIT(outp->index);
-
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ret;
-       }
-
-       nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
-                  ctrl->displayId, ctrl->flags, ctrl->flags2);
-
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-       return 0;
-}
-
-static int
-r535_outp_detect(struct nvkm_outp *outp)
-{
-       NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
-       struct nvkm_disp *disp = outp->disp;
-       int ret;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->displayMask = BIT(outp->index);
-
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ret;
-       }
-
-       if (ctrl->displayMask & BIT(outp->index)) {
-               ret = r535_outp_dfp_get_info(outp);
-               if (ret == 0)
-                       ret = 1;
-       } else {
-               ret = 0;
-       }
-
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-       return ret;
-}
-
-static int
-r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
-{
-       NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl;
-       struct nvkm_disp *disp = outp->disp;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->displayId = id;
-       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
-}
-
-static int
-r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
-{
-       NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
-       struct nvkm_disp *disp = outp->disp;
-       int ret;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
-                                   sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->displayId = BIT(outp->index);
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ret;
-       }
-
-       *pid = ctrl->displayIdAssigned;
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-       return 0;
-}
-
-static int
-r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
-{
-       NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl;
-       struct nvkm_disp *disp = outp->disp;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->displayId = BIT(outp->index);
-       ctrl->numLanes = lanes;
-       for (int i = 0; i < lanes; i++)
-               ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA,  PREEMPHASIS, pe[i]) |
-                               NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]);
-
-       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
-}
-
-static int
-r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw)
-{
-       struct nvkm_disp *disp = outp->disp;
-       NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
-       int ret, retries;
-       u32 cmd, data;
-
-       cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
-             NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
-             NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
-       data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
-              NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
-              NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
-
-       if (mst)
-               cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
-
-       if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
-               cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
-
-       if (target == 0 &&
-            (outp->dp.dpcd[DPCD_RC02] & 0x20) &&
-           !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
-               cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
-
-       /* We should retry up to 3 times, but only if GSP asks politely */
-       for (retries = 0; retries < 3; ++retries) {
-               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL,
-                                           sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               ctrl->subDeviceInstance = 0;
-               ctrl->displayId = BIT(outp->index);
-               ctrl->retryTimeMs = 0;
-               ctrl->cmd = cmd;
-               ctrl->data = data;
-
-               ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-               if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
-                       /*
-                        * Device (likely an eDP panel) isn't ready yet, wait for the time specified
-                        * by GSP before retrying again
-                        */
-                       nvkm_debug(&disp->engine.subdev,
-                                  "Waiting %dms for GSP LT panel delay before retrying\n",
-                                  ctrl->retryTimeMs);
-                       msleep(ctrl->retryTimeMs);
-                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               } else {
-                       /* GSP didn't say to retry, or we were successful */
-                       if (ctrl->err)
-                               ret = -EIO;
-                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-                       break;
-               }
-       }
-
-       return ret;
-}
-
-static int
-r535_dp_train(struct nvkm_outp *outp, bool retrain)
-{
-       for (int target = outp->dp.lttprs; target >= 0; target--) {
-               int ret = r535_dp_train_target(outp, target, outp->dp.lt.mst,
-                                                            outp->dp.lt.nr,
-                                                            outp->dp.lt.bw);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-r535_dp_rates(struct nvkm_outp *outp)
-{
-       NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
-       struct nvkm_disp *disp = outp->disp;
-
-       if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
-           !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
-               return 0;
-
-       if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
-               return -EINVAL;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->displayId = BIT(outp->index);
-       for (int i = 0; i < outp->dp.rates; i++)
-               ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
-
-       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
-}
-
-static int
-r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
-{
-       struct nvkm_disp *disp = outp->disp;
-       NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
-       u8 size = *psize;
-       int ret;
-       int retries;
-
-       for (retries = 0; retries < 3; ++retries) {
-               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               ctrl->subDeviceInstance = 0;
-               ctrl->displayId = BIT(outp->index);
-               ctrl->bAddrOnly = !size;
-               ctrl->cmd = type;
-               if (ctrl->bAddrOnly) {
-                       ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
-                       ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD,  I2C_MOT, FALSE);
-               }
-               ctrl->addr = addr;
-               ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
-               memcpy(ctrl->data, data, size);
-
-               ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-               if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
-                       /*
-                        * Device (likely an eDP panel) isn't ready yet, wait for the time specified
-                        * by GSP before retrying again
-                        */
-                       nvkm_debug(&disp->engine.subdev,
-                                  "Waiting %dms for GSP LT panel delay before retrying in AUX\n",
-                                  ctrl->retryTimeMs);
-                       msleep(ctrl->retryTimeMs);
-                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               } else {
-                       memcpy(data, ctrl->data, size);
-                       *psize = ctrl->size;
-                       ret = ctrl->replyType;
-                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-                       break;
-               }
-       }
-       return ret;
-}
-
-static int
-r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
-{
-       return 0;
-}
-
-static void
-r535_dp_release(struct nvkm_outp *outp)
-{
-       if (!outp->dp.lt.bw) {
-               if (!WARN_ON(!outp->dp.rates))
-                       outp->dp.lt.bw = outp->dp.rate[0].rate / 27000;
-               else
-                       outp->dp.lt.bw = 0x06;
-       }
-
-       outp->dp.lt.nr = 0;
-
-       r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw);
-       r535_outp_release(outp);
-}
-
-static int
-r535_dp_acquire(struct nvkm_outp *outp, bool hda)
-{
-       int ret;
-
-       ret = r535_outp_acquire(outp, hda);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static const struct nvkm_outp_func
-r535_dp = {
-       .detect = r535_outp_detect,
-       .inherit = r535_outp_inherit,
-       .acquire = r535_dp_acquire,
-       .release = r535_dp_release,
-       .dp.aux_pwr = r535_dp_aux_pwr,
-       .dp.aux_xfer = r535_dp_aux_xfer,
-       .dp.mst_id_get = r535_dp_mst_id_get,
-       .dp.mst_id_put = r535_dp_mst_id_put,
-       .dp.rates = r535_dp_rates,
-       .dp.train = r535_dp_train,
-       .dp.drive = r535_dp_drive,
-};
-
-static int
-r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
-{
-       NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
-       struct nvkm_disp *disp = outp->disp;
-       int ret = -E2BIG;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->displayId = BIT(outp->index);
-
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ret;
-       }
-
-       ret = -E2BIG;
-       if (ctrl->bufferSize <= *psize) {
-               memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
-               *psize = ctrl->bufferSize;
-               ret = 0;
-       }
-
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-       return ret;
-}
-
-static const struct nvkm_outp_func
-r535_tmds = {
-       .detect = r535_outp_detect,
-       .inherit = r535_outp_inherit,
-       .acquire = r535_outp_acquire,
-       .release = r535_outp_release,
-       .edid_get = r535_tmds_edid_get,
-};
-
-static int
-r535_outp_new(struct nvkm_disp *disp, u32 id)
-{
-       NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
-       enum nvkm_ior_proto proto;
-       struct dcb_output dcbE = {};
-       struct nvkm_conn *conn;
-       struct nvkm_outp *outp;
-       u8 locn, link = 0;
-       int ret;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                   NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->subDeviceInstance = 0;
-       ctrl->displayId = BIT(id);
-
-       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-       if (ret) {
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-               return ret;
-       }
-
-       switch (ctrl->type) {
-       case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
-               return 0;
-       case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
-               switch (ctrl->protocol) {
-               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
-                       proto = TMDS;
-                       link = 1;
-                       break;
-               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
-                       proto = TMDS;
-                       link = 2;
-                       break;
-               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
-                       proto = TMDS;
-                       link = 3;
-                       break;
-               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
-                       proto = DP;
-                       link = 1;
-                       break;
-               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
-                       proto = DP;
-                       link = 2;
-                       break;
-               default:
-                       WARN_ON(1);
-                       return -EINVAL;
-               }
-
-               break;
-       default:
-               WARN_ON(1);
-               return -EINVAL;
-       }
-
-       locn = ctrl->location;
-       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-
-       conn = r535_conn_new(disp, id);
-       if (IS_ERR(conn))
-               return PTR_ERR(conn);
-
-       switch (proto) {
-       case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break;
-       case   DP: dcbE.type = DCB_OUTPUT_DP; break;
-       default:
-               WARN_ON(1);
-               return -EINVAL;
-       }
-
-       dcbE.location = locn;
-       dcbE.connector = conn->index;
-       dcbE.heads = disp->head.mask;
-       dcbE.i2c_index = 0xff;
-       dcbE.link = dcbE.sorconf.link = link;
-
-       if (proto == TMDS) {
-               ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp);
-               if (ret)
-                       return ret;
-       } else {
-               NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
-               bool mst, wm;
-
-               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                           NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               ctrl->sorIndex = ~0;
-
-               ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
-               if (ret) {
-                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-                       return ret;
-               }
-
-               switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
-               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
-                       dcbE.dpconf.link_bw = 0x06;
-                       break;
-               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
-                       dcbE.dpconf.link_bw = 0x0a;
-                       break;
-               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
-                       dcbE.dpconf.link_bw = 0x14;
-                       break;
-               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
-                       dcbE.dpconf.link_bw = 0x1e;
-                       break;
-               default:
-                       dcbE.dpconf.link_bw = 0x00;
-                       break;
-               }
-
-               mst = ctrl->bIsMultistreamSupported;
-               wm = ctrl->bHasIncreasedWatermarkLimits;
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-
-               if (WARN_ON(!dcbE.dpconf.link_bw))
-                       return -EINVAL;
-
-               dcbE.dpconf.link_nr = 4;
-
-               ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp);
-               if (ret)
-                       return ret;
-
-               outp->dp.mst = mst;
-               outp->dp.increased_wm = wm;
-       }
-
-
-       outp->conn = conn;
-       list_add_tail(&outp->head, &disp->outps);
-       return 0;
-}
-
-static void
-r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc)
-{
-       struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq);
-       Nv2080DpIrqNotification *irq = repv;
-
-       if (WARN_ON(repc < sizeof(*irq)))
-               return;
-
-       nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId);
-
-       if (irq->displayId)
-               nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ);
-}
-
-static void
-r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc)
-{
-       struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd);
-       Nv2080HotplugNotification *hpd = repv;
-
-       if (WARN_ON(repc < sizeof(*hpd)))
-               return;
-
-       nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n",
-                  hpd->plugDisplayMask, hpd->unplugDisplayMask);
-
-       for (int i = 0; i < 31; i++) {
-               u32 mask = 0;
-
-               if (hpd->plugDisplayMask & BIT(i))
-                       mask |= NVKM_DPYID_PLUG;
-               if (hpd->unplugDisplayMask & BIT(i))
-                       mask |= NVKM_DPYID_UNPLUG;
-
-               if (mask)
-                       nvkm_event_ntfy(&disp->rm.event, i, mask);
-       }
-}
-
-static const struct nvkm_event_func
-r535_disp_event = {
-};
-
-static void
-r535_disp_intr_head_timing(struct nvkm_disp *disp, int head)
-{
-       struct nvkm_subdev *subdev = &disp->engine.subdev;
-       struct nvkm_device *device = subdev->device;
-       u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04));
-
-       if (stat & 0x00000002) {
-               nvkm_disp_vblank(disp, head);
-
-               nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002);
-       }
-}
-
-static irqreturn_t
-r535_disp_intr(struct nvkm_inth *inth)
-{
-       struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth);
-       struct nvkm_subdev *subdev = &disp->engine.subdev;
-       struct nvkm_device *device = subdev->device;
-       unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff;
-       int head;
-
-       for_each_set_bit(head, &mask, 8)
-               r535_disp_intr_head_timing(disp, head);
-
-       return IRQ_HANDLED;
-}
-
-static void
-r535_disp_fini(struct nvkm_disp *disp, bool suspend)
-{
-       if (!disp->engine.subdev.use.enabled)
-               return;
-
-       nvkm_gsp_rm_free(&disp->rm.object);
-
-       if (!suspend) {
-               nvkm_gsp_event_dtor(&disp->rm.irq);
-               nvkm_gsp_event_dtor(&disp->rm.hpd);
-               nvkm_event_fini(&disp->rm.event);
-
-               nvkm_gsp_rm_free(&disp->rm.objcom);
-               nvkm_gsp_device_dtor(&disp->rm.device);
-               nvkm_gsp_client_dtor(&disp->rm.client);
-       }
-}
-
-static int
-r535_disp_init(struct nvkm_disp *disp)
-{
-       int ret;
-
-       ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16,
-                               disp->func->root.oclass, 0, &disp->rm.object);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-r535_disp_oneinit(struct nvkm_disp *disp)
-{
-       struct nvkm_device *device = disp->engine.subdev.device;
-       struct nvkm_gsp *gsp = device->gsp;
-       NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
-       int ret, i;
-
-       /* RAMIN. */
-       ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
-       if (ret)
-               return ret;
-
-       if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM))
-               return -EINVAL;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
-                                   NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM,
-                                   sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory);
-       ctrl->instMemSize = nvkm_memory_size(disp->inst->memory);
-       ctrl->instMemAddrSpace = ADDR_FBMEM;
-       ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED;
-
-       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
-       if (ret)
-               return ret;
-
-       /* OBJs. */
-       ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device);
-       if (ret)
-               return ret;
-
-       ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
-                               &disp->rm.objcom);
-       if (ret)
-               return ret;
-
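-       /* Query the window channel mask and count. */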
-       {
-               NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
-
-               ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
-                                          NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
-                                          sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               disp->wndw.mask = ctrl->windowPresentMask;
-               disp->wndw.nr = fls(disp->wndw.mask);
-               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
-       }
-
-       /* Initialise RM brightness/backlight state (via ACPI on X86). */
-       {
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-               NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl;
-               struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice;
-
-               ctrl = nvkm_gsp_rm_ctrl_get(subdevice,
-                                           NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
-                                           sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */
-
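-               /* Attempt to fetch backlight data via the NBCI or NVHG ACPI
-                * _DSM (function 0x14).
-                */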
-               {
-                       const guid_t NBCI_DSM_GUID =
-                               GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7,
-                                         0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44);
-                       u64 NBCI_DSM_REV = 0x00000102;
-                       const guid_t NVHG_DSM_GUID =
-                               GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
-                                         0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
-                       u64 NVHG_DSM_REV = 0x00000102;
-                       acpi_handle handle = ACPI_HANDLE(device->dev);
-
-                       if (handle && acpi_has_method(handle, "_DSM")) {
-                               bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV,
-                                                          1ULL << 0x00000014);
-                               bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
-                                                          1ULL << 0x00000014);
-
-                               if (nbci || nvhg) {
-                                       union acpi_object argv4 = {
-                                               .buffer.type    = ACPI_TYPE_BUFFER,
-                                               .buffer.length  = sizeof(ctrl->backLightData),
-                                               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
-                                       }, *obj;
-
-                                       obj = acpi_evaluate_dsm(handle, nbci ? &NBCI_DSM_GUID : &NVHG_DSM_GUID,
-                                                               0x00000102, 0x14, &argv4);
-                                       if (!obj) {
-                                               acpi_handle_info(handle, "failed to evaluate _DSM\n");
-                                       } else {
-                                               for (int i = 0; i < obj->package.count; i++) {
-                                                       union acpi_object *elt = &obj->package.elements[i];
-                                                       u32 size;
-
-                                                       if (elt->integer.value & ~0xffffffffULL)
-                                                               size = 8;
-                                                       else
-                                                               size = 4;
-
-                                                       memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
-                                                       ctrl->backLightDataSize += size;
-                                               }
-
-                                               ctrl->status = 0;
-                                               ACPI_FREE(obj);
-                                       }
-
-                                       kfree(argv4.buffer.pointer);
-                               }
-                       }
-               }
-
-               ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl);
-               if (ret)
-                       return ret;
-#endif
-       }
-
-       /* Take manual control of DisplayPort from RM. */
-       {
-               NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl;
-
-               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
-                                           NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT,
-                                           sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
-               if (ret)
-                       return ret;
-       }
-
-       /* Query the number of heads. */
-       {
-               NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl;
-
-               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
-                                          NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               disp->head.nr = ctrl->numHeads;
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-       }
-
-       /* Query the head mask, and create a head object for each. */
-       {
-               NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl;
-
-               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
-                                          NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK,
-                                          sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               disp->head.mask = ctrl->headMask;
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-
-               for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
-                       ret = nvkm_head_new_(&r535_head, disp, i);
-                       if (ret)
-                               return ret;
-               }
-       }
-
-       disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask);
-       nvkm_debug(&disp->engine.subdev, "   SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
-       for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
-               ret = disp->func->sor.new(disp, i);
-               if (ret)
-                       return ret;
-       }
-
-       /* Query supported display IDs, and create an output path for each. */
-       {
-               NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
-               unsigned long mask;
-               int i;
-
-               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
-                                          NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               mask = ctrl->displayMask;
-               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
-
-               for_each_set_bit(i, &mask, 32) {
-                       ret = r535_outp_new(disp, i);
-                       if (ret)
-                               return ret;
-               }
-       }
-
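-       /* 3 event types (plug, unplug, DP IRQ) across 32 display IDs. */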
-       ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
-       if (WARN_ON(ret))
-               return ret;
-
-       ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG,
-                                        r535_disp_hpd, &disp->rm.hpd);
-       if (ret)
-               return ret;
-
-       ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ,
-                                        r535_disp_irq, &disp->rm.irq);
-       if (ret)
-               return ret;
-
-       /* RAMHT. */
-       ret = nvkm_ramht_new(device, disp->func->ramht_size ? disp->func->ramht_size :
-                            0x1000, 0, disp->inst, &disp->ramht);
-       if (ret)
-               return ret;
-
-       ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst);
-       if (ret < 0)
-               return ret;
-
-       ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev,
-                           r535_disp_intr, &disp->engine.subdev.inth);
-       if (ret)
-               return ret;
-
-       nvkm_inth_allow(&disp->engine.subdev.inth);
-       return 0;
-}
-
-static void
-r535_disp_dtor(struct nvkm_disp *disp)
-{
-       kfree(disp->func);
-}
-
-int
-r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
-             enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
-{
-       struct nvkm_disp_func *rm;
-       int ret;
-
-       if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_disp_dtor;
-       rm->oneinit = r535_disp_oneinit;
-       rm->init = r535_disp_init;
-       rm->fini = r535_disp_fini;
-       rm->uevent = hw->uevent;
-       rm->sor.cnt = r535_sor_cnt;
-       rm->sor.new = r535_sor_new;
-       rm->ramht_size = hw->ramht_size;
-
-       rm->root = hw->root;
-
-       for (int i = 0; hw->user[i].ctor; i++) {
-               switch (hw->user[i].base.oclass & 0xff) {
-               case 0x73: rm->user[i] = hw->user[i]; break;
-               case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
-               case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
-               case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
-               case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
-               default:
-                       WARN_ON(1);
-                       continue;
-               }
-       }
-
-       ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
-       if (ret) {
-               kfree(rm);
-               return ret;
-       }
-
-       mutex_init(&(*pdisp)->super.mutex); //XXX
-       return 0;
-}
index aff92848abfee8b346479d896dfcb10ffdc9fbff..5a074b9970abe6612e0ee93ddfc32f1939c9701d 100644 (file)
@@ -26,7 +26,5 @@ nvkm-y += nvkm/engine/fifo/tu102.o
 nvkm-y += nvkm/engine/fifo/ga100.o
 nvkm-y += nvkm/engine/fifo/ga102.o
 
-nvkm-y += nvkm/engine/fifo/r535.o
-
 nvkm-y += nvkm/engine/fifo/ucgrp.o
 nvkm-y += nvkm/engine/fifo/uchan.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
deleted file mode 100644 (file)
index 129f274..0000000
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-#include "cgrp.h"
-#include "chan.h"
-#include "chid.h"
-#include "runl.h"
-
-#include <core/gpuobj.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu.h>
-#include <subdev/vfn.h>
-#include <engine/gr.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
-
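-/* Doorbell token: runlist ID in the high 16 bits, channel ID in the low 16. */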
-static u32
-r535_chan_doorbell_handle(struct nvkm_chan *chan)
-{
-       return (chan->cgrp->runl->id << 16) | chan->id;
-}
-
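-/* Channel start/stop are no-ops here; GSP-RM owns channel scheduling. */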
-static void
-r535_chan_stop(struct nvkm_chan *chan)
-{
-}
-
-static void
-r535_chan_start(struct nvkm_chan *chan)
-{
-}
-
-static void
-r535_chan_ramfc_clear(struct nvkm_chan *chan)
-{
-       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
-
-       nvkm_gsp_rm_free(&chan->rm.object);
-
-       dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
-                         chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);
-
-       nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
-}
-
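-/* USERD is allocated as pages, with eight channels' USERD per page. */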
-#define CHID_PER_USERD 8
-
-static int
-r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
-{
-       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
-       struct nvkm_engn *engn;
-       struct nvkm_device *device = fifo->engine.subdev.device;
-       NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
-       const int userd_p = chan->id / CHID_PER_USERD;
-       const int userd_i = chan->id % CHID_PER_USERD;
-       u32 eT = ~0;
-       int ret;
-
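-       /* Make sure GR has completed its one-time init (golden context setup)
-        * before creating the first channel.
-        */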
-       if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
-               ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
-               if (ret)
-                       return ret;
-       }
-
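-       /* Take the first engine on the runlist as the channel's engine type. */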
-       nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
-               eT = engn->id;
-               break;
-       }
-
-       if (WARN_ON(eT == ~0))
-               return -EINVAL;
-
-       chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
-                                                 fifo->rm.mthdbuf_size,
-                                                 &chan->rm.mthdbuf.addr, GFP_KERNEL);
-       if (!chan->rm.mthdbuf.ptr)
-               return -ENOMEM;
-
-       args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
-                                    fifo->func->chan.user.oclass, sizeof(*args),
-                                    &chan->rm.object);
-       if (WARN_ON(IS_ERR(args)))
-               return PTR_ERR(args);
-
-       args->gpFifoOffset = offset;
-       args->gpFifoEntries = length / 8;
-
-       args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
-       args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
-       args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
-       if (!priv)
-               args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
-       else
-               args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
-       args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
-
-       args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
-       args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
-
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
-       args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
-       args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
-
-       args->hVASpace = chan->vmm->rm.object.handle;
-       args->engineType = eT;
-
-       args->instanceMem.base = chan->inst->addr;
-       args->instanceMem.size = chan->inst->size;
-       args->instanceMem.addressSpace = 2;
-       args->instanceMem.cacheAttrib = 1;
-
-       args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
-       args->userdMem.size = fifo->func->chan.func->userd->size;
-       args->userdMem.addressSpace = 2;
-       args->userdMem.cacheAttrib = 1;
-
-       args->ramfcMem.base = chan->inst->addr + 0;
-       args->ramfcMem.size = 0x200;
-       args->ramfcMem.addressSpace = 2;
-       args->ramfcMem.cacheAttrib = 1;
-
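-       /* The fault method buffer lives in coherent sysmem (allocated above). */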
-       args->mthdbufMem.base = chan->rm.mthdbuf.addr;
-       args->mthdbufMem.size = fifo->rm.mthdbuf_size;
-       args->mthdbufMem.addressSpace = 1;
-       args->mthdbufMem.cacheAttrib = 0;
-
-       if (!priv)
-               args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
-       else
-               args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
-       args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
-       args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
-
-       ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
-       if (ret)
-               return ret;
-
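-       /* Bind the channel to its engine, then ask RM to begin scheduling it. */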
-       if (1) {
-               NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;
-
-               if (1) {
-                       NVA06F_CTRL_BIND_PARAMS *ctrl;
-
-                       ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
-                                                   NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
-                       if (WARN_ON(IS_ERR(ctrl)))
-                               return PTR_ERR(ctrl);
-
-                       ctrl->engineType = eT;
-
-                       ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
-                       if (ret)
-                               return ret;
-               }
-
-               ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
-                                           NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
-               if (WARN_ON(IS_ERR(ctrl)))
-                       return PTR_ERR(ctrl);
-
-               ctrl->bEnable = 1;
-               ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
-       }
-
-       return ret;
-}
-
-static const struct nvkm_chan_func_ramfc
-r535_chan_ramfc = {
-       .write = r535_chan_ramfc_write,
-       .clear = r535_chan_ramfc_clear,
-       .devm = 0xfff,
-       .priv = true,
-};
-
-static const struct nvkm_chan_func
-r535_chan = {
-       .inst = &gf100_chan_inst,
-       .userd = &gv100_chan_userd,
-       .ramfc = &r535_chan_ramfc,
-       .start = r535_chan_start,
-       .stop = r535_chan_stop,
-       .doorbell_handle = r535_chan_doorbell_handle,
-};
-
-static const struct nvkm_cgrp_func
-r535_cgrp = {
-};
-
-static int
-r535_engn_nonstall(struct nvkm_engn *engn)
-{
-       struct nvkm_subdev *subdev = &engn->engine->subdev;
-       int ret;
-
-       ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
-       WARN_ON(ret == -ENOENT);
-       return ret;
-}
-
-static const struct nvkm_engn_func
-r535_ce = {
-       .nonstall = r535_engn_nonstall,
-};
-
-static int
-r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
-{
-       /* RM requires GR context buffers to remain mapped until after the
-        * channel has been destroyed (as opposed to after the last gr obj
-        * has been deleted).
-        *
-        * Take an extra ref here, which will be released once the channel
-        * object has been deleted.
-        */
-       refcount_inc(&vctx->refs);
-       chan->rm.grctx = vctx;
-       return 0;
-}
-
-static const struct nvkm_engn_func
-r535_gr = {
-       .nonstall = r535_engn_nonstall,
-       .ctor2 = r535_gr_ctor,
-};
-
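-/* Promote a falcon engine's context buffer to RM on behalf of a channel. */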
-static int
-r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
-{
-       struct nvkm_gsp_client *client = &chan->vmm->rm.client;
-       NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
-                                   NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->hClient = client->object.handle;
-       ctrl->hObject = chan->rm.object.handle;
-       ctrl->hChanClient = client->object.handle;
-       ctrl->virtAddress = vctx->vma->addr;
-       ctrl->size = vctx->inst->size;
-       ctrl->engineType = engn->id;
-       ctrl->ChID = chan->id;
-
-       return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
-}
-
-static int
-r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
-{
-       int ret;
-
-       if (WARN_ON(!engn->rm.size))
-               return -EINVAL;
-
-       ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
-                             &vctx->inst);
-       if (ret)
-               return ret;
-
-       ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
-       if (ret)
-               return ret;
-
-       ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
-       if (ret)
-               return ret;
-
-       return r535_flcn_bind(engn, vctx, chan);
-}
-
-static const struct nvkm_engn_func
-r535_flcn = {
-       .nonstall = r535_engn_nonstall,
-       .ctor2 = r535_flcn_ctor,
-};
-
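-/* Runlist block/allow are no-ops; GSP-RM controls runlist submission. */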
-static void
-r535_runl_allow(struct nvkm_runl *runl, u32 engm)
-{
-}
-
-static void
-r535_runl_block(struct nvkm_runl *runl, u32 engm)
-{
-}
-
-static const struct nvkm_runl_func
-r535_runl = {
-       .block = r535_runl_block,
-       .allow = r535_runl_allow,
-};
-
-static int
-r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
-{
-       switch (type) {
-       case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
-       case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
-       case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
-       case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
-       case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
-       case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
-       case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
-       case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
-       default:
-               break;
-       }
-
-       WARN_ON(1);
-       return -EINVAL;
-}
-
-static int
-r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
-{
-       switch (rm) {
-       case RM_ENGINE_TYPE_GR0:
-               *ptype = NVKM_ENGINE_GR;
-               return 0;
-       case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
-               *ptype = NVKM_ENGINE_CE;
-               return rm - RM_ENGINE_TYPE_COPY0;
-       case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
-               *ptype = NVKM_ENGINE_NVDEC;
-               return rm - RM_ENGINE_TYPE_NVDEC0;
-       case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
-               *ptype = NVKM_ENGINE_NVENC;
-               return rm - RM_ENGINE_TYPE_NVENC0;
-       case RM_ENGINE_TYPE_SW:
-               *ptype = NVKM_ENGINE_SW;
-               return 0;
-       case RM_ENGINE_TYPE_SEC2:
-               *ptype = NVKM_ENGINE_SEC2;
-               return 0;
-       case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
-               *ptype = NVKM_ENGINE_NVJPG;
-               return rm - RM_ENGINE_TYPE_NVJPEG0;
-       case RM_ENGINE_TYPE_OFA:
-               *ptype = NVKM_ENGINE_OFA;
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
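-/* Query RM for each constructed falcon's context buffer size, and record it
- * against the matching engine.
- */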
-static int
-r535_fifo_ectx_size(struct nvkm_fifo *fifo)
-{
-       NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
-       struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
-       struct nvkm_runl *runl;
-       struct nvkm_engn *engn;
-
-       ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
-                                  NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
-                                  sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return PTR_ERR(ctrl);
-
-       for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
-               nvkm_runl_foreach(runl, fifo) {
-                       nvkm_runl_foreach_engn(engn, runl) {
-                               if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
-                                       engn->rm.size =
-                                               ctrl->constructedFalconsTable[i].ctxBufferSize;
-                                       break;
-                               }
-                       }
-               }
-       }
-
-       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
-       return 0;
-}
-
-static int
-r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
-{
-       struct nvkm_subdev *subdev = &fifo->engine.subdev;
-       struct nvkm_gsp *gsp = subdev->device->gsp;
-       struct nvkm_runl *runl;
-       struct nvkm_engn *engn;
-       u32 cgids = 2048;
-       u32 chids = 2048;
-       int ret;
-       NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
-
-       if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
-           (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
-               return ret;
-
-       ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
-                                  NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return PTR_ERR(ctrl);
-
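-       /* First pass: create a runlist for each ID/PRI-base pair. */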
-       for (int i = 0; i < ctrl->numEntries; i++) {
-               const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
-               const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
-
-               runl = nvkm_runl_get(fifo, id, addr);
-               if (!runl) {
-                       runl = nvkm_runl_new(fifo, id, addr, 0);
-                       if (WARN_ON(IS_ERR(runl)))
-                               continue;
-               }
-       }
-
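-       /* Second pass: translate RM engine types and add engines to runlists. */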
-       for (int i = 0; i < ctrl->numEntries; i++) {
-               const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
-               const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
-               const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
-               enum nvkm_subdev_type type;
-               int inst, nv2080;
-
-               runl = nvkm_runl_get(fifo, id, addr);
-               if (!runl)
-                       continue;
-
-               inst = r535_fifo_engn_type(rmid, &type);
-               if (inst < 0) {
-                       nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
-                       nvkm_runl_del(runl);
-                       continue;
-               }
-
-               nv2080 = r535_fifo_2080_type(type, inst);
-               if (nv2080 < 0) {
-                       nvkm_runl_del(runl);
-                       continue;
-               }
-
-               switch (type) {
-               case NVKM_ENGINE_CE:
-                       engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
-                       break;
-               case NVKM_ENGINE_GR:
-                       engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
-                       break;
-               case NVKM_ENGINE_NVDEC:
-               case NVKM_ENGINE_NVENC:
-               case NVKM_ENGINE_NVJPG:
-               case NVKM_ENGINE_OFA:
-                       engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
-                       break;
-               case NVKM_ENGINE_SW:
-                       continue;
-               default:
-                       engn = NULL;
-                       break;
-               }
-
-               if (!engn) {
-                       nvkm_runl_del(runl);
-                       continue;
-               }
-
-               engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
-       }
-
-       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
-
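-       /* Fetch the size needed for each channel's CE fault method buffer. */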
-       {
-               NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;
-
-               ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
-                                          NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
-                                          sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               fifo->rm.mthdbuf_size = ctrl->size;
-
-               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
-       }
-
-       return r535_fifo_ectx_size(fifo);
-}
-
-static void
-r535_fifo_dtor(struct nvkm_fifo *fifo)
-{
-       kfree(fifo->func);
-}
-
-int
-r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
-             enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
-{
-       struct nvkm_fifo_func *rm;
-
-       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_fifo_dtor;
-       rm->runl_ctor = r535_fifo_runl_ctor;
-       rm->runl = &r535_runl;
-       rm->cgrp = hw->cgrp;
-       rm->cgrp.func = &r535_cgrp;
-       rm->chan = hw->chan;
-       rm->chan.func = &r535_chan;
-       rm->nonstall = &ga100_fifo_nonstall;
-       rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
-
-       return nvkm_fifo_new_(rm, device, type, inst, pfifo);
-}
index 1555f8c40b4f3d93b6cb9453759211df5704b438..487fcc14b9a93449dca4b549aff03aaca2e99bb7 100644 (file)
@@ -43,8 +43,6 @@ nvkm-y += nvkm/engine/gr/tu102.o
 nvkm-y += nvkm/engine/gr/ga102.o
 nvkm-y += nvkm/engine/gr/ad102.o
 
-nvkm-y += nvkm/engine/gr/r535.o
-
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
 nvkm-y += nvkm/engine/gr/ctxgf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
deleted file mode 100644 (file)
index f4bed3e..0000000
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gf100.h"
-
-#include <core/memory.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu/vmm.h>
-#include <engine/fifo/priv.h>
-
-#include <nvif/if900d.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-
-#define r535_gr(p) container_of((p), struct r535_gr, base)
-
-#define R535_GR_MAX_CTXBUFS 9
-
-struct r535_gr {
-       struct nvkm_gr base;
-
-       struct {
-               u16 bufferId;
-               u32 size;
-               u8  page;
-               u8  align;
-               bool global;
-               bool init;
-               bool ro;
-       } ctxbuf[R535_GR_MAX_CTXBUFS];
-       int ctxbuf_nr;
-
-       struct nvkm_memory *ctxbuf_mem[R535_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_chan {
-       struct nvkm_object object;
-       struct r535_gr *gr;
-
-       struct nvkm_vmm *vmm;
-       struct nvkm_chan *chan;
-
-       struct nvkm_memory *mem[R535_GR_MAX_CTXBUFS];
-       struct nvkm_vma    *vma[R535_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_obj {
-       struct nvkm_object object;
-       struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_gr_obj_dtor(struct nvkm_object *object)
-{
-       struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
-
-       nvkm_gsp_rm_free(&obj->rm);
-       return obj;
-}
-
-static const struct nvkm_object_func
-r535_gr_obj = {
-       .dtor = r535_gr_obj_dtor,
-};
-
-static int
-r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-                struct nvkm_object **pobject)
-{
-       struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
-       struct r535_gr_obj *obj;
-
-       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-               return -ENOMEM;
-
-       nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
-       *pobject = &obj->object;
-
-       return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
-                                &obj->rm);
-}
-
-static void *
-r535_gr_chan_dtor(struct nvkm_object *object)
-{
-       struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
-       struct r535_gr *gr = grc->gr;
-
-       for (int i = 0; i < gr->ctxbuf_nr; i++) {
-               nvkm_vmm_put(grc->vmm, &grc->vma[i]);
-               nvkm_memory_unref(&grc->mem[i]);
-       }
-
-       nvkm_vmm_unref(&grc->vmm);
-       return grc;
-}
-
-static const struct nvkm_object_func
-r535_gr_chan = {
-       .dtor = r535_gr_chan_dtor,
-};
-
-static int
-r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
-                   struct nvkm_memory **pmem, struct nvkm_vma **pvma,
-                   struct nvkm_gsp_object *chan)
-{
-       struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-       struct nvkm_device *device = subdev->device;
-       NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
-                                   NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
-       if (WARN_ON(IS_ERR(ctrl)))
-               return PTR_ERR(ctrl);
-
-       ctrl->engineType = 1;
-       ctrl->hChanClient = vmm->rm.client.object.handle;
-       ctrl->hObject = chan->handle;
-
-       for (int i = 0; i < gr->ctxbuf_nr; i++) {
-               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
-                       &ctrl->promoteEntry[ctrl->entryCount];
-               const bool alloc = golden || !gr->ctxbuf[i].global;
-               int ret;
-
-               entry->bufferId = gr->ctxbuf[i].bufferId;
-               entry->bInitialize = gr->ctxbuf[i].init && alloc;
-
-               if (alloc) {
-                       ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
-                                             NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
-                                             gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
-                                             gr->ctxbuf[i].init, &pmem[i]);
-                       if (WARN_ON(ret))
-                               return ret;
-
-                       if (gr->ctxbuf[i].bufferId ==
-                                       NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
-                               entry->bNonmapped = 1;
-               } else {
-                       if (gr->ctxbuf[i].bufferId ==
-                               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
-                               continue;
-
-                       pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
-               }
-
-               if (!entry->bNonmapped) {
-                       struct gf100_vmm_map_v0 args = {
-                               .priv = 1,
-                               .ro   = gr->ctxbuf[i].ro,
-                       };
-
-                       mutex_lock(&vmm->mutex.vmm);
-                       ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
-                                                 nvkm_memory_size(pmem[i]), &pvma[i]);
-                       mutex_unlock(&vmm->mutex.vmm);
-                       if (ret)
-                               return ret;
-
-                       ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
-                       if (ret)
-                               return ret;
-
-                       entry->gpuVirtAddr = pvma[i]->addr;
-               }
-
-               if (entry->bInitialize) {
-                       entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
-                       entry->size = gr->ctxbuf[i].size;
-                       entry->physAttr = 4;
-               }
-
-               nvkm_debug(subdev,
-                          "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
-                          entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
-                          entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
-
-               ctrl->entryCount++;
-       }
-
-       return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
-}
-
-static int
-r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
-                struct nvkm_object **pobject)
-{
-       struct r535_gr *gr = r535_gr(base);
-       struct r535_gr_chan *grc;
-       int ret;
-
-       if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
-               return -ENOMEM;
-
-       nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
-       grc->gr = gr;
-       grc->vmm = nvkm_vmm_ref(chan->vmm);
-       grc->chan = chan;
-       *pobject = &grc->object;
-
-       ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static u64
-r535_gr_units(struct nvkm_gr *gr)
-{
-       struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
-
-       return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
-}
-
-static int
-r535_gr_oneinit(struct nvkm_gr *base)
-{
-       NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
-       struct r535_gr *gr = container_of(base, typeof(*gr), base);
-       struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-       struct nvkm_device *device = subdev->device;
-       struct nvkm_gsp *gsp = device->gsp;
-       struct nvkm_mmu *mmu = device->mmu;
-       struct {
-               struct nvkm_memory *inst;
-               struct nvkm_vmm *vmm;
-               struct nvkm_gsp_object chan;
-               struct nvkm_vma *vma[R535_GR_MAX_CTXBUFS];
-       } golden = {};
-       int ret;
-
-       /* Allocate a channel to use for golden context init. */
-       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
-       if (ret)
-               goto done;
-
-       ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
-       if (ret)
-               goto done;
-
-       ret = mmu->func->promote_vmm(golden.vmm);
-       if (ret)
-               goto done;
-
-       {
-               NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
-
-               args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
-                                            device->fifo->func->chan.user.oclass,
-                                            sizeof(*args), &golden.chan);
-               if (IS_ERR(args)) {
-                       ret = PTR_ERR(args);
-                       goto done;
-               }
-
-               args->gpFifoOffset = 0;
-               args->gpFifoEntries = 0x1000 / 8;
-               args->flags =
-                       NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
-                       NVDEF(NVOS04, FLAGS, VPR, FALSE) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
-                       NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
-                       NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
-                       NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
-                       NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
-                       NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
-                       NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
-                       NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
-                       NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
-                       NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
-                       NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
-               args->hVASpace = golden.vmm->rm.object.handle;
-               args->engineType = 1;
-               args->instanceMem.base = nvkm_memory_addr(golden.inst);
-               args->instanceMem.size = 0x1000;
-               args->instanceMem.addressSpace = 2;
-               args->instanceMem.cacheAttrib = 1;
-               args->ramfcMem.base = nvkm_memory_addr(golden.inst);
-               args->ramfcMem.size = 0x200;
-               args->ramfcMem.addressSpace = 2;
-               args->ramfcMem.cacheAttrib = 1;
-               args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
-               args->userdMem.size = 0x200;
-               args->userdMem.addressSpace = 2;
-               args->userdMem.cacheAttrib = 1;
-               args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
-               args->mthdbufMem.size = 0x5000;
-               args->mthdbufMem.addressSpace = 2;
-               args->mthdbufMem.cacheAttrib = 1;
-               args->internalFlags =
-                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
-                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
-                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
-
-               ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
-               if (ret)
-                       goto done;
-       }
-
-       /* Fetch context buffer info from RM, and allocate each buffer here for
-        * use during golden context init (or later as a global context buffer).
-        *
-        * Also build the information that'll be used to create channel contexts.
-        */
-       info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
-                                  NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
-                                  sizeof(*info));
-       if (WARN_ON(IS_ERR(info))) {
-               ret = PTR_ERR(info);
-               goto done;
-       }
-
-       for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
-               static const struct {
-                       u32     id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
-                       u32     id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
-                       bool global;
-                       bool   init;
-                       bool     ro;
-               } map[] = {
-#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
-                       .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
-                       .global = (G), .init = (I), .ro = (R) }
-#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
-                       /*                                       global   init     ro */
-                       _A(           GRAPHICS,             MAIN, false,  true, false),
-                       _B(                                PATCH, false,  true, false),
-                       _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB,  true, false, false),
-                       _B(                             PAGEPOOL,  true, false, false),
-                       _B(                         ATTRIBUTE_CB,  true, false, false),
-                       _B(                        RTV_CB_GLOBAL,  true, false, false),
-                       _B(                           FECS_EVENT,  true,  true, false),
-                       _B(                      PRIV_ACCESS_MAP,  true,  true,  true),
-#undef _B
-#undef _A
-               };
-               u32 size = info->engineContextBuffersInfo[0].engine[i].size;
-               u8 align, page;
-               int id;
-
-               for (id = 0; id < ARRAY_SIZE(map); id++) {
-                       if (map[id].id0 == i)
-                               break;
-               }
-
-               nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
-                          size, (id < ARRAY_SIZE(map)) ? "*" : "");
-               if (id >= ARRAY_SIZE(map))
-                       continue;
-
-               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
-                       size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
-
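-               /* Pick the largest page size (2MiB/64KiB/4KiB) the buffer size warrants. */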
-               if      (size >= 1 << 21) page = 21;
-               else if (size >= 1 << 16) page = 16;
-               else                      page = 12;
-
-               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
-                       align = order_base_2(size);
-               else
-                       align = page;
-
-               if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
-                       continue;
-
-               gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
-               gr->ctxbuf[gr->ctxbuf_nr].size     = size;
-               gr->ctxbuf[gr->ctxbuf_nr].page     = page;
-               gr->ctxbuf[gr->ctxbuf_nr].align    = align;
-               gr->ctxbuf[gr->ctxbuf_nr].global   = map[id].global;
-               gr->ctxbuf[gr->ctxbuf_nr].init     = map[id].init;
-               gr->ctxbuf[gr->ctxbuf_nr].ro       = map[id].ro;
-               gr->ctxbuf_nr++;
-
-               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
-                       if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
-                               continue;
-
-                       gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
-                       gr->ctxbuf[gr->ctxbuf_nr].bufferId =
-                               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
-                       gr->ctxbuf_nr++;
-               }
-       }
-
-       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
-
-       /* Promote golden context to RM. */
-       ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
-       if (ret)
-               goto done;
-
-       /* Allocate 3D class on channel to trigger golden context init in RM. */
-       {
-               int i;
-
-               for (i = 0; gr->base.func->sclass[i].ctor; i++) {
-                       if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
-                               struct nvkm_gsp_object threed;
-
-                               ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
-                                                       gr->base.func->sclass[i].oclass, 0,
-                                                       &threed);
-                               if (ret)
-                                       goto done;
-
-                               nvkm_gsp_rm_free(&threed);
-                               break;
-                       }
-               }
-
-               if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
-                       ret = -EINVAL;
-                       goto done;
-               }
-       }
-
-done:
-       nvkm_gsp_rm_free(&golden.chan);
-       for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
-               nvkm_vmm_put(golden.vmm, &golden.vma[i]);
-       nvkm_vmm_unref(&golden.vmm);
-       nvkm_memory_unref(&golden.inst);
-       return ret;
-}
-
-static void *
-r535_gr_dtor(struct nvkm_gr *base)
-{
-       struct r535_gr *gr = r535_gr(base);
-
-       while (gr->ctxbuf_nr)
-               nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
-
-       kfree(gr->base.func);
-       return gr;
-}
-
-int
-r535_gr_new(const struct gf100_gr_func *hw,
-           struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
-       struct nvkm_gr_func *rm;
-       struct r535_gr *gr;
-       int nclass;
-
-       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_gr_dtor;
-       rm->oneinit = r535_gr_oneinit;
-       rm->units = r535_gr_units;
-       rm->chan_new = r535_gr_chan_new;
-
-       for (int i = 0; i < nclass; i++) {
-               rm->sclass[i].minver = hw->sclass[i].minver;
-               rm->sclass[i].maxver = hw->sclass[i].maxver;
-               rm->sclass[i].oclass = hw->sclass[i].oclass;
-               rm->sclass[i].ctor = r535_gr_obj_ctor;
-       }
-
-       if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
-               kfree(rm);
-               return -ENOMEM;
-       }
-
-       *pgr = &gr->base;
-
-       return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
-}
index 2b0e923cb75541f7fcad98eec9fc7cd7a95c9f3b..5cc317abc42cc0445065b161d49a042ab85a0cda 100644 (file)
@@ -5,5 +5,3 @@ nvkm-y += nvkm/engine/nvdec/tu102.o
 nvkm-y += nvkm/engine/nvdec/ga100.o
 nvkm-y += nvkm/engine/nvdec/ga102.o
 nvkm-y += nvkm/engine/nvdec/ad102.o
-
-nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
deleted file mode 100644 (file)
index 75a24f3..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvdec_obj {
-       struct nvkm_object object;
-       struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvdec_obj_dtor(struct nvkm_object *object)
-{
-       struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
-
-       nvkm_gsp_rm_free(&obj->rm);
-       return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvdec_obj = {
-       .dtor = r535_nvdec_obj_dtor,
-};
-
-static int
-r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-                struct nvkm_object **pobject)
-{
-       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
-       struct r535_nvdec_obj *obj;
-       NV_BSP_ALLOCATION_PARAMETERS *args;
-
-       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-               return -ENOMEM;
-
-       nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
-       *pobject = &obj->object;
-
-       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
-                                    sizeof(*args), &obj->rm);
-       if (WARN_ON(IS_ERR(args)))
-               return PTR_ERR(args);
-
-       args->size = sizeof(*args);
-       args->engineInstance = oclass->engine->subdev.inst;
-
-       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvdec_dtor(struct nvkm_engine *engine)
-{
-       struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-
-       kfree(nvdec->engine.func);
-       return nvdec;
-}
-
-int
-r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
-              enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
-{
-       struct nvkm_engine_func *rm;
-       int nclass;
-
-       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_nvdec_dtor;
-       for (int i = 0; i < nclass; i++) {
-               rm->sclass[i].minver = hw->sclass[i].minver;
-               rm->sclass[i].maxver = hw->sclass[i].maxver;
-               rm->sclass[i].oclass = hw->sclass[i].oclass;
-               rm->sclass[i].ctor = r535_nvdec_obj_ctor;
-       }
-
-       if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
-               kfree(rm);
-               return -ENOMEM;
-       }
-
-       return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
-}
index 2c1495b730f3b4fc4ad4a034a97706c8bb3f481d..3d71f2973dab02808bdab193bc378444cc167160 100644 (file)
@@ -4,5 +4,3 @@ nvkm-y += nvkm/engine/nvenc/gm107.o
 nvkm-y += nvkm/engine/nvenc/tu102.o
 nvkm-y += nvkm/engine/nvenc/ga102.o
 nvkm-y += nvkm/engine/nvenc/ad102.o
-
-nvkm-y += nvkm/engine/nvenc/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
deleted file mode 100644 (file)
index c8a2a91..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvenc_obj {
-       struct nvkm_object object;
-       struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvenc_obj_dtor(struct nvkm_object *object)
-{
-       struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
-
-       nvkm_gsp_rm_free(&obj->rm);
-       return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvenc_obj = {
-       .dtor = r535_nvenc_obj_dtor,
-};
-
-static int
-r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-                struct nvkm_object **pobject)
-{
-       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
-       struct r535_nvenc_obj *obj;
-       NV_MSENC_ALLOCATION_PARAMETERS *args;
-
-       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-               return -ENOMEM;
-
-       nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
-       *pobject = &obj->object;
-
-       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
-                                    sizeof(*args), &obj->rm);
-       if (WARN_ON(IS_ERR(args)))
-               return PTR_ERR(args);
-
-       args->size = sizeof(*args);
-       args->engineInstance = oclass->engine->subdev.inst;
-
-       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvenc_dtor(struct nvkm_engine *engine)
-{
-       struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
-
-       kfree(nvenc->engine.func);
-       return nvenc;
-}
-
-int
-r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
-              enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
-{
-       struct nvkm_engine_func *rm;
-       int nclass;
-
-       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_nvenc_dtor;
-       for (int i = 0; i < nclass; i++) {
-               rm->sclass[i].minver = hw->sclass[i].minver;
-               rm->sclass[i].maxver = hw->sclass[i].maxver;
-               rm->sclass[i].oclass = hw->sclass[i].oclass;
-               rm->sclass[i].ctor = r535_nvenc_obj_ctor;
-       }
-
-       if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
-               kfree(rm);
-               return -ENOMEM;
-       }
-
-       return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
-}
index 1408f664add6accc7e46e5246d4c45dd24af0ba5..1d9bddd686051359f1be06dc3f15864558a3ed43 100644 (file)
@@ -1,5 +1,3 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/engine/nvjpg/ga100.o
 nvkm-y += nvkm/engine/nvjpg/ad102.o
-
-nvkm-y += nvkm/engine/nvjpg/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
deleted file mode 100644 (file)
index 1babddc..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvjpg_obj {
-       struct nvkm_object object;
-       struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvjpg_obj_dtor(struct nvkm_object *object)
-{
-       struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
-
-       nvkm_gsp_rm_free(&obj->rm);
-       return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvjpg_obj = {
-       .dtor = r535_nvjpg_obj_dtor,
-};
-
-static int
-r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-                   struct nvkm_object **pobject)
-{
-       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
-       struct r535_nvjpg_obj *obj;
-       NV_NVJPG_ALLOCATION_PARAMETERS *args;
-
-       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-               return -ENOMEM;
-
-       nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
-       *pobject = &obj->object;
-
-       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
-                                    sizeof(*args), &obj->rm);
-       if (WARN_ON(IS_ERR(args)))
-               return PTR_ERR(args);
-
-       args->size = sizeof(*args);
-       args->engineInstance = oclass->engine->subdev.inst;
-
-       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvjpg_dtor(struct nvkm_engine *engine)
-{
-       kfree(engine->func);
-       return engine;
-}
-
-int
-r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
-              enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
-       struct nvkm_engine_func *rm;
-       int nclass, ret;
-
-       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_nvjpg_dtor;
-       for (int i = 0; i < nclass; i++) {
-               rm->sclass[i].minver = hw->sclass[i].minver;
-               rm->sclass[i].maxver = hw->sclass[i].maxver;
-               rm->sclass[i].oclass = hw->sclass[i].oclass;
-               rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
-       }
-
-       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
-       if (ret)
-               kfree(rm);
-
-       return ret;
-}
index 99f1713d7e517237a319c8336eb1d7ecf21e4403..3faf73b35f5abf94cfbe48a1c4df80dfa2f8c721 100644 (file)
@@ -2,5 +2,3 @@
 nvkm-y += nvkm/engine/ofa/ga100.o
 nvkm-y += nvkm/engine/ofa/ga102.o
 nvkm-y += nvkm/engine/ofa/ad102.o
-
-nvkm-y += nvkm/engine/ofa/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
deleted file mode 100644 (file)
index 438dc69..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_ofa_obj {
-       struct nvkm_object object;
-       struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ofa_obj_dtor(struct nvkm_object *object)
-{
-       struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
-
-       nvkm_gsp_rm_free(&obj->rm);
-       return obj;
-}
-
-static const struct nvkm_object_func
-r535_ofa_obj = {
-       .dtor = r535_ofa_obj_dtor,
-};
-
-static int
-r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
-                struct nvkm_object **pobject)
-{
-       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
-       struct r535_ofa_obj *obj;
-       NV_OFA_ALLOCATION_PARAMETERS *args;
-
-       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
-               return -ENOMEM;
-
-       nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
-       *pobject = &obj->object;
-
-       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
-                                    sizeof(*args), &obj->rm);
-       if (WARN_ON(IS_ERR(args)))
-               return PTR_ERR(args);
-
-       args->size = sizeof(*args);
-
-       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ofa_dtor(struct nvkm_engine *engine)
-{
-       kfree(engine->func);
-       return engine;
-}
-
-int
-r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
-            enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
-       struct nvkm_engine_func *rm;
-       int nclass, ret;
-
-       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
-       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_ofa_dtor;
-       for (int i = 0; i < nclass; i++) {
-               rm->sclass[i].minver = hw->sclass[i].minver;
-               rm->sclass[i].maxver = hw->sclass[i].maxver;
-               rm->sclass[i].oclass = hw->sclass[i].oclass;
-               rm->sclass[i].ctor = r535_ofa_obj_ctor;
-       }
-
-       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
-       if (ret)
-               kfree(rm);
-
-       return ret;
-}
index 9754c6872543cdd2c0e02509c7a17be02992ff42..8faee3317a74fbb13fc411c8266275d495ccc49a 100644 (file)
@@ -7,5 +7,3 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
 nvkm-y += nvkm/subdev/bar/gm107.o
 nvkm-y += nvkm/subdev/bar/gm20b.o
 nvkm-y += nvkm/subdev/bar/tu102.o
-
-nvkm-y += nvkm/subdev/bar/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
deleted file mode 100644 (file)
index 90186f9..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gf100.h"
-
-#include <core/mm.h>
-#include <subdev/fb.h>
-#include <subdev/gsp.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu/vmm.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-static void
-r535_bar_flush(struct nvkm_bar *bar)
-{
-       ioread32_native(bar->flushBAR2);
-}
-
-static void
-r535_bar_bar2_wait(struct nvkm_bar *base)
-{
-}
-
-static int
-r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
-{
-       rpc_update_bar_pde_v15_00 *rpc;
-
-       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc));
-       if (WARN_ON(IS_ERR_OR_NULL(rpc)))
-               return -EIO;
-
-       rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
-       rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
-       rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
-
-       return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
-}
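
A note on the hand-packed entryValue above: an illustrative helper showing the same encoding, with the field meaning inferred from this code (and its XXX comment) rather than from documentation:

/* Inferred from r535_bar_bar2_update_pde(): the address is shifted into a
 * 16-byte-granular field and bit 1 appears to mark the entry valid; a zero
 * address unbinds the PDE (see r535_bar_bar2_fini()).
 */
static inline u64 bar2_pd3_entry(u64 addr)
{
	return addr ? ((addr >> 4) | 2) : 0;
}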
-
-static void
-r535_bar_bar2_fini(struct nvkm_bar *bar)
-{
-       struct nvkm_gsp *gsp = bar->subdev.device->gsp;
-
-       bar->flushBAR2 = bar->flushBAR2PhysMode;
-       nvkm_done(bar->flushFBZero);
-
-       WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
-}
-
-static void
-r535_bar_bar2_init(struct nvkm_bar *bar)
-{
-       struct nvkm_device *device = bar->subdev.device;
-       struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
-       struct nvkm_gsp *gsp = device->gsp;
-
-       WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
-       vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
-
-       if (!bar->flushFBZero) {
-               struct nvkm_memory *fbZero;
-               int ret;
-
-               ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero);
-               if (ret == 0) {
-                       ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero);
-                       nvkm_memory_unref(&fbZero);
-               }
-               WARN_ON(ret);
-       }
-
-       bar->bar2 = true;
-       bar->flushBAR2 = nvkm_kmap(bar->flushFBZero);
-       WARN_ON(!bar->flushBAR2);
-}
-
-static void
-r535_bar_bar1_wait(struct nvkm_bar *base)
-{
-}
-
-static void
-r535_bar_bar1_fini(struct nvkm_bar *base)
-{
-}
-
-static void
-r535_bar_bar1_init(struct nvkm_bar *bar)
-{
-       struct nvkm_device *device = bar->subdev.device;
-       struct nvkm_gsp *gsp = device->gsp;
-       struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
-       struct nvkm_memory *pd3;
-       int ret;
-
-       ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
-       if (WARN_ON(ret))
-               return;
-
-       nvkm_memory_unref(&vmm->pd->pt[0]->memory);
-
-       ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
-       nvkm_memory_unref(&pd3);
-       if (WARN_ON(ret))
-               return;
-
-       vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
-}
-
-static void *
-r535_bar_dtor(struct nvkm_bar *bar)
-{
-       void *data = gf100_bar_dtor(bar);
-
-       nvkm_memory_unref(&bar->flushFBZero);
-
-       if (bar->flushBAR2PhysMode)
-               iounmap(bar->flushBAR2PhysMode);
-
-       kfree(bar->func);
-       return data;
-}
-
-int
-r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
-             enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
-{
-       struct nvkm_bar_func *rm;
-       struct nvkm_bar *bar;
-       int ret;
-
-       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_bar_dtor;
-       rm->oneinit = hw->oneinit;
-       rm->bar1.init = r535_bar_bar1_init;
-       rm->bar1.fini = r535_bar_bar1_fini;
-       rm->bar1.wait = r535_bar_bar1_wait;
-       rm->bar1.vmm = hw->bar1.vmm;
-       rm->bar2.init = r535_bar_bar2_init;
-       rm->bar2.fini = r535_bar_bar2_fini;
-       rm->bar2.wait = r535_bar_bar2_wait;
-       rm->bar2.vmm = hw->bar2.vmm;
-       rm->flush = r535_bar_flush;
-
-       ret = gf100_bar_new_(rm, device, type, inst, &bar);
-       if (ret) {
-               kfree(rm);
-               return ret;
-       }
-       *pbar = bar;
-
-       bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
-       if (!bar->flushBAR2PhysMode)
-               return -ENOMEM;
-
-       bar->flushBAR2 = bar->flushBAR2PhysMode;
-
-       gf100_bar(*pbar)->bar2_halve = true;
-       return 0;
-}
index af6e55603763d6f24c6d19f296d3e48fb441a9e4..ba892c111c26c0504ee5a3bd4a88aa99f000b85a 100644 (file)
@@ -9,6 +9,4 @@ nvkm-y += nvkm/subdev/gsp/ga100.o
 nvkm-y += nvkm/subdev/gsp/ga102.o
 nvkm-y += nvkm/subdev/gsp/ad102.o
 
-nvkm-y += nvkm/subdev/gsp/r535.o
-
 include $(src)/nvkm/subdev/gsp/rm/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
deleted file mode 100644 (file)
index f42879b..0000000
+++ /dev/null
@@ -1,2252 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include <rm/rpc.h>
-
-#include "priv.h"
-
-#include <core/pci.h>
-#include <subdev/timer.h>
-#include <subdev/vfn.h>
-#include <engine/fifo/chan.h>
-#include <engine/sec2.h>
-#include <nvif/log.h>
-
-#include <nvfw/fw.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
-#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
-#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
-
-#include <linux/acpi.h>
-#include <linux/ctype.h>
-#include <linux/parser.h>
-
-extern struct dentry *nouveau_debugfs_root;
-
-const struct nvkm_gsp_rm
-r535_gsp_rm = {
-       .api = &r535_rm,
-};
-
-static void
-r535_gsp_msgq_work(struct work_struct *work)
-{
-       struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);
-
-       mutex_lock(&gsp->cmdq.mutex);
-       if (*gsp->msgq.rptr != *gsp->msgq.wptr)
-               r535_gsp_msg_recv(gsp, 0, 0);
-       mutex_unlock(&gsp->cmdq.mutex);
-}
-
-static irqreturn_t
-r535_gsp_intr(struct nvkm_inth *inth)
-{
-       struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth);
-       struct nvkm_subdev *subdev = &gsp->subdev;
-       u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008);
-       u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 +
-                                                 gsp->falcon.func->riscv_irqmask);
-       u32 stat = intr & inte;
-
-       if (!stat) {
-               nvkm_debug(subdev, "inte %08x %08x\n", intr, inte);
-               return IRQ_NONE;
-       }
-
-       if (stat & 0x00000040) {
-               nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040);
-               schedule_work(&gsp->msgq.work);
-               stat &= ~0x00000040;
-       }
-
-       if (stat) {
-               nvkm_error(subdev, "intr %08x\n", stat);
-               nvkm_falcon_wr32(&gsp->falcon, 0x014, stat);
-               nvkm_falcon_wr32(&gsp->falcon, 0x004, stat);
-       }
-
-       nvkm_falcon_intr_retrigger(&gsp->falcon);
-       return IRQ_HANDLED;
-}
-
-static int
-r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
-{
-       NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
-       int ret = 0;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
-                                   NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
-       if (WARN_ON(ret)) {
-               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
-               return ret;
-       }
-
-       for (unsigned i = 0; i < ctrl->tableLen; i++) {
-               enum nvkm_subdev_type type;
-               int inst;
-
-               nvkm_debug(&gsp->subdev,
-                          "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i,
-                          ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
-                          ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
-
-               switch (ctrl->table[i].engineIdx) {
-               case MC_ENGINE_IDX_GSP:
-                       type = NVKM_SUBDEV_GSP;
-                       inst = 0;
-                       break;
-               case MC_ENGINE_IDX_DISP:
-                       type = NVKM_ENGINE_DISP;
-                       inst = 0;
-                       break;
-               case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
-                       type = NVKM_ENGINE_CE;
-                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
-                       break;
-               case MC_ENGINE_IDX_GR0:
-                       type = NVKM_ENGINE_GR;
-                       inst = 0;
-                       break;
-               case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
-                       type = NVKM_ENGINE_NVDEC;
-                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
-                       break;
-               case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
-                       type = NVKM_ENGINE_NVENC;
-                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
-                       break;
-               case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
-                       type = NVKM_ENGINE_NVJPG;
-                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
-                       break;
-               case MC_ENGINE_IDX_OFA0:
-                       type = NVKM_ENGINE_OFA;
-                       inst = 0;
-                       break;
-               default:
-                       continue;
-               }
-
-               if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
-                       ret = -ENOSPC;
-                       break;
-               }
-
-               gsp->intr[gsp->intr_nr].type = type;
-               gsp->intr[gsp->intr_nr].inst = inst;
-               gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
-               gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;
-               gsp->intr_nr++;
-       }
-
-       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
-       return ret;
-}
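
For example, a table entry reporting engineIdx MC_ENGINE_IDX_CE3 lands in gsp->intr[] as type NVKM_ENGINE_CE with inst 3, while entries whose engineIdx has no nvkm counterpart fall through the default case and are skipped.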
-
-static int
-r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
-{
-       GspStaticConfigInfo *rpc;
-       int last_usable = -1;
-
-       rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
-       if (IS_ERR(rpc))
-               return PTR_ERR(rpc);
-
-       gsp->internal.client.object.client = &gsp->internal.client;
-       gsp->internal.client.object.parent = NULL;
-       gsp->internal.client.object.handle = rpc->hInternalClient;
-       gsp->internal.client.gsp = gsp;
-
-       gsp->internal.device.object.client = &gsp->internal.client;
-       gsp->internal.device.object.parent = &gsp->internal.client.object;
-       gsp->internal.device.object.handle = rpc->hInternalDevice;
-
-       gsp->internal.device.subdevice.client = &gsp->internal.client;
-       gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
-       gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
-
-       gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
-       gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
-
-       for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
-               NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
-                       &rpc->fbRegionInfoParams.fbRegion[i];
-
-               nvkm_debug(&gsp->subdev, "fb region %d: "
-                          "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
-                          reg->base, reg->limit, reg->reserved, reg->performance,
-                          reg->supportCompressed, reg->supportISO, reg->bProtected);
-
-               if (!reg->reserved && !reg->bProtected) {
-                       if (reg->supportCompressed && reg->supportISO &&
-                           !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) {
-                                       const u64 size = (reg->limit + 1) - reg->base;
-
-                                       gsp->fb.region[gsp->fb.region_nr].addr = reg->base;
-                                       gsp->fb.region[gsp->fb.region_nr].size = size;
-                                       gsp->fb.region_nr++;
-                       }
-
-                       last_usable = i;
-               }
-       }
-
-       if (last_usable >= 0) {
-               u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
-
-               gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
-       }
-
-       for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
-               if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
-                       gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask);
-                       gsp->gr.gpcs++;
-               }
-       }
-
-       nvkm_gsp_rpc_done(gsp, rpc);
-       return 0;
-}
-
-static void
-nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
-{
-       if (mem->data) {
-               /*
-                * Poison the buffer to catch any unexpected access from
-                * GSP-RM if the buffer was prematurely freed.
-                */
-               memset(mem->data, 0xFF, mem->size);
-
-               dma_free_coherent(mem->dev, mem->size, mem->data, mem->addr);
-               put_device(mem->dev);
-
-               memset(mem, 0, sizeof(*mem));
-       }
-}
-
-/**
- * nvkm_gsp_mem_ctor - constructor for nvkm_gsp_mem objects
- * @gsp: gsp pointer
- * @size: number of bytes to allocate
- * @mem: nvkm_gsp_mem object to initialize
- *
- * Allocates a block of memory for use with GSP.
- *
- * This memory block can potentially out-live the driver's remove() callback,
- * so we take a device reference to ensure its lifetime. The reference is
- * dropped in the destructor.
- */
-static int
-nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
-{
-       mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
-       if (WARN_ON(!mem->data))
-               return -ENOMEM;
-
-       mem->size = size;
-       mem->dev = get_device(gsp->subdev.device->dev);
-
-       return 0;
-}
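
A hedged usage sketch of the ctor/dtor pair above; the field names match the code here, but the surrounding function is invented for illustration:

/* Sketch: allocate a page-sized DMA-coherent buffer for GSP-RM, use it,
 * then release it.  nvkm_gsp_mem_dtor() poisons the buffer, frees it and
 * drops the device reference the constructor took.
 */
static int gsp_mem_example(struct nvkm_gsp *gsp)
{
	struct nvkm_gsp_mem scratch = {};
	int ret;

	ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &scratch);
	if (ret)
		return ret;

	memset(scratch.data, 0, scratch.size); /* CPU view of the buffer */
	/* ... hand scratch.addr (the DMA address) to GSP-RM ... */

	nvkm_gsp_mem_dtor(&scratch);
	return 0;
}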
-
-static int
-r535_gsp_postinit(struct nvkm_gsp *gsp)
-{
-       struct nvkm_device *device = gsp->subdev.device;
-       int ret;
-
-       ret = r535_gsp_rpc_get_gsp_static_info(gsp);
-       if (WARN_ON(ret))
-               return ret;
-
-       INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);
-
-       ret = r535_gsp_intr_get_table(gsp);
-       if (WARN_ON(ret))
-               return ret;
-
-       ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst);
-       if (WARN_ON(ret < 0))
-               return ret;
-
-       ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev,
-                           r535_gsp_intr, &gsp->subdev.inth);
-       if (WARN_ON(ret))
-               return ret;
-
-       nvkm_inth_allow(&gsp->subdev.inth);
-       nvkm_wr32(device, 0x110004, 0x00000040);
-
-       /* Release the DMA buffers that were needed only for boot and init */
-       nvkm_gsp_mem_dtor(&gsp->boot.fw);
-       nvkm_gsp_mem_dtor(&gsp->libos);
-
-       return ret;
-}
-
-static int
-r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
-{
-       rpc_unloading_guest_driver_v1F_07 *rpc;
-
-       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc));
-       if (IS_ERR(rpc))
-               return PTR_ERR(rpc);
-
-       if (suspend) {
-               rpc->bInPMTransition = 1;
-               rpc->bGc6Entering = 0;
-               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
-       } else {
-               rpc->bInPMTransition = 0;
-               rpc->bGc6Entering = 0;
-               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
-       }
-
-       return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
-}
-
-enum registry_type {
-       REGISTRY_TABLE_ENTRY_TYPE_DWORD  = 1, /* 32-bit unsigned integer */
-       REGISTRY_TABLE_ENTRY_TYPE_BINARY = 2, /* Binary blob */
-       REGISTRY_TABLE_ENTRY_TYPE_STRING = 3, /* Null-terminated string */
-};
-
-/* An arbitrary limit to the length of a registry key */
-#define REGISTRY_MAX_KEY_LENGTH                64
-
-/**
- * struct registry_list_entry - linked list member for a registry key/value
- * @head: list_head struct
- * @type: dword, binary, or string
- * @klen: the length of the key name, including the terminating NUL
- * @vlen: the length of the value
- * @key: the key name
- * @dword: the data, if REGISTRY_TABLE_ENTRY_TYPE_DWORD
- * @binary: the data, if TYPE_BINARY or TYPE_STRING
- *
- * Every registry key/value is represented internally by this struct.
- *
- * Type DWORD is a simple 32-bit unsigned integer, and its value is stored in
- * @dword.
- *
- * Types BINARY and STRING are variable-length binary blobs.  The only real
- * difference between BINARY and STRING is that STRING is null-terminated and
- * is expected to contain only printable characters.
- *
- * Note: it is technically possible to have multiple keys with the same name
- * but different types, but this is not useful since GSP-RM expects keys to
- * have only one specific type.
- */
-struct registry_list_entry {
-       struct list_head head;
-       enum registry_type type;
-       size_t klen;
-       char key[REGISTRY_MAX_KEY_LENGTH];
-       size_t vlen;
-       u32 dword;                      /* TYPE_DWORD */
-       u8 binary[] __counted_by(vlen); /* TYPE_BINARY or TYPE_STRING */
-};
-
-/**
- * add_registry -- adds a registry entry
- * @gsp: gsp pointer
- * @key: name of the registry key
- * @type: type of data
- * @data: pointer to value
- * @length: size of data, in bytes
- *
- * Adds a registry key/value pair to the registry database.
- *
- * This function collects the registry information in a linked list.  After
- * all registry keys have been added, build_registry() is used to create the
- * RPC data structure.
- *
- * registry_rpc_size is a running total of the size of all registry keys.
- * It's used to avoid an O(n) calculation of the size when the RPC is built.
- *
- * Returns 0 on success, or negative error code on error.
- */
-static int add_registry(struct nvkm_gsp *gsp, const char *key,
-                       enum registry_type type, const void *data, size_t length)
-{
-       struct registry_list_entry *reg;
-       const size_t nlen = strnlen(key, REGISTRY_MAX_KEY_LENGTH) + 1;
-       size_t alloc_size; /* extra bytes to alloc for binary or string value */
-
-       if (nlen > REGISTRY_MAX_KEY_LENGTH)
-               return -EINVAL;
-
-       alloc_size = (type == REGISTRY_TABLE_ENTRY_TYPE_DWORD) ? 0 : length;
-
-       reg = kmalloc(sizeof(*reg) + alloc_size, GFP_KERNEL);
-       if (!reg)
-               return -ENOMEM;
-
-       switch (type) {
-       case REGISTRY_TABLE_ENTRY_TYPE_DWORD:
-               reg->dword = *(const u32 *)(data);
-               break;
-       case REGISTRY_TABLE_ENTRY_TYPE_BINARY:
-       case REGISTRY_TABLE_ENTRY_TYPE_STRING:
-               memcpy(reg->binary, data, alloc_size);
-               break;
-       default:
-               nvkm_error(&gsp->subdev, "unrecognized registry type %u for '%s'\n",
-                          type, key);
-               kfree(reg);
-               return -EINVAL;
-       }
-
-       memcpy(reg->key, key, nlen);
-       reg->klen = nlen;
-       reg->vlen = length;
-       reg->type = type;
-
-       list_add_tail(&reg->head, &gsp->registry_list);
-       gsp->registry_rpc_size += sizeof(PACKED_REGISTRY_ENTRY) + nlen + alloc_size;
-
-       return 0;
-}
-
-static int add_registry_num(struct nvkm_gsp *gsp, const char *key, u32 value)
-{
-       return add_registry(gsp, key, REGISTRY_TABLE_ENTRY_TYPE_DWORD,
-                           &value, sizeof(u32));
-}
-
-static int add_registry_string(struct nvkm_gsp *gsp, const char *key, const char *value)
-{
-       return add_registry(gsp, key, REGISTRY_TABLE_ENTRY_TYPE_STRING,
-                           value, strlen(value) + 1);
-}
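
To make the registry_rpc_size accounting concrete, a worked example; the key names are only illustrative, and PACKED_REGISTRY_ENTRY is the fixed-size entry header from the NVRM headers:

/* add_registry_num(gsp, "RMSecBusResetEnable", 1):
 *     klen = strlen("RMSecBusResetEnable") + 1 = 20, alloc_size = 0
 *     registry_rpc_size += sizeof(PACKED_REGISTRY_ENTRY) + 20
 *
 * add_registry_string(gsp, "RmMsg", "all"):
 *     klen = 6, vlen = alloc_size = strlen("all") + 1 = 4
 *     registry_rpc_size += sizeof(PACKED_REGISTRY_ENTRY) + 6 + 4
 *
 * build_registry() below must end its string/blob area exactly at
 * registry_rpc_size, which its final WARN_ON() checks.
 */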
-
-/**
- * build_registry -- create the registry RPC data
- * @gsp: gsp pointer
- * @registry: pointer to the RPC payload to fill
- *
- * After all registry key/value pairs have been added, call this function to
- * build the RPC.
- *
- * The registry RPC looks like this:
- *
- * +-----------------+
- * |NvU32 size;      |
- * |NvU32 numEntries;|
- * +-----------------+
- * +----------------------------------------+
- * |PACKED_REGISTRY_ENTRY                   |
- * +----------------------------------------+
- * |Null-terminated key (string) for entry 0|
- * +----------------------------------------+
- * |Binary/string data value for entry 0    | (only if necessary)
- * +----------------------------------------+
- *
- * +----------------------------------------+
- * |PACKED_REGISTRY_ENTRY                   |
- * +----------------------------------------+
- * |Null-terminated key (string) for entry 1|
- * +----------------------------------------+
- * |Binary/string data value for entry 1    | (only if necessary)
- * +----------------------------------------+
- * ... (and so on, one copy for each entry)
- *
- *
- * The 'data' field of an entry is either a 32-bit integer (for type DWORD)
- * or an offset into the PACKED_REGISTRY_TABLE (for types BINARY and STRING).
- *
- * All memory allocated by add_registry() is released.
- */
-static void build_registry(struct nvkm_gsp *gsp, PACKED_REGISTRY_TABLE *registry)
-{
-       struct registry_list_entry *reg, *n;
-       size_t str_offset;
-       unsigned int i = 0;
-
-       registry->numEntries = list_count_nodes(&gsp->registry_list);
-       str_offset = struct_size(registry, entries, registry->numEntries);
-
-       list_for_each_entry_safe(reg, n, &gsp->registry_list, head) {
-               registry->entries[i].type = reg->type;
-               registry->entries[i].length = reg->vlen;
-
-               /* Append the key name to the table */
-               registry->entries[i].nameOffset = str_offset;
-               memcpy((void *)registry + str_offset, reg->key, reg->klen);
-               str_offset += reg->klen;
-
-               switch (reg->type) {
-               case REGISTRY_TABLE_ENTRY_TYPE_DWORD:
-                       registry->entries[i].data = reg->dword;
-                       break;
-               case REGISTRY_TABLE_ENTRY_TYPE_BINARY:
-               case REGISTRY_TABLE_ENTRY_TYPE_STRING:
-                       /* If the type is binary or string, also append the value */
-                       memcpy((void *)registry + str_offset, reg->binary, reg->vlen);
-                       registry->entries[i].data = str_offset;
-                       str_offset += reg->vlen;
-                       break;
-               default:
-                       break;
-               }
-
-               i++;
-               list_del(&reg->head);
-               kfree(reg);
-       }
-
-       /* Double-check that we calculated the sizes correctly */
-       WARN_ON(gsp->registry_rpc_size != str_offset);
-
-       registry->size = gsp->registry_rpc_size;
-}
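
Continuing the two-entry example from add_registry() above, the finished table would be laid out as follows, assuming struct_size(registry, entries, 2) (call it h) matches sizeof(PACKED_REGISTRY_TABLE) plus two entry headers, which is exactly what the WARN_ON() verifies:

/* offset 0      size = h + 30, numEntries = 2
 * entries[0]    type = DWORD,  nameOffset = h,      data = 1 (inline)
 * entries[1]    type = STRING, nameOffset = h + 20, data = h + 26
 * offset h      "RMSecBusResetEnable\0"   (20 bytes)
 * offset h+20   "RmMsg\0"                 ( 6 bytes)
 * offset h+26   "all\0"                   ( 4 bytes)
 */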
-
-/**
- * clean_registry -- clean up registry memory in case of error
- * @gsp: gsp pointer
- *
- * Call this function to clean up all memory allocated by add_registry()
- * in case of error and build_registry() is not called.
- */
-static void clean_registry(struct nvkm_gsp *gsp)
-{
-       struct registry_list_entry *reg, *n;
-
-       list_for_each_entry_safe(reg, n, &gsp->registry_list, head) {
-               list_del(&reg->head);
-               kfree(reg);
-       }
-
-       gsp->registry_rpc_size = sizeof(PACKED_REGISTRY_TABLE);
-}
-
-MODULE_PARM_DESC(NVreg_RegistryDwords,
-                "A semicolon-separated list of key=value pairs of GSP-RM registry keys");
-static char *NVreg_RegistryDwords;
-module_param(NVreg_RegistryDwords, charp, 0400);
-
-/* dword only */
-struct nv_gsp_registry_entries {
-       const char *name;
-       u32 value;
-};
-
-/*
- * r535_registry_entries - required registry entries for GSP-RM
- *
- * This array lists registry entries that are required for GSP-RM to
- * function correctly.
- *
- * RMSecBusResetEnable - enables PCI secondary bus reset
- * RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration
- *   registers on any PCI reset.
- */
-static const struct nv_gsp_registry_entries r535_registry_entries[] = {
-       { "RMSecBusResetEnable", 1 },
-       { "RMForcePcieConfigSave", 1 },
-};
-#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
-
-/**
- * strip - strips all characters in 'reject' from 's'
- * @s: string to strip
- * @reject: string of characters to remove
- *
- * 's' is modified.
- *
- * Returns the length of the new string, including the terminating NUL.
- */
-static size_t strip(char *s, const char *reject)
-{
-       char *p = s, *p2 = s;
-       size_t length = 0;
-       char c;
-
-       do {
-               while ((c = *p2) && strchr(reject, c))
-                       p2++;
-
-               *p++ = c = *p2++;
-               length++;
-       } while (c);
-
-       return length;
-}
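
For example, strip(s, " \t\n") applied to "RmMsg = all ; Foo=1\n" leaves "RmMsg=all;Foo=1" in place and returns the new length including the terminating NUL (16 here).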
-
-/**
- * r535_gsp_rpc_set_registry - build registry RPC and call GSP-RM
- * @gsp: gsp pointer
- *
- * The GSP-RM registry is a set of key/value pairs that configure some aspects
- * of GSP-RM. The keys are strings, and the values are 32-bit integers.
- *
- * The registry is built from a combination of a static hard-coded list (see
- * above) and entries passed on the driver's command line.
- */
-static int
-r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
-{
-       PACKED_REGISTRY_TABLE *rpc;
-       unsigned int i;
-       int ret;
-
-       INIT_LIST_HEAD(&gsp->registry_list);
-       gsp->registry_rpc_size = sizeof(PACKED_REGISTRY_TABLE);
-
-       for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
-               ret = add_registry_num(gsp, r535_registry_entries[i].name,
-                                      r535_registry_entries[i].value);
-               if (ret)
-                       goto fail;
-       }
-
-       /*
-        * The NVreg_RegistryDwords parameter is a string of key=value
-        * pairs separated by semicolons. We need to extract and trim each
-        * substring, and then parse the substring to extract the key and
-        * value.
-        */
-       if (NVreg_RegistryDwords) {
-               char *p = kstrdup(NVreg_RegistryDwords, GFP_KERNEL);
-               char *start, *next = p, *equal;
-
-               if (!p) {
-                       ret = -ENOMEM;
-                       goto fail;
-               }
-
-               /* Remove any whitespace from the parameter string */
-               strip(p, " \t\n");
-
-               while ((start = strsep(&next, ";"))) {
-                       long value;
-
-                       equal = strchr(start, '=');
-                       if (!equal || equal == start || equal[1] == 0) {
-                               nvkm_error(&gsp->subdev,
-                                          "ignoring invalid registry string '%s'\n",
-                                          start);
-                               continue;
-                       }
-
-                       /* Truncate the key=value string to just key */
-                       *equal = 0;
-
-                       ret = kstrtol(equal + 1, 0, &value);
-                       if (!ret) {
-                               ret = add_registry_num(gsp, start, value);
-                       } else {
-                               /* Not a number, so treat it as a string */
-                               ret = add_registry_string(gsp, start, equal + 1);
-                       }
-
-                       if (ret) {
-                               nvkm_error(&gsp->subdev,
-                                          "ignoring invalid registry key/value '%s=%s'\n",
-                                          start, equal + 1);
-                               continue;
-                       }
-               }
-
-               kfree(p);
-       }
-
-       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, gsp->registry_rpc_size);
-       if (IS_ERR(rpc)) {
-               ret = PTR_ERR(rpc);
-               goto fail;
-       }
-
-       build_registry(gsp, rpc);
-
-       return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
-
-fail:
-       clean_registry(gsp);
-       return ret;
-}
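
Putting the parsing above together, a hedged example of how the parameter might be passed via modprobe options (key names illustrative, quoted so the semicolon survives intact):

    options nouveau NVreg_RegistryDwords="RMSecBusResetEnable=1;RmMsg=all"

Here "RMSecBusResetEnable=1" parses numerically via kstrtol() and becomes a DWORD entry, while "RmMsg=all" fails numeric parsing and is stored as a STRING entry.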
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-static void
-r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
-{
-       const guid_t NVOP_DSM_GUID =
-               GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
-                         0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
-       u64 NVOP_DSM_REV = 0x00000100;
-       union acpi_object argv4 = {
-               .buffer.type    = ACPI_TYPE_BUFFER,
-               .buffer.length  = 4,
-               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
-       }, *obj;
-
-       caps->status = 0xffff;
-
-       if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))
-               return;
-
-       obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);
-       if (!obj)
-               return;
-
-       if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
-           WARN_ON(obj->buffer.length != 4))
-               return;
-
-       caps->status = 0;
-       caps->optimusCaps = *(u32 *)obj->buffer.pointer;
-
-       ACPI_FREE(obj);
-
-       kfree(argv4.buffer.pointer);
-}
-
-static void
-r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
-{
-       const guid_t JT_DSM_GUID =
-               GUID_INIT(0xCBECA351L, 0x067B, 0x4924,
-                         0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34);
-       u64 JT_DSM_REV = 0x00000103;
-       u32 caps;
-       union acpi_object argv4 = {
-               .buffer.type    = ACPI_TYPE_BUFFER,
-               .buffer.length  = sizeof(caps),
-               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
-       }, *obj;
-
-       jt->status = 0xffff;
-
-       obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);
-       if (!obj)
-               return;
-
-       if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
-           WARN_ON(obj->buffer.length != 4))
-               return;
-
-       jt->status = 0;
-       jt->jtCaps = *(u32 *)obj->buffer.pointer;
-       jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
-       jt->bSBIOSCaps = 0;
-
-       ACPI_FREE(obj);
-
-       kfree(argv4.buffer.pointer);
-}
-
-static void
-r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
-                                                MUX_METHOD_DATA_ELEMENT *part)
-{
-       union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list input = { 1, &mux_arg };
-       acpi_handle iter = NULL, handle_mux = NULL;
-       acpi_status status;
-       unsigned long long value;
-
-       mode->status = 0xffff;
-       part->status = 0xffff;
-
-       do {
-               status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter);
-               if (ACPI_FAILURE(status) || !iter)
-                       return;
-
-               status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
-               if (ACPI_FAILURE(status) || value != id)
-                       continue;
-
-               handle_mux = iter;
-       } while (!handle_mux);
-
-       if (!handle_mux)
-               return;
-
-       /* I -think- 0 means "acquire" according to nvidia's driver source */
-       input.pointer->integer.type = ACPI_TYPE_INTEGER;
-       input.pointer->integer.value = 0;
-
-       status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value);
-       if (ACPI_SUCCESS(status)) {
-               mode->acpiId = id;
-               mode->mode   = value;
-               mode->status = 0;
-       }
-
-       status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value);
-       if (ACPI_SUCCESS(status)) {
-               part->acpiId = id;
-               part->mode   = value;
-               part->status = 0;
-       }
-}
-
-static void
-r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux)
-{
-       mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]);
-
-       for (int i = 0; i < mux->tableLen; i++) {
-               r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i],
-                                                                &mux->acpiIdMuxPartTable[i]);
-       }
-}
-
-static void
-r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
-{
-       acpi_status status;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *_DOD;
-
-       dod->status = 0xffff;
-
-       status = acpi_evaluate_object(handle, "_DOD", NULL, &output);
-       if (ACPI_FAILURE(status))
-               return;
-
-       _DOD = output.pointer;
-
-       if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) ||
-           WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList)))
-               return;
-
-       for (int i = 0; i < _DOD->package.count; i++) {
-               if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER))
-                       return;
-
-               dod->acpiIdList[i] = _DOD->package.elements[i].integer.value;
-               dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
-       }
-
-       dod->status = 0;
-       kfree(output.pointer);
-}
-#endif
-
-static void
-r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
-{
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-       acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
-
-       if (!handle)
-               return;
-
-       acpi->bValid = 1;
-
-       r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
-       if (acpi->dodMethodData.status == 0)
-               r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData);
-
-       r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
-       r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
-#endif
-}
-
-static int
-r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
-{
-       struct nvkm_device *device = gsp->subdev.device;
-       struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
-       GspSystemInfo *info;
-
-       if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
-               return -ENOSYS;
-
-       info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
-       if (IS_ERR(info))
-               return PTR_ERR(info);
-
-       info->gpuPhysAddr = device->func->resource_addr(device, 0);
-       info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
-       info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
-       info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
-       info->maxUserVa = TASK_SIZE;
-       info->pciConfigMirrorBase = 0x088000;
-       info->pciConfigMirrorSize = 0x001000;
-       r535_gsp_acpi_info(gsp, &info->acpiMethodData);
-
-       return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
-}
-
-static int
-r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
-{
-       struct nvkm_gsp *gsp = priv;
-       struct nvkm_subdev *subdev = &gsp->subdev;
-       rpc_os_error_log_v17_00 *msg = repv;
-
-       if (WARN_ON(repc < sizeof(*msg)))
-               return -EINVAL;
-
-       nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString);
-       return 0;
-}
-
-static int
-r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
-{
-       rpc_rc_triggered_v17_02 *msg = repv;
-       struct nvkm_gsp *gsp = priv;
-       struct nvkm_subdev *subdev = &gsp->subdev;
-       struct nvkm_chan *chan;
-       unsigned long flags;
-
-       if (WARN_ON(repc < sizeof(*msg)))
-               return -EINVAL;
-
-       nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
-                  msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
-                  msg->partitionAttributionId);
-
-       chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid, &flags);
-       if (!chan) {
-               nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
-               return 0;
-       }
-
-       nvkm_chan_error(chan, false);
-       nvkm_chan_put(&chan, flags);
-       return 0;
-}
-
-static int
-r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
-{
-       struct nvkm_gsp *gsp = priv;
-       struct nvkm_subdev *subdev = &gsp->subdev;
-
-       WARN_ON(repc != 0);
-
-       nvkm_error(subdev, "mmu fault queued\n");
-       return 0;
-}
-
-static int
-r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
-{
-       struct nvkm_gsp *gsp = priv;
-       struct nvkm_gsp_client *client;
-       struct nvkm_subdev *subdev = &gsp->subdev;
-       rpc_post_event_v17_00 *msg = repv;
-
-       if (WARN_ON(repc < sizeof(*msg)))
-               return -EINVAL;
-       if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize))
-               return -EINVAL;
-
-       nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n",
-                  msg->hClient, msg->hEvent, msg->notifyIndex, msg->data,
-                  msg->status, msg->eventDataSize, msg->bNotifyList);
-
-       mutex_lock(&gsp->client_id.mutex);
-       client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff);
-       if (client) {
-               struct nvkm_gsp_event *event;
-               bool handled = false;
-
-               list_for_each_entry(event, &client->events, head) {
-                       if (event->object.handle == msg->hEvent) {
-                               event->func(event, msg->eventData, msg->eventDataSize);
-                               handled = true;
-                       }
-               }
-
-               if (!handled) {
-                       nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
-                                  msg->hClient, msg->hEvent);
-               }
-       } else {
-               nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);
-       }
-       mutex_unlock(&gsp->client_id.mutex);
-       return 0;
-}
-
-/**
- * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
- * @priv: gsp pointer
- * @fn: function number (ignored)
- * @repv: pointer to the sequencer buffer RPC
- * @repc: message size
- *
- * The GSP sequencer is a list of I/O commands that the GSP can send to
- * the driver to perform for various purposes.  The most common usage is to
- * perform a special mid-initialization reset.
- */
-static int
-r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
-{
-       struct nvkm_gsp *gsp = priv;
-       struct nvkm_subdev *subdev = &gsp->subdev;
-       struct nvkm_device *device = subdev->device;
-       rpc_run_cpu_sequencer_v17_00 *seq = repv;
-       int ptr = 0, ret;
-
-       nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex);
-
-       while (ptr < seq->cmdIndex) {
-               GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr];
-
-               ptr += 1;
-               ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);
-
-               switch (cmd->opCode) {
-               case GSP_SEQ_BUF_OPCODE_REG_WRITE: {
-                       u32 addr = cmd->payload.regWrite.addr;
-                       u32 data = cmd->payload.regWrite.val;
-
-                       nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data);
-                       nvkm_wr32(device, addr, data);
-               }
-                       break;
-               case GSP_SEQ_BUF_OPCODE_REG_MODIFY: {
-                       u32 addr = cmd->payload.regModify.addr;
-                       u32 mask = cmd->payload.regModify.mask;
-                       u32 data = cmd->payload.regModify.val;
-
-                       nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data);
-                       nvkm_mask(device, addr, mask, data);
-               }
-                       break;
-               case GSP_SEQ_BUF_OPCODE_REG_POLL: {
-                       u32 addr = cmd->payload.regPoll.addr;
-                       u32 mask = cmd->payload.regPoll.mask;
-                       u32 data = cmd->payload.regPoll.val;
-                       u32 usec = cmd->payload.regPoll.timeout ?: 4000000;
-                       //u32 error = cmd->payload.regPoll.error;
-
-                       nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec);
-                       nvkm_rd32(device, addr);
-                       nvkm_usec(device, usec,
-                               if ((nvkm_rd32(device, addr) & mask) == data)
-                                       break;
-                       );
-               }
-                       break;
-               case GSP_SEQ_BUF_OPCODE_DELAY_US: {
-                       u32 usec = cmd->payload.delayUs.val;
-
-                       nvkm_trace(subdev, "seq usec %d\n", usec);
-                       udelay(usec);
-               }
-                       break;
-               case GSP_SEQ_BUF_OPCODE_REG_STORE: {
-                       u32 addr = cmd->payload.regStore.addr;
-                       u32 slot = cmd->payload.regStore.index;
-
-                       seq->regSaveArea[slot] = nvkm_rd32(device, addr);
-                       nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot,
-                                  seq->regSaveArea[slot]);
-               }
-                       break;
-               case GSP_SEQ_BUF_OPCODE_CORE_RESET:
-                       nvkm_trace(subdev, "seq core reset\n");
-                       nvkm_falcon_reset(&gsp->falcon);
-                       nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080);
-                       nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000);
-                       break;
-               case GSP_SEQ_BUF_OPCODE_CORE_START:
-                       nvkm_trace(subdev, "seq core start\n");
-                       if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040)
-                               nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002);
-                       else
-                               nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002);
-                       break;
-               case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT:
-                       nvkm_trace(subdev, "seq core wait halt\n");
-                       nvkm_msec(device, 2000,
-                               if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010)
-                                       break;
-                       );
-                       break;
-               case GSP_SEQ_BUF_OPCODE_CORE_RESUME: {
-                       struct nvkm_sec2 *sec2 = device->sec2;
-                       u32 mbox0;
-
-                       nvkm_trace(subdev, "seq core resume\n");
-
-                       ret = gsp->func->reset(gsp);
-                       if (WARN_ON(ret))
-                               return ret;
-
-                       nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
-                       nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
-
-                       nvkm_falcon_start(&sec2->falcon);
-
-                       if (nvkm_msec(device, 2000,
-                               if (nvkm_rd32(device, 0x1180f8) & 0x04000000)
-                                       break;
-                       ) < 0)
-                               return -ETIMEDOUT;
-
-                       mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040);
-                       if (WARN_ON(mbox0)) {
-                               nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0);
-                               return -EIO;
-                       }
-
-                       nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
-
-                       if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
-                               return -EIO;
-               }
-                       break;
-               default:
-                       nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode);
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
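
A worked example of the framing above: each command is one opcode dword
followed by an opcode-dependent payload, so a GSP_SEQ_BUF_OPCODE_REG_WRITE
entry (assuming its payload is exactly the two-dword regWrite struct read
above) spans three dwords of commandBuffer[].  Register offsets and values
here are made up for illustration:

    /* Two register writes; cmdIndex, counted in dwords, would be 6. */
    u32 stream[] = {
            GSP_SEQ_BUF_OPCODE_REG_WRITE, 0x001000, 0x00000001,
            GSP_SEQ_BUF_OPCODE_REG_WRITE, 0x001004, 0x00000000,
    };
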
-
-static int
-r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
-{
-       GspFwWprMeta *meta;
-       int ret;
-
-       ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
-       if (ret)
-               return ret;
-
-       meta = gsp->wpr_meta.data;
-
-       meta->magic = GSP_FW_WPR_META_MAGIC;
-       meta->revision = GSP_FW_WPR_META_REVISION;
-
-       meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
-       meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
-
-       meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
-       meta->sizeOfBootloader = gsp->boot.fw.size;
-       meta->bootloaderCodeOffset = gsp->boot.code_offset;
-       meta->bootloaderDataOffset = gsp->boot.data_offset;
-       meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
-
-       meta->sysmemAddrOfSignature = gsp->sig.addr;
-       meta->sizeOfSignature = gsp->sig.size;
-
-       meta->gspFwRsvdStart = gsp->fb.heap.addr;
-       meta->nonWprHeapOffset = gsp->fb.heap.addr;
-       meta->nonWprHeapSize = gsp->fb.heap.size;
-       meta->gspFwWprStart = gsp->fb.wpr2.addr;
-       meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
-       meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
-       meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
-       meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
-       meta->frtsOffset = gsp->fb.wpr2.frts.addr;
-       meta->frtsSize = gsp->fb.wpr2.frts.size;
-       meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
-       meta->fbSize = gsp->fb.size;
-       meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
-       meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
-       meta->bootCount = 0;
-       meta->partitionRpcAddr = 0;
-       meta->partitionRpcRequestOffset = 0;
-       meta->partitionRpcReplyOffset = 0;
-       meta->verified = 0;
-       return 0;
-}
-
-static int
-r535_gsp_shared_init(struct nvkm_gsp *gsp)
-{
-       struct {
-               msgqTxHeader tx;
-               msgqRxHeader rx;
-       } *cmdq, *msgq;
-       int ret, i;
-
-       gsp->shm.cmdq.size = 0x40000;
-       gsp->shm.msgq.size = 0x40000;
-
-       gsp->shm.ptes.nr  = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;
-       gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
-       gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
-
-       ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +
-                                    gsp->shm.cmdq.size +
-                                    gsp->shm.msgq.size,
-                               &gsp->shm.mem);
-       if (ret)
-               return ret;
-
-       gsp->shm.ptes.ptr = gsp->shm.mem.data;
-       gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;
-       gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size;
-
-       for (i = 0; i < gsp->shm.ptes.nr; i++)
-               gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);
-
-       cmdq = gsp->shm.cmdq.ptr;
-       cmdq->tx.version = 0;
-       cmdq->tx.size = gsp->shm.cmdq.size;
-       cmdq->tx.entryOff = GSP_PAGE_SIZE;
-       cmdq->tx.msgSize = GSP_PAGE_SIZE;
-       cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize;
-       cmdq->tx.writePtr = 0;
-       cmdq->tx.flags = 1;
-       cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr);
-
-       msgq = gsp->shm.msgq.ptr;
-
-       gsp->cmdq.cnt = cmdq->tx.msgCount;
-       gsp->cmdq.wptr = &cmdq->tx.writePtr;
-       gsp->cmdq.rptr = &msgq->rx.readPtr;
-       gsp->msgq.cnt = cmdq->tx.msgCount;
-       gsp->msgq.wptr = &msgq->tx.writePtr;
-       gsp->msgq.rptr = &cmdq->rx.readPtr;
-       return 0;
-}
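
A worked sizing example for the allocation above, with 4KiB GSP pages: the
PTE count is computed in two steps, first the queue data pages, then the
extra pages needed to hold the PTE array itself (which is mapped too).

    cmdq + msgq       = 0x40000 + 0x40000 = 0x80000 bytes = 128 pages
    PTEs for those    = 128 * 8 = 1024 bytes              -> +1 page
    ptes.nr           = 128 + 1 = 129
    ptes.size         = ALIGN(129 * 8, 4096) = 4096 bytes
    total allocation  = 4096 + 0x40000 + 0x40000 bytes

Note also that the queue pointers are cross-wired: the driver's command-queue
write pointer is paired with the read pointer in the message-queue header,
and vice versa, since each side consumes what the other produces.
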
-
-int
-r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
-{
-       GSP_ARGUMENTS_CACHED *args;
-       int ret;
-
-       if (!resume) {
-               ret = r535_gsp_shared_init(gsp);
-               if (ret)
-                       return ret;
-
-               ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
-               if (ret)
-                       return ret;
-       }
-
-       args = gsp->rmargs.data;
-       args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
-       args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
-       args->messageQueueInitArguments.cmdQueueOffset =
-               (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
-       args->messageQueueInitArguments.statQueueOffset =
-               (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
-
-       if (!resume) {
-               args->srInitArguments.oldLevel = 0;
-               args->srInitArguments.flags = 0;
-               args->srInitArguments.bInPMTransition = 0;
-       } else {
-               args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
-               args->srInitArguments.flags = 0;
-               args->srInitArguments.bInPMTransition = 1;
-       }
-
-       return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-/*
- * If GSP-RM load fails, then the GSP nvkm object will be deleted, the logging
- * debugfs entries will be deleted, and it will not be possible to debug the
- * load failure. The keep_gsp_logging parameter tells Nouveau to copy the
- * logging buffers to new debugfs entries, and these entries are retained
- * until the driver unloads.
- */
-static bool keep_gsp_logging;
-module_param(keep_gsp_logging, bool, 0444);
-MODULE_PARM_DESC(keep_gsp_logging,
-                "Migrate the GSP-RM logging debugfs entries upon exit");
-
-/*
- * GSP-RM uses a pseudo-class mechanism to define a variety of per-"engine"
- * data structures, and each engine has a "class ID" generated by a
- * pre-processor. This is the class ID for the PMU.
- */
-#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU         0xf3d722
-
-/**
- * struct rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers
- * @ucode_eng_desc: the engine descriptor
- * @libos_print_buf_size: the size of the libos_print_buf[]
- * @libos_print_buf: the actual buffer
- *
- * The engine descriptor is divided into 31:8 "class ID" and 7:0 "instance
- * ID". We only care about messages from PMU.
- */
-struct rpc_ucode_libos_print_v1e_08 {
-       u32 ucode_eng_desc;
-       u32 libos_print_buf_size;
-       u8 libos_print_buf[];
-};
-
-/**
- * r535_gsp_msg_libos_print - capture log message from the PMU
- * @priv: gsp pointer
- * @fn: function number (ignored)
- * @repv: pointer to libos print RPC
- * @repc: message size
- *
- * Called when we receive a UCODE_LIBOS_PRINT event RPC from GSP-RM. This RPC
- * contains the contents of the libos print buffer from PMU. It is typically
- * only written to when PMU encounters an error.
- *
- * Technically this RPC can be used to pass print buffers from any number of
- * GSP-RM engines, but we only expect to receive them for the PMU.
- *
- * For the PMU, the buffer is 4K in size and the RPC always contains the full
- * contents.
- */
-static int
-r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc)
-{
-       struct nvkm_gsp *gsp = priv;
-       struct nvkm_subdev *subdev = &gsp->subdev;
-       struct rpc_ucode_libos_print_v1e_08 *rpc = repv;
-       unsigned int class = rpc->ucode_eng_desc >> 8;
-
-       nvkm_debug(subdev, "received libos print from class 0x%x for %u bytes\n",
-                  class, rpc->libos_print_buf_size);
-
-       if (class != NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU) {
-               nvkm_warn(subdev,
-                         "received libos print from unknown class 0x%x\n",
-                         class);
-               return -ENOMSG;
-       }
-
-       if (rpc->libos_print_buf_size > GSP_PAGE_SIZE) {
-               nvkm_error(subdev, "libos print is too large (%u bytes)\n",
-                          rpc->libos_print_buf_size);
-               return -E2BIG;
-       }
-
-       memcpy(gsp->blob_pmu.data, rpc->libos_print_buf, rpc->libos_print_buf_size);
-
-       return 0;
-}
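
A worked example of the descriptor split (class in bits 31:8, instance in
bits 7:0), for PMU instance 0:

    u32 desc  = (NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU << 8) | 0; /* 0xf3d72200 */
    u32 class = desc >> 8;                                         /* 0x00f3d722 */
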
-
-/**
- * create_debugfs - create a blob debugfs entry
- * @gsp: gsp pointer
- * @name: name of this dentry
- * @blob: blob wrapper
- *
- * Creates a debugfs entry for a logging buffer with the name 'name'.
- */
-static struct dentry *create_debugfs(struct nvkm_gsp *gsp, const char *name,
-                                    struct debugfs_blob_wrapper *blob)
-{
-       struct dentry *dent;
-
-       dent = debugfs_create_blob(name, 0444, gsp->debugfs.parent, blob);
-       if (IS_ERR(dent)) {
-               nvkm_error(&gsp->subdev,
-                          "failed to create %s debugfs entry\n", name);
-               return NULL;
-       }
-
-       /*
-        * For some reason, debugfs_create_blob doesn't set the size of the
-        * dentry, so do that here.  See [1]
-        *
-        * [1] https://lore.kernel.org/r/linux-fsdevel/20240207200619.3354549-1-ttabi@nvidia.com/
-        */
-       i_size_write(d_inode(dent), blob->size);
-
-       return dent;
-}
-
-/**
- * r535_gsp_libos_debugfs_init - create logging debugfs entries
- * @gsp: gsp pointer
- *
- * Create the debugfs entries. This exposes the log buffers to userspace so
- * that an external tool can parse it.
- *
- * The 'logpmu' buffer contains exception dumps from the PMU.  It is written
- * via an RPC sent from GSP-RM and is limited to 4KB.  We create it here because it's
- * only useful if there is a debugfs entry to expose it. If we get the PMU
- * logging RPC and there is no debugfs entry, the RPC is just ignored.
- *
- * The blob_init, blob_intr, blob_rm, and blob_pmu objects can't be transient
- * because debugfs_create_blob doesn't copy them.
- *
- * NOTE: OpenRM loads the logging elf image and prints the log messages
- * in real-time. We may add that capability in the future, but that
- * requires loading ELF images that are not distributed with the driver and
- * adding the parsing code to Nouveau.
- *
- * Ideally, this should be part of nouveau_debugfs_init(), but that function
- * is called too late. We really want to create these debugfs entries before
- * r535_gsp_booter_load() is called, so that if GSP-RM fails to initialize,
- * there could still be a log to capture.
- */
-static void
-r535_gsp_libos_debugfs_init(struct nvkm_gsp *gsp)
-{
-       struct device *dev = gsp->subdev.device->dev;
-
-       /* Create a new debugfs directory with a name unique to this GPU. */
-       gsp->debugfs.parent = debugfs_create_dir(dev_name(dev), nouveau_debugfs_root);
-       if (IS_ERR(gsp->debugfs.parent)) {
-               nvkm_error(&gsp->subdev,
-                          "failed to create %s debugfs root\n", dev_name(dev));
-               return;
-       }
-
-       gsp->blob_init.data = gsp->loginit.data;
-       gsp->blob_init.size = gsp->loginit.size;
-       gsp->blob_intr.data = gsp->logintr.data;
-       gsp->blob_intr.size = gsp->logintr.size;
-       gsp->blob_rm.data = gsp->logrm.data;
-       gsp->blob_rm.size = gsp->logrm.size;
-
-       gsp->debugfs.init = create_debugfs(gsp, "loginit", &gsp->blob_init);
-       if (!gsp->debugfs.init)
-               goto error;
-
-       gsp->debugfs.intr = create_debugfs(gsp, "logintr", &gsp->blob_intr);
-       if (!gsp->debugfs.intr)
-               goto error;
-
-       gsp->debugfs.rm = create_debugfs(gsp, "logrm", &gsp->blob_rm);
-       if (!gsp->debugfs.rm)
-               goto error;
-
-       /*
-        * Since the PMU buffer is copied from an RPC, it doesn't need to be
-        * a DMA buffer.
-        */
-       gsp->blob_pmu.size = GSP_PAGE_SIZE;
-       gsp->blob_pmu.data = kzalloc(gsp->blob_pmu.size, GFP_KERNEL);
-       if (!gsp->blob_pmu.data)
-               goto error;
-
-       gsp->debugfs.pmu = create_debugfs(gsp, "logpmu", &gsp->blob_pmu);
-       if (!gsp->debugfs.pmu) {
-               kfree(gsp->blob_pmu.data);
-               goto error;
-       }
-
-       i_size_write(d_inode(gsp->debugfs.init), gsp->blob_init.size);
-       i_size_write(d_inode(gsp->debugfs.intr), gsp->blob_intr.size);
-       i_size_write(d_inode(gsp->debugfs.rm), gsp->blob_rm.size);
-       i_size_write(d_inode(gsp->debugfs.pmu), gsp->blob_pmu.size);
-
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
-                             r535_gsp_msg_libos_print, gsp);
-
-       nvkm_debug(&gsp->subdev, "created debugfs GSP-RM logging entries\n");
-
-       if (keep_gsp_logging) {
-               nvkm_info(&gsp->subdev,
-                         "logging buffers will be retained on failure\n");
-       }
-
-       return;
-
-error:
-       debugfs_remove(gsp->debugfs.parent);
-       gsp->debugfs.parent = NULL;
-}
-
-#endif
-
-static inline u64
-r535_gsp_libos_id8(const char *name)
-{
-       u64 id = 0;
-
-       for (int i = 0; i < sizeof(id) && *name; i++, name++)
-               id = (id << 8) | *name;
-
-       return id;
-}
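
The helper packs up to the first eight bytes of the name into a big-endian
u64; for example, "LOGINIT" is seven ASCII bytes, so:

    r535_gsp_libos_id8("LOGINIT") == 0x004c4f47494e4954ULL
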
-
-/**
- * create_pte_array() - creates a PTE array for a physically contiguous buffer
- * @ptes: pointer to the array
- * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
- * @size: size of the buffer
- *
- * GSP-RM sometimes expects a physically-contiguous buffer to be accompanied
- * by an array of "PTEs", one for each page in that buffer.  Although in
- * theory that allows for
- * the buffer to be physically discontiguous, GSP-RM does not currently
- * support that.
- *
- * In this case, the PTEs are DMA addresses of each page of the buffer.  Since
- * the buffer is physically contiguous, calculating all the PTEs is simple
- * math.
- *
- * See memdescGetPhysAddrsForGpu()
- */
-static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
-{
-       unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
-       unsigned int i;
-
-       for (i = 0; i < num_pages; i++)
-               ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
-}
-
-/**
- * r535_gsp_libos_init() -- create the libos arguments structure
- * @gsp: gsp pointer
- *
- * The logging buffers are byte queues that contain encoded printf-like
- * messages from GSP-RM.  They need to be decoded by a special application
- * that can parse the buffers.
- *
- * The 'loginit' buffer contains logs from early GSP-RM init and
- * exception dumps.  The 'logrm' buffer contains the subsequent logs. Both are
- * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
- *
- * The physical address map for the log buffer is stored in the buffer
- * itself, starting with offset 1. Offset 0 contains the "put" pointer (pp).
- * Initially, pp is equal to 0. If the buffer has valid logging data in it,
- * then pp is the index into the buffer where the next logging entry will
- * be written. Therefore, the logging data is valid if:
- *   1 <= pp < sizeof(buffer)/sizeof(u64)
- *
- * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
- * configured for a larger page size (e.g. 64K pages), we need to give
- * the GSP an array of 4K pages. Fortunately, since the buffer is
- * physically contiguous, it's simple math to calculate the addresses.
- *
- * The buffers must be a multiple of GSP_PAGE_SIZE.  GSP-RM also currently
- * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
- * buffers to be physically contiguous anyway.
- *
- * The memory allocated for the arguments must remain until the GSP sends the
- * init_done RPC.
- *
- * See _kgspInitLibosLoggingStructures (allocates memory for buffers)
- * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
- */
-static int
-r535_gsp_libos_init(struct nvkm_gsp *gsp)
-{
-       LibosMemoryRegionInitArgument *args;
-       int ret;
-
-       ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos);
-       if (ret)
-               return ret;
-
-       args = gsp->libos.data;
-
-       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit);
-       if (ret)
-               return ret;
-
-       args[0].id8  = r535_gsp_libos_id8("LOGINIT");
-       args[0].pa   = gsp->loginit.addr;
-       args[0].size = gsp->loginit.size;
-       args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
-       args[0].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
-       create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size);
-
-       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr);
-       if (ret)
-               return ret;
-
-       args[1].id8  = r535_gsp_libos_id8("LOGINTR");
-       args[1].pa   = gsp->logintr.addr;
-       args[1].size = gsp->logintr.size;
-       args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
-       args[1].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
-       create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size);
-
-       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm);
-       if (ret)
-               return ret;
-
-       args[2].id8  = r535_gsp_libos_id8("LOGRM");
-       args[2].pa   = gsp->logrm.addr;
-       args[2].size = gsp->logrm.size;
-       args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
-       args[2].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
-       create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size);
-
-       ret = r535_gsp_rmargs_init(gsp, false);
-       if (ret)
-               return ret;
-
-       args[3].id8  = r535_gsp_libos_id8("RMARGS");
-       args[3].pa   = gsp->rmargs.addr;
-       args[3].size = gsp->rmargs.size;
-       args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
-       args[3].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
-
-#ifdef CONFIG_DEBUG_FS
-       r535_gsp_libos_debugfs_init(gsp);
-#endif
-
-       return 0;
-}
-
-void
-nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt)
-{
-       struct scatterlist *sgl;
-       int i;
-
-       dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
-
-       for_each_sgtable_sg(sgt, sgl, i) {
-               struct page *page = sg_page(sgl);
-
-               __free_page(page);
-       }
-
-       sg_free_table(sgt);
-}
-
-int
-nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
-{
-       const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
-       struct scatterlist *sgl;
-       int ret, i;
-
-       ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
-       if (ret)
-               return ret;
-
-       for_each_sgtable_sg(sgt, sgl, i) {
-               struct page *page = alloc_page(GFP_KERNEL);
-
-               if (!page) {
-                       nvkm_gsp_sg_free(device, sgt);
-                       return -ENOMEM;
-               }
-
-               sg_set_page(sgl, page, PAGE_SIZE, 0);
-       }
-
-       ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
-       if (ret)
-               nvkm_gsp_sg_free(device, sgt);
-
-       return ret;
-}
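
A minimal usage sketch of this allocate/free pair, mirroring how
r535_gsp_fini() below builds the suspend/resume image ('device' and 'size'
are assumed to be in scope):

    struct sg_table sgt;
    int ret;

    ret = nvkm_gsp_sg(device, size, &sgt);  /* allocate + DMA-map the pages */
    if (ret)
            return ret;

    /* ... hand the pages to GSP-RM, e.g. via nvkm_gsp_radix3_sg() ... */

    nvkm_gsp_sg_free(device, &sgt);         /* unmap + free on teardown */
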
-
-static void
-nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
-{
-       nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
-       nvkm_gsp_mem_dtor(&rx3->lvl1);
-       nvkm_gsp_mem_dtor(&rx3->lvl0);
-}
-
-/**
- * nvkm_gsp_radix3_sg - build a radix3 table from an S/G list
- * @gsp: gsp pointer
- * @sgt: S/G list to traverse
- * @size: size of the image, in bytes
- * @rx3: radix3 array to update
- *
- * The GSP uses a three-level page table, called radix3, to map the firmware.
- * Each 64-bit "pointer" in the table is either the bus address of an entry in
- * the next table (for levels 0 and 1) or the bus address of the next page in
- * the GSP firmware image itself.
- *
- * Level 0 contains a single entry in one page that points to the first page
- * of level 1.
- *
- * Level 1, since it's also only one page in size, contains up to 512 entries,
- * one for each page in Level 2.
- *
- * Level 2 can be up to 512 pages in size, and each of those entries points to
- * the next page of the firmware image.  Since there can be up to 512*512
- * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
- *
- * Internally, the GSP has its window into system memory, but the base
- * physical address of the aperture is not 0.  In fact, it varies depending on
- * the GPU architecture.  Since the GPU is a PCI device, this window is
- * accessed via DMA and is therefore bound by IOMMU translation.  The end
- * result is that GSP-RM must translate the bus addresses in the table to GSP
- * physical addresses.  All this should happen transparently.
- *
- * Returns 0 on success, or negative error code
- *
- * See kgspCreateRadix3_IMPL
- */
-static int
-nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
-                  struct nvkm_gsp_radix3 *rx3)
-{
-       struct sg_dma_page_iter sg_dma_iter;
-       struct scatterlist *sg;
-       size_t bufsize;
-       u64 *pte;
-       int ret, i, page_idx = 0;
-
-       ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0);
-       if (ret)
-               return ret;
-
-       ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1);
-       if (ret)
-               goto lvl1_fail;
-
-       // Allocate level 2
-       bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
-       ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2);
-       if (ret)
-               goto lvl2_fail;
-
-       // Write the bus address of level 1 to level 0
-       pte = rx3->lvl0.data;
-       *pte = rx3->lvl1.addr;
-
-       // Write the bus address of each page in level 2 to level 1
-       pte = rx3->lvl1.data;
-       for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0)
-               *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
-
-       // Finally, write the bus address of each page in sgt to level 2
-       for_each_sgtable_sg(&rx3->lvl2, sg, i) {
-               void *sgl_end;
-
-               pte = sg_virt(sg);
-               sgl_end = (void *)pte + sg->length;
-
-               for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) {
-                       *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
-                       page_idx++;
-
-                       // Go to the next scatterlist for level 2 if we've reached the end
-                       if ((void *)pte >= sgl_end)
-                               break;
-               }
-       }
-
-       if (ret) {
-lvl2_fail:
-               nvkm_gsp_mem_dtor(&rx3->lvl1);
-lvl1_fail:
-               nvkm_gsp_mem_dtor(&rx3->lvl0);
-       }
-
-       return ret;
-}
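
The 1GB bound in the comment follows directly from the page sizes, since
each 4KiB level page holds 512 eight-byte entries:

    lvl0: 1 page,      1 entry            -> points at lvl1
    lvl1: 1 page,      up to 512 entries  -> up to 512 lvl2 pages
    lvl2: <=512 pages, 512 entries each   -> up to 262144 image pages
    max image size = 512 * 512 * 4096 = 1GiB
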
-
-int
-r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
-{
-       int ret;
-
-       if (suspend) {
-               GspFwWprMeta *meta = gsp->wpr_meta.data;
-               u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
-               GspFwSRMeta *sr;
-
-               ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
-               if (ret)
-                       return ret;
-
-               ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3);
-               if (ret)
-                       return ret;
-
-               ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta);
-               if (ret)
-                       return ret;
-
-               sr = gsp->sr.meta.data;
-               sr->magic = GSP_FW_SR_META_MAGIC;
-               sr->revision = GSP_FW_SR_META_REVISION;
-               sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
-               sr->sizeOfSuspendResumeData = len;
-       }
-
-       ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
-       if (WARN_ON(ret))
-               return ret;
-
-       nvkm_msec(gsp->subdev.device, 2000,
-               if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000)
-                       break;
-       );
-
-       gsp->running = false;
-       return 0;
-}
-
-int
-r535_gsp_init(struct nvkm_gsp *gsp)
-{
-       int ret;
-
-       nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
-
-       if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
-               return -EIO;
-
-       ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
-       if (ret)
-               goto done;
-
-       gsp->running = true;
-
-done:
-       if (gsp->sr.meta.data) {
-               nvkm_gsp_mem_dtor(&gsp->sr.meta);
-               nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
-               nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
-               return ret;
-       }
-
-       if (ret == 0)
-               ret = r535_gsp_postinit(gsp);
-
-       return ret;
-}
-
-static int
-r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
-{
-       const struct firmware *fw = gsp->fws.bl;
-       const struct nvfw_bin_hdr *hdr;
-       RM_RISCV_UCODE_DESC *desc;
-       int ret;
-
-       hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
-       desc = (void *)fw->data + hdr->header_offset;
-
-       ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
-       if (ret)
-               return ret;
-
-       memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);
-
-       gsp->boot.code_offset = desc->monitorCodeOffset;
-       gsp->boot.data_offset = desc->monitorDataOffset;
-       gsp->boot.manifest_offset = desc->manifestOffset;
-       gsp->boot.app_version = desc->appVersion;
-       return 0;
-}
-
-static const struct nvkm_firmware_func
-r535_gsp_fw = {
-       .type = NVKM_FIRMWARE_IMG_SGT,
-};
-
-static int
-r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
-{
-       const u8 *img = gsp->fws.rm->data;
-       const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
-       const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
-       const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];
-
-       for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
-               if (!strcmp(&names[shdr->sh_name], name)) {
-                       *pdata = &img[shdr->sh_offset];
-                       *psize = shdr->sh_size;
-                       return 0;
-               }
-       }
-
-       nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
-       return -ENOENT;
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-struct r535_gsp_log {
-       struct nvif_log log;
-
-       /*
-        * Logging buffers in debugfs. The wrapper objects need to remain
-        * in memory until the dentry is deleted.
-        */
-       struct dentry *debugfs_logging_dir;
-       struct debugfs_blob_wrapper blob_init;
-       struct debugfs_blob_wrapper blob_intr;
-       struct debugfs_blob_wrapper blob_rm;
-       struct debugfs_blob_wrapper blob_pmu;
-};
-
-/**
- * r535_debugfs_shutdown - delete GSP-RM logging buffers for one GPU
- * @_log: nvif_log struct for this GPU
- *
- * Called when the driver is shutting down, to clean up the retained GSP-RM
- * logging buffers.
- */
-static void r535_debugfs_shutdown(struct nvif_log *_log)
-{
-       struct r535_gsp_log *log = container_of(_log, struct r535_gsp_log, log);
-
-       debugfs_remove(log->debugfs_logging_dir);
-
-       kfree(log->blob_init.data);
-       kfree(log->blob_intr.data);
-       kfree(log->blob_rm.data);
-       kfree(log->blob_pmu.data);
-
-       /* We also need to delete the list object */
-       kfree(log);
-}
-
-/**
- * is_empty - return true if the logging buffer was never written to
- * @b: blob wrapper with ->data field pointing to logging buffer
- *
- * The first 64-bit field of loginit, logintr, and logrm is the 'put'
- * pointer, and it is initialized to 0. It's a dword-based index into the
- * circular buffer, indicating where the next printf write will be made.
- *
- * If the pointer is still 0 when GSP-RM is shut down, that means that the
- * buffer was never written to, so it can be ignored.
- *
- * This test also works for logpmu, even though it doesn't have a put pointer.
- */
-static bool is_empty(const struct debugfs_blob_wrapper *b)
-{
-       u64 *put = b->data;
-
-       return put ? (*put == 0) : true;
-}
-
-/**
- * r535_gsp_copy_log - preserve the logging buffers in a blob
- * @parent: the top-level dentry for this GPU
- * @name: name of debugfs entry to create
- * @s: original wrapper object to copy from
- * @t: new wrapper object to copy to
- *
- * When GSP shuts down, the nvkm_gsp object and all its memory is deleted.
- * To preserve the logging buffers, the buffers need to be copied, but only
- * if they actually have data.
- */
-static int r535_gsp_copy_log(struct dentry *parent,
-                            const char *name,
-                            const struct debugfs_blob_wrapper *s,
-                            struct debugfs_blob_wrapper *t)
-{
-       struct dentry *dent;
-       void *p;
-
-       if (is_empty(s))
-               return 0;
-
-       /* The original buffers will be deleted */
-       p = kmemdup(s->data, s->size, GFP_KERNEL);
-       if (!p)
-               return -ENOMEM;
-
-       t->data = p;
-       t->size = s->size;
-
-       dent = debugfs_create_blob(name, 0444, parent, t);
-       if (IS_ERR(dent)) {
-               kfree(p);
-               memset(t, 0, sizeof(*t));
-               return PTR_ERR(dent);
-       }
-
-       i_size_write(d_inode(dent), t->size);
-
-       return 0;
-}
-
-/**
- * r535_gsp_retain_logging - copy logging buffers to new debugfs root
- * @gsp: gsp pointer
- *
- * If keep_gsp_logging is enabled, then we want to preserve the GSP-RM logging
- * buffers and their debugfs entries, but all those objects would normally be
- * deleted if GSP-RM fails to load.
- *
- * To preserve the logging buffers, we need to:
- *
- * 1) Allocate new buffers and copy the logs into them, so that the original
- * DMA buffers can be released.
- *
- * 2) Preserve the directories.  We don't need to save single dentries because
- * we're going to delete the parent directory when the driver unloads.
- *
- * If anything fails in this process, then all the dentries need to be
- * deleted.  We don't need to deallocate the original logging buffers because
- * the caller will do that regardless.
- */
-static void r535_gsp_retain_logging(struct nvkm_gsp *gsp)
-{
-       struct device *dev = gsp->subdev.device->dev;
-       struct r535_gsp_log *log = NULL;
-       int ret;
-
-       if (!keep_gsp_logging || !gsp->debugfs.parent) {
-               /* Nothing to do */
-               goto exit;
-       }
-
-       /* Check to make sure at least one buffer has data. */
-       if (is_empty(&gsp->blob_init) && is_empty(&gsp->blob_intr) &&
-           is_empty(&gsp->blob_rm) && is_empty(&gsp->blob_pmu)) {
-               nvkm_warn(&gsp->subdev, "all logging buffers are empty\n");
-               goto exit;
-       }
-
-       log = kzalloc(sizeof(*log), GFP_KERNEL);
-       if (!log)
-               goto error;
-
-       /*
-        * Since the nvkm_gsp object is going away, the debugfs_blob_wrapper
-        * objects are also being deleted, which means the dentries will no
-        * longer be valid.  Delete the existing entries so that we can create
-        * new ones with the same name.
-        */
-       debugfs_remove(gsp->debugfs.init);
-       debugfs_remove(gsp->debugfs.intr);
-       debugfs_remove(gsp->debugfs.rm);
-       debugfs_remove(gsp->debugfs.pmu);
-
-       ret = r535_gsp_copy_log(gsp->debugfs.parent, "loginit", &gsp->blob_init, &log->blob_init);
-       if (ret)
-               goto error;
-
-       ret = r535_gsp_copy_log(gsp->debugfs.parent, "logintr", &gsp->blob_intr, &log->blob_intr);
-       if (ret)
-               goto error;
-
-       ret = r535_gsp_copy_log(gsp->debugfs.parent, "logrm", &gsp->blob_rm, &log->blob_rm);
-       if (ret)
-               goto error;
-
-       ret = r535_gsp_copy_log(gsp->debugfs.parent, "logpmu", &gsp->blob_pmu, &log->blob_pmu);
-       if (ret)
-               goto error;
-
-       /* The nvkm_gsp object is going away, so save the dentry */
-       log->debugfs_logging_dir = gsp->debugfs.parent;
-
-       log->log.shutdown = r535_debugfs_shutdown;
-       list_add(&log->log.entry, &gsp_logs.head);
-
-       nvkm_warn(&gsp->subdev,
-                 "logging buffers migrated to /sys/kernel/debug/nouveau/%s\n",
-                 dev_name(dev));
-
-       return;
-
-error:
-       nvkm_warn(&gsp->subdev, "failed to migrate logging buffers\n");
-
-exit:
-       debugfs_remove(gsp->debugfs.parent);
-
-       if (log) {
-               kfree(log->blob_init.data);
-               kfree(log->blob_intr.data);
-               kfree(log->blob_rm.data);
-               kfree(log->blob_pmu.data);
-               kfree(log);
-       }
-}
-
-#endif
-
-/**
- * r535_gsp_libos_debugfs_fini - cleanup/retain log buffers on shutdown
- * @gsp: gsp pointer
- *
- * If the log buffers are exposed via debugfs, the data for those entries
- * needs to be cleaned up when the GSP device shuts down.
- */
-static void
-r535_gsp_libos_debugfs_fini(struct nvkm_gsp __maybe_unused *gsp)
-{
-#ifdef CONFIG_DEBUG_FS
-       r535_gsp_retain_logging(gsp);
-
-       /*
-        * Unlike the other buffers, the PMU blob is a kmalloc'd buffer that
-        * exists only if the debugfs entries were created.
-        */
-       kfree(gsp->blob_pmu.data);
-       gsp->blob_pmu.data = NULL;
-#endif
-}
-
-void
-r535_gsp_dtor(struct nvkm_gsp *gsp)
-{
-       idr_destroy(&gsp->client_id.idr);
-       mutex_destroy(&gsp->client_id.mutex);
-
-       nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
-       nvkm_gsp_mem_dtor(&gsp->sig);
-       nvkm_firmware_dtor(&gsp->fw);
-
-       nvkm_falcon_fw_dtor(&gsp->booter.unload);
-       nvkm_falcon_fw_dtor(&gsp->booter.load);
-
-       mutex_destroy(&gsp->msgq.mutex);
-       mutex_destroy(&gsp->cmdq.mutex);
-
-       nvkm_gsp_dtor_fws(gsp);
-
-       nvkm_gsp_mem_dtor(&gsp->rmargs);
-       nvkm_gsp_mem_dtor(&gsp->wpr_meta);
-       nvkm_gsp_mem_dtor(&gsp->shm.mem);
-
-       r535_gsp_libos_debugfs_fini(gsp);
-
-       nvkm_gsp_mem_dtor(&gsp->loginit);
-       nvkm_gsp_mem_dtor(&gsp->logintr);
-       nvkm_gsp_mem_dtor(&gsp->logrm);
-}
-
-int
-r535_gsp_oneinit(struct nvkm_gsp *gsp)
-{
-       struct nvkm_device *device = gsp->subdev.device;
-       const u8 *data;
-       u64 size;
-       int ret;
-
-       mutex_init(&gsp->cmdq.mutex);
-       mutex_init(&gsp->msgq.mutex);
-
-       /* Load GSP firmware from ELF image into DMA-accessible memory. */
-       ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
-       if (ret)
-               return ret;
-
-       ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw);
-       if (ret)
-               return ret;
-
-       /* Load relevant signature from ELF image. */
-       ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size);
-       if (ret)
-               return ret;
-
-       ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig);
-       if (ret)
-               return ret;
-
-       memcpy(gsp->sig.data, data, size);
-
-       /* Build radix3 page table for ELF image. */
-       ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
-       if (ret)
-               return ret;
-
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
-                             r535_gsp_msg_run_cpu_sequencer, gsp);
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
-                             r535_gsp_msg_rc_triggered, gsp);
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
-                             r535_gsp_msg_mmu_fault_queued, gsp);
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
-       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
-       ret = r535_gsp_rm_boot_ctor(gsp);
-       if (ret)
-               return ret;
-
-       /* Release FW images - we've copied them to DMA buffers now. */
-       nvkm_gsp_dtor_fws(gsp);
-
-       /* Calculate FB layout. */
-       gsp->fb.wpr2.frts.size = 0x100000;
-       gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
-
-       gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
-       gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
-
-       gsp->fb.wpr2.elf.size = gsp->fw.len;
-       gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
-
-       {
-               u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
-
-               gsp->fb.wpr2.heap.size =
-                       gsp->func->wpr_heap.os_carveout_size +
-                       gsp->func->wpr_heap.base_size +
-                       ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
-                       ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
-
-               gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
-       }
-
-       gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
-       gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
-
-       gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
-       gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
-
-       gsp->fb.heap.size = 0x100000;
-       gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
-
-       ret = nvkm_gsp_fwsec_frts(gsp);
-       if (WARN_ON(ret))
-               return ret;
-
-       ret = r535_gsp_libos_init(gsp);
-       if (WARN_ON(ret))
-               return ret;
-
-       ret = r535_gsp_wpr_meta_init(gsp);
-       if (WARN_ON(ret))
-               return ret;
-
-       ret = r535_gsp_rpc_set_system_info(gsp);
-       if (WARN_ON(ret))
-               return ret;
-
-       ret = r535_gsp_rpc_set_registry(gsp);
-       if (WARN_ON(ret))
-               return ret;
-
-       mutex_init(&gsp->client_id.mutex);
-       idr_init(&gsp->client_id.idr);
-       return 0;
-}
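
The FB layout computed above is easiest to read as a map: regions are carved
top-down from just below the VBIOS, each aligned down beneath the previous
one (sizes and alignments from the code; absolute addresses are per-GPU):

    vga_workspace.addr                                  <- top of usable FB
    wpr2 end  = ALIGN_DOWN(vga_workspace.addr, 0x20000)
    frts      : 0x100000 bytes, ending at ALIGN_DOWN(fb.bios.addr, 0x20000)
    boot      : boot.fw.size bytes,  0x1000-aligned below frts
    elf       : fw.len bytes,        0x10000-aligned below boot
    heap      : wpr_heap size,       0x100000-aligned below elf
    wpr2.addr = ALIGN_DOWN(heap.addr - sizeof(GspFwWprMeta), 0x100000)
    fb.heap   : 0x100000 bytes of non-WPR heap directly below wpr2.addr
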
index d50f2c351d9368aa9f85d0fef5b53e9d3316d9f7..a5f6b2abfd337f442b715943caabd95895b2ee29 100644 (file)
@@ -3,8 +3,23 @@
 # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 
 nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o
 nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o
 nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o
 nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o
 nvkm-y += nvkm/subdev/gsp/rm/r535/client.o
 nvkm-y += nvkm/subdev/gsp/rm/r535/device.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
new file mode 100644 (file)
index 0000000..ce2c86c
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/bar/gf100.h>
+
+#include <core/mm.h>
+#include <subdev/fb.h>
+#include <subdev/gsp.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu/vmm.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+static void
+r535_bar_flush(struct nvkm_bar *bar)
+{
+       ioread32_native(bar->flushBAR2);
+}
+
+static void
+r535_bar_bar2_wait(struct nvkm_bar *base)
+{
+}
+
+static int
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
+{
+       rpc_update_bar_pde_v15_00 *rpc;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc));
+       if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+               return -EIO;
+
+       rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
+       rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
+       rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+}
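
A worked example of the entry encoding, which shifts the page-directory
address right by four and ORs in bit 1 (the low bits aren't documented here
beyond the "PD3 entry format" note):

    addr = 0x00200000  ->  entryValue = (0x00200000 >> 4) | 2 = 0x00020002
    addr = 0           ->  entryValue = 0  /* unbind, as in r535_bar_bar2_fini() */
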
+
+static void
+r535_bar_bar2_fini(struct nvkm_bar *bar)
+{
+       struct nvkm_gsp *gsp = bar->subdev.device->gsp;
+
+       bar->flushBAR2 = bar->flushBAR2PhysMode;
+       nvkm_done(bar->flushFBZero);
+
+       WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
+}
+
+static void
+r535_bar_bar2_init(struct nvkm_bar *bar)
+{
+       struct nvkm_device *device = bar->subdev.device;
+       struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
+       struct nvkm_gsp *gsp = device->gsp;
+
+       WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
+       vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
+
+       if (!bar->flushFBZero) {
+               struct nvkm_memory *fbZero;
+               int ret;
+
+               ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero);
+               if (ret == 0) {
+                       ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero);
+                       nvkm_memory_unref(&fbZero);
+               }
+               WARN_ON(ret);
+       }
+
+       bar->bar2 = true;
+       bar->flushBAR2 = nvkm_kmap(bar->flushFBZero);
+       WARN_ON(!bar->flushBAR2);
+}
+
+static void
+r535_bar_bar1_wait(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_fini(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_init(struct nvkm_bar *bar)
+{
+       struct nvkm_device *device = bar->subdev.device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
+       struct nvkm_memory *pd3;
+       int ret;
+
+       ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
+       if (WARN_ON(ret))
+               return;
+
+       nvkm_memory_unref(&vmm->pd->pt[0]->memory);
+
+       ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
+       nvkm_memory_unref(&pd3);
+       if (WARN_ON(ret))
+               return;
+
+       vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
+}
+
+static void *
+r535_bar_dtor(struct nvkm_bar *bar)
+{
+       void *data = gf100_bar_dtor(bar);
+
+       nvkm_memory_unref(&bar->flushFBZero);
+
+       if (bar->flushBAR2PhysMode)
+               iounmap(bar->flushBAR2PhysMode);
+
+       kfree(bar->func);
+       return data;
+}
+
+int
+r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
+{
+       struct nvkm_bar_func *rm;
+       struct nvkm_bar *bar;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_bar_dtor;
+       rm->oneinit = hw->oneinit;
+       rm->bar1.init = r535_bar_bar1_init;
+       rm->bar1.fini = r535_bar_bar1_fini;
+       rm->bar1.wait = r535_bar_bar1_wait;
+       rm->bar1.vmm = hw->bar1.vmm;
+       rm->bar2.init = r535_bar_bar2_init;
+       rm->bar2.fini = r535_bar_bar2_fini;
+       rm->bar2.wait = r535_bar_bar2_wait;
+       rm->bar2.vmm = hw->bar2.vmm;
+       rm->flush = r535_bar_flush;
+
+       ret = gf100_bar_new_(rm, device, type, inst, &bar);
+       if (ret) {
+               kfree(rm);
+               return ret;
+       }
+       *pbar = bar;
+
+       bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+       if (!bar->flushBAR2PhysMode)
+               return -ENOMEM;
+
+       bar->flushBAR2 = bar->flushBAR2PhysMode;
+
+       gf100_bar(*pbar)->bar2_halve = true;
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
new file mode 100644 (file)
index 0000000..0d73906
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/ce/priv.h>
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
+
+struct r535_ce_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ce_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_ce_obj = {
+       .dtor = r535_ce_obj_dtor,
+};
+
+static int
+r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_ce_obj *obj;
+       NVC0B5_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->version = 1;
+       args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ce_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+           enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_ce_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_ce_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
new file mode 100644 (file)
index 0000000..1aae151
--- /dev/null
@@ -0,0 +1,1725 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/disp/priv.h>
+#include <engine/disp/chan.h>
+#include <engine/disp/conn.h>
+#include <engine/disp/dp.h>
+#include <engine/disp/head.h>
+#include <engine/disp/ior.h>
+#include <engine/disp/outp.h>
+
+#include <core/ramht.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
+#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
+
+#include <linux/acpi.h>
+
+static u64
+r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
+{
+       switch (chan->object.oclass & 0xff) {
+       case 0x7d: *psize = 0x10000; return 0x680000;
+       case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize);
+       case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize);
+       case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize);
+       default:
+               BUG_ON(1);
+               break;
+       }
+
+       return 0ULL;
+}
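
Worked examples of the mapping above (the low byte of the class selects the
channel type; per-head channels stride by their own size):

    core (0x7d): size 0x10000, user area at 0x680000
    wndw (0x7e): size 0x01000, 0x690000 + head * 0x1000  (head 2 -> 0x692000)
    wimm (0x7b): size 0x01000, 0x6b0000 + head * 0x1000
    curs (0x7a): size 0x01000, 0x6d8000 + head * 0x1000
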
+
+static void
+r535_chan_intr(struct nvkm_disp_chan *chan, bool en)
+{
+}
+
+static void
+r535_chan_fini(struct nvkm_disp_chan *chan)
+{
+       nvkm_gsp_rm_free(&chan->rm.object);
+}
+
+static int
+r535_chan_push(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
+       NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       if (chan->memory) {
+               switch (nvkm_memory_target(chan->memory)) {
+               case NVKM_MEM_TARGET_NCOH:
+                       ctrl->addressSpace = ADDR_SYSMEM;
+                       ctrl->cacheSnoop = 0;
+                       break;
+               case NVKM_MEM_TARGET_HOST:
+                       ctrl->addressSpace = ADDR_SYSMEM;
+                       ctrl->cacheSnoop = 1;
+                       break;
+               case NVKM_MEM_TARGET_VRAM:
+                       ctrl->addressSpace = ADDR_FBMEM;
+                       break;
+               default:
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+
+               ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
+               ctrl->limit = nvkm_memory_size(chan->memory) - 1;
+       }
+
+       ctrl->hclass = chan->object.oclass;
+       ctrl->channelInstance = chan->head;
+       ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
+
+       return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r535_curs_init(struct nvkm_disp_chan *chan)
+{
+       NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = r535_chan_push(chan);
+       if (ret)
+               return ret;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+                                    (chan->object.oclass << 16) | chan->head,
+                                    chan->object.oclass, sizeof(*args), &chan->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->channelInstance = chan->head;
+
+       return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+static const struct nvkm_disp_chan_func
+r535_curs_func = {
+       .init = r535_curs_init,
+       .fini = r535_chan_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_curs = {
+       .func = &r535_curs_func,
+       .user = 73,
+};
+
+static int
+r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
+{
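+       /* RAMHT context value packs the user channel id (bits 31:25) with
+        * the low 14 bits of the RM client handle.
+        */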
+       return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
+                                chan->chid.user << 25 |
+                                (chan->disp->rm.client.object.handle & 0x3fff));
+}
+
+static void
+r535_dmac_fini(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_device *device = chan->disp->engine.subdev.device;
+       const u32 uoff = (chan->chid.user - 1) * 0x1000;
+
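+       /* Latch the current PUT pointer so r535_dmac_init() can restore it on resume. */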
+       chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
+       r535_chan_fini(chan);
+}
+
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+       NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = r535_chan_push(chan);
+       if (ret)
+               return ret;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+                                    (chan->object.oclass << 16) | chan->head,
+                                    chan->object.oclass, sizeof(*args), &chan->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->channelInstance = chan->head;
+       args->offset = chan->suspend_put;
+
+       return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+static int
+r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory)
+{
+       chan->memory = nvkm_umem_search(chan->object.client, memory);
+       if (IS_ERR(chan->memory))
+               return PTR_ERR(chan->memory);
+
+       return 0;
+}
+
+static const struct nvkm_disp_chan_func
+r535_dmac_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_dmac_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+       .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_func
+r535_wimm_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_dmac_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wimm = {
+       .func = &r535_wimm_func,
+       .user = 33,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wndw = {
+       .func = &r535_dmac_func,
+       .user = 1,
+};
+
+static void
+r535_core_fini(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_device *device = chan->disp->engine.subdev.device;
+
+       chan->suspend_put = nvkm_rd32(device, 0x680000);
+       r535_chan_fini(chan);
+}
+
+static const struct nvkm_disp_chan_func
+r535_core_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_core_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+       .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_user
+r535_core = {
+       .func = &r535_core_func,
+       .user = 0,
+};
+
+static int
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->brightness = lvl;
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+       int ret, lvl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ret;
+       }
+
+       lvl = ctrl->brightness;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return lvl;
+}
+
+static const struct nvkm_ior_func_bl
+r535_sor_bl = {
+       .get = r535_sor_bl_get,
+       .set = r535_sor_bl_set,
+};
+
+static void
+r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+       if (WARN_ON(size > sizeof(ctrl->bufferELD)))
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->numELDSize = size;
+       memcpy(ctrl->bufferELD, data, size);
+       ctrl->maxFreqSupported = 0; //XXX
+       ctrl->ctrl  = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE);
+       ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE);
+       ctrl->deviceEntry = head;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+       if (present)
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->deviceEntry = head;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hda
+r535_sor_hda = {
+       .hpd = r535_sor_hda_hpd,
+       .eld = r535_sor_hda_eld,
+};
+
+static void
+r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->mute = mute;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl;
+
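+       /* Mute before disabling audio, and unmute only after re-enabling. */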
+       if (!enable)
+               r535_sor_dp_audio_mute(sor, true);
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->enable = enable;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+
+       if (enable)
+               r535_sor_dp_audio_mute(sor, false);
+}
+
+static void
+r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn)
+{
+       struct nvkm_disp *disp = sor->disp;
+       struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+       ctrl->sorIndex = sor->id;
+       ctrl->dpLink = sor->asy.link == 2;
+       ctrl->bEnableOverride = 1;
+       ctrl->bMST = 1;
+       ctrl->hBlankSym = 0;
+       ctrl->vBlankSym = 0;
+       ctrl->colorFormat = 0;
+       ctrl->bEnableTwoHeadOneOr = 0;
+       ctrl->singleHeadMultistreamMode = 0;
+       ctrl->MST.slotStart = slot;
+       ctrl->MST.slotEnd = slot + slot_nr - 1;
+       ctrl->MST.PBN = pbn;
+       ctrl->MST.Timeslice = aligned_pbn;
+       ctrl->MST.sendACT = 0;
+       ctrl->MST.singleHeadMSTPipeline = 0;
+       ctrl->MST.bEnableAudioOverRightPanel = 0;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static int
+r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef,
+               u32 watermark, u32 hblanksym, u32 vblanksym)
+{
+       struct nvkm_disp *disp = sor->disp;
+       struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+       ctrl->sorIndex = sor->id;
+       ctrl->dpLink = sor->asy.link == 2;
+       ctrl->bEnableOverride = 1;
+       ctrl->bMST = 0;
+       ctrl->hBlankSym = hblanksym;
+       ctrl->vBlankSym = vblanksym;
+       ctrl->colorFormat = 0;
+       ctrl->bEnableTwoHeadOneOr = 0;
+       ctrl->SST.bEnhancedFraming = ef;
+       ctrl->SST.tuSize = 64;
+       ctrl->SST.waterMark = watermark;
+       ctrl->SST.bEnableAudioOverRightPanel = 0;
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static const struct nvkm_ior_func_dp
+r535_sor_dp = {
+       .sst = r535_sor_dp_sst,
+       .vcpi = r535_sor_dp_vcpi,
+       .audio = r535_sor_dp_audio,
+};
+
+static void
+r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling,
+                  bool scrambling_low_rates)
+{
+       struct nvkm_outp *outp = sor->asy.outp;
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->caps = 0;
+       if (support)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE);
+       if (scrambling)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE);
+       if (scrambling_low_rates)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE);
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->mute = mute;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->transmitControl =
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO);
+       ctrl->packetSize = 10;
+       ctrl->aPacket[0] = 0x03;
+       ctrl->aPacket[1] = 0x00;
+       ctrl->aPacket[2] = 0x00;
+       ctrl->aPacket[3] = enable ? 0x10 : 0x01;
+       ctrl->aPacket[4] = 0x00;
+       ctrl->aPacket[5] = 0x00;
+       ctrl->aPacket[6] = 0x00;
+       ctrl->aPacket[7] = 0x00;
+       ctrl->aPacket[8] = 0x00;
+       ctrl->aPacket[9] = 0x00;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static void
+r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       const u32 hdmi = head * 0x400;
+
+       r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable);
+       r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable);
+
+       /* General Control Packet (GCP): 0x01 = SET_AVMUTE, 0x10 = CLR_AVMUTE. */
+       nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
+       nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010);
+       nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
+}
+
+static void
+r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl;
+
+       if (!enable)
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->enable = enable;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hdmi
+r535_sor_hdmi = {
+       .ctrl = r535_sor_hdmi_ctrl,
+       .scdc = r535_sor_hdmi_scdc,
+       /*TODO: SF_USER -> KMS. */
+       .infoframe_avi = gv100_sor_hdmi_infoframe_avi,
+       .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi,
+       .audio = r535_sor_hdmi_audio,
+};
+
+static const struct nvkm_ior_func
+r535_sor = {
+       .hdmi = &r535_sor_hdmi,
+       .dp = &r535_sor_dp,
+       .hda = &r535_sor_hda,
+       .bl = &r535_sor_bl,
+};
+
+static int
+r535_sor_new(struct nvkm_disp *disp, int id)
+{
+       return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/);
+}
+
+static int
+r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
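+       /* Expose a fixed set of four SORs; routing is negotiated via DFP_ASSIGN_SOR. */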
+       *pmask = 0xf;
+       return 4;
+}
+
+static void
+r535_head_vblank_put(struct nvkm_head *head)
+{
+       struct nvkm_device *device = head->disp->engine.subdev.device;
+
+       nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000);
+}
+
+static void
+r535_head_vblank_get(struct nvkm_head *head)
+{
+       struct nvkm_device *device = head->disp->engine.subdev.device;
+
+       nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002);
+       nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002);
+}
+
+static void
+r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
+{
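+       /* No-op: head state is managed by GSP-RM. */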
+}
+
+static const struct nvkm_head_func
+r535_head = {
+       .state = r535_head_state,
+       .vblank_get = r535_head_vblank_get,
+       .vblank_put = r535_head_vblank_put,
+};
+
+static struct nvkm_conn *
+r535_conn_new(struct nvkm_disp *disp, u32 id)
+{
+       NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl;
+       struct nvbios_connE dcbE = {};
+       struct nvkm_conn *conn;
+       int ret, index;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return (void *)ctrl;
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(id);
+
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ERR_PTR(ret);
+       }
+
+       list_for_each_entry(conn, &disp->conns, head) {
+               if (conn->index == ctrl->data[0].index) {
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+                       return conn;
+               }
+       }
+
+       dcbE.type = ctrl->data[0].type;
+       index = ctrl->data[0].index;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       ret = nvkm_conn_new(disp, index, &dcbE, &conn);
+       if (ret)
+               return ERR_PTR(ret);
+
+       list_add_tail(&conn->head, &disp->conns);
+       return conn;
+}
+
+static void
+r535_outp_release(struct nvkm_outp *outp)
+{
+       outp->disp->rm.assigned_sors &= ~BIT(outp->ior->id);
+       outp->ior->asy.outp = NULL;
+       outp->ior = NULL;
+}
+
+static int
+r535_outp_acquire(struct nvkm_outp *outp, bool hda)
+{
+       struct nvkm_disp *disp = outp->disp;
+       struct nvkm_ior *ior;
+       NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
+       int ret, or;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl->sorExcludeMask = disp->rm.assigned_sors;
+       if (hda)
+               ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
+
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ret;
+       }
+
+       for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
+               if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
+                       disp->rm.assigned_sors |= BIT(or);
+                       break;
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag)))
+               return -EINVAL;
+
+       ior = nvkm_ior_find(disp, SOR, or);
+       if (WARN_ON(!ior))
+               return -EINVAL;
+
+       nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior);
+       return 0;
+}
+
+static int
+r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
+{
+       NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ret;
+       }
+
+       *displayid = ctrl->displayId;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+static struct nvkm_ior *
+r535_outp_inherit(struct nvkm_outp *outp)
+{
+       struct nvkm_disp *disp = outp->disp;
+       struct nvkm_head *head;
+       u32 displayid;
+       int ret;
+
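+       /* Look for a head GSP-RM left active on this output, and reconstruct
+        * the SOR state programmed by boot firmware.
+        */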
+       list_for_each_entry(head, &disp->heads, head) {
+               ret = r535_disp_head_displayid(disp, head->id, &displayid);
+               if (WARN_ON(ret))
+                       return NULL;
+
+               if (displayid == BIT(outp->index)) {
+                       NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+                       u32 id, proto;
+                       struct nvkm_ior *ior;
+
+                       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                                   NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+                                                   sizeof(*ctrl));
+                       if (IS_ERR(ctrl))
+                               return NULL;
+
+                       ctrl->subDeviceInstance = 0;
+                       ctrl->displayId = displayid;
+
+                       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+                       if (ret) {
+                               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+                               return NULL;
+                       }
+
+                       id = ctrl->index;
+                       proto = ctrl->protocol;
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+                       ior = nvkm_ior_find(disp, SOR, id);
+                       if (WARN_ON(!ior))
+                               return NULL;
+
+                       switch (proto) {
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 1;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 2;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 3;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+                               ior->arm.proto = DP;
+                               ior->arm.link = 1;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+                               ior->arm.proto = DP;
+                               ior->arm.link = 2;
+                               break;
+                       default:
+                               WARN_ON(1);
+                               return NULL;
+                       }
+
+                       ior->arm.proto_evo = proto;
+                       ior->arm.head = BIT(head->id);
+                       disp->rm.assigned_sors |= BIT(ior->id);
+                       return ior;
+               }
+       }
+
+       return NULL;
+}
+
+static int
+r535_outp_dfp_get_info(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
+
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ret;
+       }
+
+       nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
+                  ctrl->displayId, ctrl->flags, ctrl->flags2);
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayMask = BIT(outp->index);
+
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ret;
+       }
+
+       if (ctrl->displayMask & BIT(outp->index)) {
+               ret = r535_outp_dfp_get_info(outp);
+               if (ret == 0)
+                       ret = 1;
+       } else {
+               ret = 0;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+static int
+r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
+{
+       NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = id;
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
+{
+       NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ret;
+       }
+
+       *pid = ctrl->displayIdAssigned;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+static int
+r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
+{
+       NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->numLanes = lanes;
+       for (int i = 0; i < lanes; i++)
+               ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA,  PREEMPHASIS, pe[i]) |
+                               NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]);
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
+       int ret, retries;
+       u32 cmd, data;
+
+       cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+             NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+             NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
+       data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
+              NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
+              NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
+
+       if (mst)
+               cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
+
+       if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+               cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
+
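+       /* DPCD_RC02 bit 5: sink supports POST_LT_ADJ_REQ. */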
+       if (target == 0 &&
+            (outp->dp.dpcd[DPCD_RC02] & 0x20) &&
+           !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
+               cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
+
+       /* We should retry up to 3 times, but only if GSP asks politely */
+       for (retries = 0; retries < 3; ++retries) {
+               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->subDeviceInstance = 0;
+               ctrl->displayId = BIT(outp->index);
+               ctrl->retryTimeMs = 0;
+               ctrl->cmd = cmd;
+               ctrl->data = data;
+
+               ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+               if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
+                       /*
+                        * Device (likely an eDP panel) isn't ready yet; wait for
+                        * the time specified by GSP before retrying.
+                        */
+                       nvkm_debug(&disp->engine.subdev,
+                                  "Waiting %dms for GSP LT panel delay before retrying\n",
+                                  ctrl->retryTimeMs);
+                       msleep(ctrl->retryTimeMs);
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               } else {
+                       /* GSP didn't say to retry, or we were successful */
+                       if (ctrl->err)
+                               ret = -EIO;
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static int
+r535_dp_train(struct nvkm_outp *outp, bool retrain)
+{
+       for (int target = outp->dp.lttprs; target >= 0; target--) {
+               int ret = r535_dp_train_target(outp, target, outp->dp.lt.mst,
+                                                            outp->dp.lt.nr,
+                                                            outp->dp.lt.bw);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+           !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+               return 0;
+
+       if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+               return -EINVAL;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
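+       /* Convert rates from 10-kHz units to the table's 200-kHz units. */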
+       for (int i = 0; i < outp->dp.rates; i++)
+               ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
+       u8 size = *psize;
+       int ret;
+       int retries;
+
+       for (retries = 0; retries < 3; ++retries) {
+               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->subDeviceInstance = 0;
+               ctrl->displayId = BIT(outp->index);
+               ctrl->bAddrOnly = !size;
+               ctrl->cmd = type;
+               if (ctrl->bAddrOnly) {
+                       ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+                       ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD,  I2C_MOT, FALSE);
+               }
+               ctrl->addr = addr;
+               ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+               memcpy(ctrl->data, data, size);
+
+               ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+               if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
+                       /*
+                        * Device (likely an eDP panel) isn't ready yet; wait for
+                        * the time specified by GSP before retrying.
+                        */
+                       nvkm_debug(&disp->engine.subdev,
+                                  "Waiting %dms for GSP LT panel delay before retrying in AUX\n",
+                                  ctrl->retryTimeMs);
+                       msleep(ctrl->retryTimeMs);
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               } else {
+                       memcpy(data, ctrl->data, size);
+                       *psize = ctrl->size;
+                       ret = ctrl->replyType;
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+                       break;
+               }
+       }
+       return ret;
+}
+
+static int
+r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
+{
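+       /* No-op: AUX power is managed by GSP-RM. */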
+       return 0;
+}
+
+static void
+r535_dp_release(struct nvkm_outp *outp)
+{
+       if (!outp->dp.lt.bw) {
+               if (!WARN_ON(!outp->dp.rates))
+                       outp->dp.lt.bw = outp->dp.rate[0].rate / 27000;
+               else
+                       outp->dp.lt.bw = 0x06;
+       }
+
+       outp->dp.lt.nr = 0;
+
+       r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw);
+       r535_outp_release(outp);
+}
+
+static int
+r535_dp_acquire(struct nvkm_outp *outp, bool hda)
+{
+       return r535_outp_acquire(outp, hda);
+}
+
+static const struct nvkm_outp_func
+r535_dp = {
+       .detect = r535_outp_detect,
+       .inherit = r535_outp_inherit,
+       .acquire = r535_dp_acquire,
+       .release = r535_dp_release,
+       .dp.aux_pwr = r535_dp_aux_pwr,
+       .dp.aux_xfer = r535_dp_aux_xfer,
+       .dp.mst_id_get = r535_dp_mst_id_get,
+       .dp.mst_id_put = r535_dp_mst_id_put,
+       .dp.rates = r535_dp_rates,
+       .dp.train = r535_dp_train,
+       .dp.drive = r535_dp_drive,
+};
+
+static int
+r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
+{
+       NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ret;
+       }
+
+       ret = -E2BIG;
+       if (ctrl->bufferSize <= *psize) {
+               memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
+               *psize = ctrl->bufferSize;
+               ret = 0;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+static const struct nvkm_outp_func
+r535_tmds = {
+       .detect = r535_outp_detect,
+       .inherit = r535_outp_inherit,
+       .acquire = r535_outp_acquire,
+       .release = r535_outp_release,
+       .edid_get = r535_tmds_edid_get,
+};
+
+static int
+r535_outp_new(struct nvkm_disp *disp, u32 id)
+{
+       NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+       enum nvkm_ior_proto proto;
+       struct dcb_output dcbE = {};
+       struct nvkm_conn *conn;
+       struct nvkm_outp *outp;
+       u8 locn, link = 0;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(id);
+
+       ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+       if (ret) {
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+               return ret;
+       }
+
+       switch (ctrl->type) {
+       case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
+               return 0;
+       case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+               switch (ctrl->protocol) {
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+                       proto = TMDS;
+                       link = 1;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+                       proto = TMDS;
+                       link = 2;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+                       proto = TMDS;
+                       link = 3;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+                       proto = DP;
+                       link = 1;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+                       proto = DP;
+                       link = 2;
+                       break;
+               default:
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+
+               break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       locn = ctrl->location;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       conn = r535_conn_new(disp, id);
+       if (IS_ERR(conn))
+               return PTR_ERR(conn);
+
+       switch (proto) {
+       case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break;
+       case   DP: dcbE.type = DCB_OUTPUT_DP; break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       dcbE.location = locn;
+       dcbE.connector = conn->index;
+       dcbE.heads = disp->head.mask;
+       dcbE.i2c_index = 0xff;
+       dcbE.link = dcbE.sorconf.link = link;
+
+       if (proto == TMDS) {
+               ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp);
+               if (ret)
+                       return ret;
+       } else {
+               NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+               bool mst, wm;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                           NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->sorIndex = ~0;
+
+               ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+               if (ret) {
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+                       return ret;
+               }
+
+               switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+                       dcbE.dpconf.link_bw = 0x06;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+                       dcbE.dpconf.link_bw = 0x0a;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+                       dcbE.dpconf.link_bw = 0x14;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+                       dcbE.dpconf.link_bw = 0x1e;
+                       break;
+               default:
+                       dcbE.dpconf.link_bw = 0x00;
+                       break;
+               }
+
+               mst = ctrl->bIsMultistreamSupported;
+               wm = ctrl->bHasIncreasedWatermarkLimits;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               if (WARN_ON(!dcbE.dpconf.link_bw))
+                       return -EINVAL;
+
+               dcbE.dpconf.link_nr = 4;
+
+               ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp);
+               if (ret)
+                       return ret;
+
+               outp->dp.mst = mst;
+               outp->dp.increased_wm = wm;
+       }
+
+       outp->conn = conn;
+       list_add_tail(&outp->head, &disp->outps);
+       return 0;
+}
+
+static void
+r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+       struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq);
+       Nv2080DpIrqNotification *irq = repv;
+
+       if (WARN_ON(repc < sizeof(*irq)))
+               return;
+
+       nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId);
+
+       if (irq->displayId)
+               nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ);
+}
+
+static void
+r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+       struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd);
+       Nv2080HotplugNotification *hpd = repv;
+
+       if (WARN_ON(repc < sizeof(*hpd)))
+               return;
+
+       nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n",
+                  hpd->plugDisplayMask, hpd->unplugDisplayMask);
+
+       for (int i = 0; i < 31; i++) {
+               u32 mask = 0;
+
+               if (hpd->plugDisplayMask & BIT(i))
+                       mask |= NVKM_DPYID_PLUG;
+               if (hpd->unplugDisplayMask & BIT(i))
+                       mask |= NVKM_DPYID_UNPLUG;
+
+               if (mask)
+                       nvkm_event_ntfy(&disp->rm.event, i, mask);
+       }
+}
+
+static const struct nvkm_event_func
+r535_disp_event = {
+};
+
+static void
+r535_disp_intr_head_timing(struct nvkm_disp *disp, int head)
+{
+       struct nvkm_subdev *subdev = &disp->engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04));
+
+       if (stat & 0x00000002) {
+               nvkm_disp_vblank(disp, head);
+
+               nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002);
+       }
+}
+
+static irqreturn_t
+r535_disp_intr(struct nvkm_inth *inth)
+{
+       struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth);
+       struct nvkm_subdev *subdev = &disp->engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff;
+       int head;
+
+       for_each_set_bit(head, &mask, 8)
+               r535_disp_intr_head_timing(disp, head);
+
+       return IRQ_HANDLED;
+}
+
+static void
+r535_disp_fini(struct nvkm_disp *disp, bool suspend)
+{
+       if (!disp->engine.subdev.use.enabled)
+               return;
+
+       nvkm_gsp_rm_free(&disp->rm.object);
+
+       if (!suspend) {
+               nvkm_gsp_event_dtor(&disp->rm.irq);
+               nvkm_gsp_event_dtor(&disp->rm.hpd);
+               nvkm_event_fini(&disp->rm.event);
+
+               nvkm_gsp_rm_free(&disp->rm.objcom);
+               nvkm_gsp_device_dtor(&disp->rm.device);
+               nvkm_gsp_client_dtor(&disp->rm.client);
+       }
+}
+
+static int
+r535_disp_init(struct nvkm_disp *disp)
+{
+       int ret;
+
+       ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16,
+                               disp->func->root.oclass, 0, &disp->rm.object);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int
+r535_disp_oneinit(struct nvkm_disp *disp)
+{
+       struct nvkm_device *device = disp->engine.subdev.device;
+       struct nvkm_gsp *gsp = device->gsp;
+       NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+       int ret, i;
+
+       /* RAMIN. */
+       ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
+       if (ret)
+               return ret;
+
+       if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM))
+               return -EINVAL;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory);
+       ctrl->instMemSize = nvkm_memory_size(disp->inst->memory);
+       ctrl->instMemAddrSpace = ADDR_FBMEM;
+       ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+       if (ret)
+               return ret;
+
+       /* OBJs. */
+       ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
+                               &disp->rm.objcom);
+       if (ret)
+               return ret;
+
+       {
+               NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                          NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->wndw.mask = ctrl->windowPresentMask;
+               disp->wndw.nr = fls(disp->wndw.mask);
+               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       }
+
+       /* Backlight. */
+       {
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+               NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl;
+               struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(subdevice,
+                                           NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */
+
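+               /* Query backlight data via the NBCI/NVHG _DSM (function 0x14)
+                * and pass it to GSP-RM.
+                */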
+               {
+                       const guid_t NBCI_DSM_GUID =
+                               GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7,
+                                         0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44);
+                       u64 NBCI_DSM_REV = 0x00000102;
+                       const guid_t NVHG_DSM_GUID =
+                               GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+                                         0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
+                       u64 NVHG_DSM_REV = 0x00000102;
+                       acpi_handle handle = ACPI_HANDLE(device->dev);
+
+                       if (handle && acpi_has_method(handle, "_DSM")) {
+                               bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV,
+                                                          1ULL << 0x00000014);
+                               bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
+                                                          1ULL << 0x00000014);
+
+                               if (nbci || nvhg) {
+                                       union acpi_object argv4 = {
+                                               .buffer.type    = ACPI_TYPE_BUFFER,
+                                               .buffer.length  = sizeof(ctrl->backLightData),
+                                               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+                                       }, *obj;
+
+                                       obj = acpi_evaluate_dsm(handle, nbci ? &NBCI_DSM_GUID : &NVHG_DSM_GUID,
+                                                               0x00000102, 0x14, &argv4);
+                                       if (!obj) {
+                                               acpi_handle_info(handle, "failed to evaluate _DSM\n");
+                                       } else {
+                                               for (int i = 0; i < obj->package.count; i++) {
+                                                       union acpi_object *elt = &obj->package.elements[i];
+                                                       u32 size;
+
+                                                       if (elt->integer.value & ~0xffffffffULL)
+                                                               size = 8;
+                                                       else
+                                                               size = 4;
+
+                                                       memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
+                                                       ctrl->backLightDataSize += size;
+                                               }
+
+                                               ctrl->status = 0;
+                                               ACPI_FREE(obj);
+                                       }
+
+                                       kfree(argv4.buffer.pointer);
+                               }
+                       }
+               }
+
+               ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl);
+               if (ret)
+                       return ret;
+#endif
+       }
+
+       /* Enable manual DisplayPort mode. */
+       {
+               NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                           NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+               if (ret)
+                       return ret;
+       }
+
+       /* Number of heads. */
+       {
+               NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->head.nr = ctrl->numHeads;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       }
+
+       /* Create head objects. */
+       {
+               NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->head.mask = ctrl->headMask;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
+                       ret = nvkm_head_new_(&r535_head, disp, i);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask);
+       nvkm_debug(&disp->engine.subdev, "   SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
+       for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
+               ret = disp->func->sor.new(disp, i);
+               if (ret)
+                       return ret;
+       }
+
+       /* Create output (OR) objects. */
+       {
+               NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+               unsigned long mask;
+               int i;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               mask = ctrl->displayMask;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               for_each_set_bit(i, &mask, 32) {
+                       ret = r535_outp_new(disp, i);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG,
+                                        r535_disp_hpd, &disp->rm.hpd);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ,
+                                        r535_disp_irq, &disp->rm.irq);
+       if (ret)
+               return ret;
+
+       /* RAMHT. */
+       ret = nvkm_ramht_new(device, disp->func->ramht_size ? disp->func->ramht_size :
+                            0x1000, 0, disp->inst, &disp->ramht);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst);
+       if (ret < 0)
+               return ret;
+
+       ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev,
+                           r535_disp_intr, &disp->engine.subdev.inth);
+       if (ret)
+               return ret;
+
+       nvkm_inth_allow(&disp->engine.subdev.inth);
+       return 0;
+}
+
+static void
+r535_disp_dtor(struct nvkm_disp *disp)
+{
+       kfree(disp->func);
+}
+
+int
+r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
+{
+       struct nvkm_disp_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_disp_dtor;
+       rm->oneinit = r535_disp_oneinit;
+       rm->init = r535_disp_init;
+       rm->fini = r535_disp_fini;
+       rm->uevent = hw->uevent;
+       rm->sor.cnt = r535_sor_cnt;
+       rm->sor.new = r535_sor_new;
+       rm->ramht_size = hw->ramht_size;
+
+       rm->root = hw->root;
+
+       for (int i = 0; hw->user[i].ctor; i++) {
+               switch (hw->user[i].base.oclass & 0xff) {
+               case 0x73: rm->user[i] = hw->user[i]; break;
+               case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
+               case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
+               case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
+               case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
+               default:
+                       WARN_ON(1);
+                       continue;
+               }
+       }
+
+       ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
+       if (ret) {
+               kfree(rm);
+               return ret;
+       }
+
+       mutex_init(&(*pdisp)->super.mutex); //XXX
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
new file mode 100644 (file)
index 0000000..6305f3a
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/instmem/priv.h>
+
+#include <subdev/gsp.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+
+struct fbsr_item {
+       const char *type;
+       u64 addr;
+       u64 size;
+
+       struct list_head head;
+};
+
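+/* Book-keeping for FB save/restore (FBSR): the VRAM regions RM must copy
+ * into a sysmem backup across suspend.  "hmemory" is a cursor for handing
+ * out unique memlist object handles; "sys_offset" tracks where the next
+ * region lands in the backup buffer.
+ */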
+struct fbsr {
+       struct list_head items;
+
+       u64 size;
+       int regions;
+
+       struct nvkm_gsp_client client;
+       struct nvkm_gsp_device device;
+
+       u64 hmemory;
+       u64 sys_offset;
+};
+
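+/* Describe a physically-backed memory region (sysmem or vidmem) to RM as
+ * an NV01_MEMORY_LIST_* object, so that RM can refer to it by handle.
+ */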
+static int
+fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+            u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+{
+       struct nvkm_gsp_client *client = device->object.client;
+       struct nvkm_gsp *gsp = client->gsp;
+       const u32 pages = size / GSP_PAGE_SIZE;
+       rpc_alloc_memory_v13_01 *rpc;
+       int ret;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
+                              sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       rpc->hClient = client->object.handle;
+       rpc->hDevice = device->object.handle;
+       rpc->hMemory = handle;
+       if (aper == NVKM_MEM_TARGET_HOST) {
+               rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
+               rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
+                            NVDEF(NVOS02, FLAGS, LOCATION, PCI) |
+                            NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+       } else {
+               rpc->hClass = NV01_MEMORY_LIST_FBMEM;
+               rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
+                            NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) |
+                            NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+               rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
+       }
+       rpc->pteAdjust = 0;
+       rpc->length = size;
+       rpc->pageCount = pages;
+       rpc->pteDesc.idr = 0;
+       rpc->pteDesc.reserved1 = 0;
+       rpc->pteDesc.length = pages;
+
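+       /* PTEs are 4KiB (GSP_PAGE_SIZE) page frame numbers, hence the >> 12. */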
+       if (sgt) {
+               struct scatterlist *sgl;
+               int pte = 0, idx;
+
+               for_each_sgtable_dma_sg(sgt, sgl, idx) {
+                       for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
+                               rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
+               }
+       } else {
+               for (int i = 0; i < pages; i++)
+                       rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
+       }
+
+       ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
+       if (ret)
+               return ret;
+
+       object->client = device->object.client;
+       object->parent = &device->object;
+       object->handle = handle;
+       return 0;
+}
+
+static int
+fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
+{
+       NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fbsr->client.gsp;
+       struct nvkm_gsp_object memlist;
+       int ret;
+
+       ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+                          item->addr, item->size, NULL, &memlist);
+       if (ret)
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl)) {
+               ret = PTR_ERR(ctrl);
+               goto done;
+       }
+
+       ctrl->fbsrType = FBSR_TYPE_DMA;
+       ctrl->hClient = fbsr->client.object.handle;
+       ctrl->hVidMem = fbsr->hmemory++;
+       ctrl->vidOffset = 0;
+       ctrl->sysOffset = fbsr->sys_offset;
+       ctrl->size = item->size;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+done:
+       nvkm_gsp_rm_free(&memlist);
+       if (ret)
+               return ret;
+
+       fbsr->sys_offset += item->size;
+       return 0;
+}
+
+static int
+fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
+{
+       NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fbsr->client.gsp;
+       struct nvkm_gsp_object memlist;
+       int ret;
+
+       ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+                          0, fbsr->size, sgt, &memlist);
+       if (ret)
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+       if (IS_ERR(ctrl)) {
+               nvkm_gsp_rm_free(&memlist);
+               return PTR_ERR(ctrl);
+       }
+
+       ctrl->fbsrType = FBSR_TYPE_DMA;
+       ctrl->numRegions = fbsr->regions;
+       ctrl->hClient = fbsr->client.object.handle;
+       ctrl->hSysMem = fbsr->hmemory++;
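+       /* RM's own FB allocations are saved after our regions in the buffer. */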
+       ctrl->gspFbAllocsSysOffset = items_size;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+
+       nvkm_gsp_rm_free(&memlist);
+       return ret;
+}
+
+static bool
+fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size)
+{
+       struct fbsr_item *item;
+
+       if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
+               return false;
+
+       item->type = type;
+       item->addr = addr;
+       item->size = size;
+       list_add_tail(&item->head, &fbsr->items);
+       return true;
+}
+
+static bool
+fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
+{
+       return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+}
+
+static void
+r535_instmem_resume(struct nvkm_instmem *imem)
+{
+       /* RM has restored VRAM contents already, so we just need to free the sysmem buffer. */
+       if (imem->rm.fbsr_valid) {
+               nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
+               imem->rm.fbsr_valid = false;
+       }
+}
+
+static int
+r535_instmem_suspend(struct nvkm_instmem *imem)
+{
+       struct nvkm_subdev *subdev = &imem->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_instobj *iobj;
+       struct fbsr fbsr = {};
+       struct fbsr_item *item, *temp;
+       u64 items_size;
+       int ret;
+
+       INIT_LIST_HEAD(&fbsr.items);
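+       /* Arbitrary seed for the unique handles given to each memlist object. */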
+       fbsr.hmemory = 0xcaf00003;
+
+       /* Create a list of all regions we need RM to save during suspend. */
+       list_for_each_entry(iobj, &imem->list, head) {
+               if (iobj->preserve) {
+                       if (!fbsr_inst(&fbsr, "inst", &iobj->memory))
+                               return -ENOMEM;
+               }
+       }
+
+       list_for_each_entry(iobj, &imem->boot, head) {
+               if (!fbsr_inst(&fbsr, "boot", &iobj->memory))
+                       return -ENOMEM;
+       }
+
+       if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size))
+               return -ENOMEM;
+
+       /* Determine memory requirements. */
+       list_for_each_entry(item, &fbsr.items, head) {
+               nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n",
+                          item->addr, item->size, item->type);
+               fbsr.size += item->size;
+               fbsr.regions++;
+       }
+
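+       /* Size of the listed regions alone; RM's reserved areas are added below. */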
+       items_size = fbsr.size;
+       nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size);
+
+       fbsr.size += gsp->fb.rsvd_size;
+       fbsr.size += gsp->fb.bios.vga_workspace.size;
+       nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
+
+       ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
+       if (ret)
+               goto done;
+
+       /* Tell RM about the sysmem which will hold VRAM contents across suspend. */
+       ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device);
+       if (ret)
+               goto done_sgt;
+
+       ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
+       if (WARN_ON(ret))
+               goto done_sgt;
+
+       /* Send VRAM regions that need saving. */
+       list_for_each_entry(item, &fbsr.items, head) {
+               ret = fbsr_send(&fbsr, item);
+               if (WARN_ON(ret))
+                       goto done_sgt;
+       }
+
+       imem->rm.fbsr_valid = true;
+
+       /* Clean up everything except the sysmem backup, which will be removed after resume. */
+done_sgt:
+       if (ret) /* ... unless we failed already. */
+               nvkm_gsp_sg_free(device, &imem->rm.fbsr);
+done:
+       list_for_each_entry_safe(item, temp, &fbsr.items, head) {
+               list_del(&item->head);
+               kfree(item);
+       }
+
+       nvkm_gsp_device_dtor(&fbsr.device);
+       nvkm_gsp_client_dtor(&fbsr.client);
+       return ret;
+}
+
+static void *
+r535_instmem_dtor(struct nvkm_instmem *imem)
+{
+       kfree(imem->func);
+       return imem;
+}
+
+int
+r535_instmem_new(const struct nvkm_instmem_func *hw,
+                struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                struct nvkm_instmem **pinstmem)
+{
+       struct nvkm_instmem_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_instmem_dtor;
+       rm->fini = hw->fini;
+       rm->suspend = r535_instmem_suspend;
+       rm->resume  = r535_instmem_resume;
+       rm->memory_new = hw->memory_new;
+       rm->memory_wrap = hw->memory_wrap;
+       rm->zero = false;
+
+       ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
new file mode 100644 (file)
index 0000000..621e5df
--- /dev/null
@@ -0,0 +1,550 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/fifo/priv.h>
+#include <engine/fifo/cgrp.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/chid.h>
+#include <engine/fifo/runl.h>
+
+#include <core/gpuobj.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+#include <engine/gr.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+
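+/* Doorbell token: runlist ID in the upper 16 bits, channel ID in the lower. */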
+static u32
+r535_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+       return (chan->cgrp->runl->id << 16) | chan->id;
+}
+
+static void
+r535_chan_stop(struct nvkm_chan *chan)
+{
+}
+
+static void
+r535_chan_start(struct nvkm_chan *chan)
+{
+}
+
+static void
+r535_chan_ramfc_clear(struct nvkm_chan *chan)
+{
+       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+
+       nvkm_gsp_rm_free(&chan->rm.object);
+
+       dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
+                         chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);
+
+       nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
+}
+
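+/* Channels are grouped eight to a USERD page; chan->id picks the page and slot. */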
+#define CHID_PER_USERD 8
+
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+       struct nvkm_engn *engn;
+       struct nvkm_device *device = fifo->engine.subdev.device;
+       NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+       const int userd_p = chan->id / CHID_PER_USERD;
+       const int userd_i = chan->id % CHID_PER_USERD;
+       u32 eT = ~0;
+       int ret;
+
+       if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+               ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+               if (ret)
+                       return ret;
+       }
+
+       nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+               eT = engn->id;
+               break;
+       }
+
+       if (WARN_ON(eT == ~0))
+               return -EINVAL;
+
+       chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+                                                 fifo->rm.mthdbuf_size,
+                                                 &chan->rm.mthdbuf.addr, GFP_KERNEL);
+       if (!chan->rm.mthdbuf.ptr)
+               return -ENOMEM;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
+                                    fifo->func->chan.user.oclass, sizeof(*args),
+                                    &chan->rm.object);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->gpFifoOffset = offset;
+       args->gpFifoEntries = length / 8;
+
+       args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+       args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+       args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
+       if (!priv)
+               args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+       else
+               args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+       args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+       args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+       args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+       args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+       args->hVASpace = chan->vmm->rm.object.handle;
+       args->engineType = eT;
+
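+       /* addressSpace: 2 == vidmem, 1 == sysmem, matching each buffer's
+        * placement; cacheAttrib: 1 == cached, 0 == uncached (assumed from
+        * RM conventions).
+        */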
+       args->instanceMem.base = chan->inst->addr;
+       args->instanceMem.size = chan->inst->size;
+       args->instanceMem.addressSpace = 2;
+       args->instanceMem.cacheAttrib = 1;
+
+       args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+       args->userdMem.size = fifo->func->chan.func->userd->size;
+       args->userdMem.addressSpace = 2;
+       args->userdMem.cacheAttrib = 1;
+
+       args->ramfcMem.base = chan->inst->addr + 0;
+       args->ramfcMem.size = 0x200;
+       args->ramfcMem.addressSpace = 2;
+       args->ramfcMem.cacheAttrib = 1;
+
+       args->mthdbufMem.base = chan->rm.mthdbuf.addr;
+       args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+       args->mthdbufMem.addressSpace = 1;
+       args->mthdbufMem.cacheAttrib = 0;
+
+       if (!priv)
+               args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+       else
+               args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+       args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+       args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+       ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+       if (ret)
+               return ret;
+
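+       /* Bind the channel to its engine, then enable GPFIFO scheduling. */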
+       {
+               NVA06F_CTRL_BIND_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+                                           NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
+               if (WARN_ON(IS_ERR(ctrl)))
+                       return PTR_ERR(ctrl);
+
+               ctrl->engineType = eT;
+
+               ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+               if (ret)
+                       return ret;
+       }
+
+       {
+               NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+                                           NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
+               if (WARN_ON(IS_ERR(ctrl)))
+                       return PTR_ERR(ctrl);
+
+               ctrl->bEnable = 1;
+               ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+       }
+
+       return ret;
+}
+
+static const struct nvkm_chan_func_ramfc
+r535_chan_ramfc = {
+       .write = r535_chan_ramfc_write,
+       .clear = r535_chan_ramfc_clear,
+       .devm = 0xfff,
+       .priv = true,
+};
+
+static const struct nvkm_chan_func
+r535_chan = {
+       .inst = &gf100_chan_inst,
+       .userd = &gv100_chan_userd,
+       .ramfc = &r535_chan_ramfc,
+       .start = r535_chan_start,
+       .stop = r535_chan_stop,
+       .doorbell_handle = r535_chan_doorbell_handle,
+};
+
+static const struct nvkm_cgrp_func
+r535_cgrp = {
+};
+
+static int
+r535_engn_nonstall(struct nvkm_engn *engn)
+{
+       struct nvkm_subdev *subdev = &engn->engine->subdev;
+       int ret;
+
+       ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
+       WARN_ON(ret == -ENOENT);
+       return ret;
+}
+
+static const struct nvkm_engn_func
+r535_ce = {
+       .nonstall = r535_engn_nonstall,
+};
+
+static int
+r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       /* RM requires GR context buffers to remain mapped until after the
+        * channel has been destroyed (as opposed to after the last gr obj
+        * has been deleted).
+        *
+        * Take an extra ref here, which will be released once the channel
+        * object has been deleted.
+        */
+       refcount_inc(&vctx->refs);
+       chan->rm.grctx = vctx;
+       return 0;
+}
+
+static const struct nvkm_engn_func
+r535_gr = {
+       .nonstall = r535_engn_nonstall,
+       .ctor2 = r535_gr_ctor,
+};
+
+static int
+r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       struct nvkm_gsp_client *client = &chan->vmm->rm.client;
+       NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
+                                   NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->hClient = client->object.handle;
+       ctrl->hObject = chan->rm.object.handle;
+       ctrl->hChanClient = client->object.handle;
+       ctrl->virtAddress = vctx->vma->addr;
+       ctrl->size = vctx->inst->size;
+       ctrl->engineType = engn->id;
+       ctrl->ChID = chan->id;
+
+       return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
+}
+
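+/* Falcon engines need a context buffer whose size RM reports at boot
+ * (see r535_fifo_ectx_size()); allocate, map and promote one per channel.
+ */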
+static int
+r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       int ret;
+
+       if (WARN_ON(!engn->rm.size))
+               return -EINVAL;
+
+       ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
+                             &vctx->inst);
+       if (ret)
+               return ret;
+
+       ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
+       if (ret)
+               return ret;
+
+       return r535_flcn_bind(engn, vctx, chan);
+}
+
+static const struct nvkm_engn_func
+r535_flcn = {
+       .nonstall = r535_engn_nonstall,
+       .ctor2 = r535_flcn_ctor,
+};
+
+static void
+r535_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+static void
+r535_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+static const struct nvkm_runl_func
+r535_runl = {
+       .block = r535_runl_block,
+       .allow = r535_runl_allow,
+};
+
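+/* Map an NVKM engine type/instance to its NV2080_ENGINE_TYPE. */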
+static int
+r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
+{
+       switch (type) {
+       case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
+       case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
+       case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
+       case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
+       case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
+       case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
+       case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
+       case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
+       default:
+               break;
+       }
+
+       WARN_ON(1);
+       return -EINVAL;
+}
+
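+/* Map an RM_ENGINE_TYPE to an NVKM engine type, returning the instance
+ * number (or -EINVAL for engines NVKM doesn't expose).
+ */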
+static int
+r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
+{
+       switch (rm) {
+       case RM_ENGINE_TYPE_GR0:
+               *ptype = NVKM_ENGINE_GR;
+               return 0;
+       case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
+               *ptype = NVKM_ENGINE_CE;
+               return rm - RM_ENGINE_TYPE_COPY0;
+       case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
+               *ptype = NVKM_ENGINE_NVDEC;
+               return rm - RM_ENGINE_TYPE_NVDEC0;
+       case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
+               *ptype = NVKM_ENGINE_NVENC;
+               return rm - RM_ENGINE_TYPE_NVENC0;
+       case RM_ENGINE_TYPE_SW:
+               *ptype = NVKM_ENGINE_SW;
+               return 0;
+       case RM_ENGINE_TYPE_SEC2:
+               *ptype = NVKM_ENGINE_SEC2;
+               return 0;
+       case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
+               *ptype = NVKM_ENGINE_NVJPG;
+               return rm - RM_ENGINE_TYPE_NVJPEG0;
+       case RM_ENGINE_TYPE_OFA:
+               *ptype = NVKM_ENGINE_OFA;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
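+/* Query RM for the context buffer size of each constructed falcon and
+ * record it against the matching engine (consumed by r535_flcn_ctor()).
+ */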
+static int
+r535_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+       NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+       struct nvkm_runl *runl;
+       struct nvkm_engn *engn;
+
+       ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
+                                  sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+               nvkm_runl_foreach(runl, fifo) {
+                       nvkm_runl_foreach_engn(engn, runl) {
+                               if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+                                       engn->rm.size =
+                                               ctrl->constructedFalconsTable[i].ctxBufferSize;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       return 0;
+}
+
+static int
+r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+       struct nvkm_subdev *subdev = &fifo->engine.subdev;
+       struct nvkm_gsp *gsp = subdev->device->gsp;
+       struct nvkm_runl *runl;
+       struct nvkm_engn *engn;
+       u32 cgids = 2048;
+       u32 chids = 2048;
+       int ret;
+       NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
+
+       if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
+           (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       for (int i = 0; i < ctrl->numEntries; i++) {
+               const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+               const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+
+               runl = nvkm_runl_get(fifo, id, addr);
+               if (!runl) {
+                       runl = nvkm_runl_new(fifo, id, addr, 0);
+                       if (WARN_ON(IS_ERR(runl)))
+                               continue;
+               }
+       }
+
+       for (int i = 0; i < ctrl->numEntries; i++) {
+               const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+               const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
+               const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+               enum nvkm_subdev_type type;
+               int inst, nv2080;
+
+               runl = nvkm_runl_get(fifo, id, addr);
+               if (!runl)
+                       continue;
+
+               inst = r535_fifo_engn_type(rmid, &type);
+               if (inst < 0) {
+                       nvkm_warn(subdev, "unsupported RM_ENGINE_TYPE 0x%x\n", rmid);
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               nv2080 = r535_fifo_2080_type(type, inst);
+               if (nv2080 < 0) {
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               switch (type) {
+               case NVKM_ENGINE_CE:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
+                       break;
+               case NVKM_ENGINE_GR:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
+                       break;
+               case NVKM_ENGINE_NVDEC:
+               case NVKM_ENGINE_NVENC:
+               case NVKM_ENGINE_NVJPG:
+               case NVKM_ENGINE_OFA:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
+                       break;
+               case NVKM_ENGINE_SW:
+                       continue;
+               default:
+                       engn = NULL;
+                       break;
+               }
+
+               if (!engn) {
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+
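+       /* Size of the CE fault method buffer each channel must allocate. */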
+       {
+               NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                          NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               fifo->rm.mthdbuf_size = ctrl->size;
+
+               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       }
+
+       return r535_fifo_ectx_size(fifo);
+}
+
+static void
+r535_fifo_dtor(struct nvkm_fifo *fifo)
+{
+       kfree(fifo->func);
+}
+
+int
+r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
+{
+       struct nvkm_fifo_func *rm;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_fifo_dtor;
+       rm->runl_ctor = r535_fifo_runl_ctor;
+       rm->runl = &r535_runl;
+       rm->cgrp = hw->cgrp;
+       rm->cgrp.func = &r535_cgrp;
+       rm->chan = hw->chan;
+       rm->chan.func = &r535_chan;
+       rm->nonstall = &ga100_fifo_nonstall;
+       rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
+
+       return nvkm_fifo_new_(rm, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
new file mode 100644 (file)
index 0000000..37bde54
--- /dev/null
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/gr/gf100.h>
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+#define R535_GR_MAX_CTXBUFS 9
+
+struct r535_gr {
+       struct nvkm_gr base;
+
+       struct {
+               u16 bufferId;
+               u32 size;
+               u8  page;
+               u8  align;
+               bool global;
+               bool init;
+               bool ro;
+       } ctxbuf[R535_GR_MAX_CTXBUFS];
+       int ctxbuf_nr;
+
+       struct nvkm_memory *ctxbuf_mem[R535_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr_chan {
+       struct nvkm_object object;
+       struct r535_gr *gr;
+
+       struct nvkm_vmm *vmm;
+       struct nvkm_chan *chan;
+
+       struct nvkm_memory *mem[R535_GR_MAX_CTXBUFS];
+       struct nvkm_vma    *vma[R535_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_gr_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_gr_obj = {
+       .dtor = r535_gr_obj_dtor,
+};
+
+static int
+r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+       struct r535_gr_obj *obj;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
+                                &obj->rm);
+}
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+       struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+       struct r535_gr *gr = grc->gr;
+
+       for (int i = 0; i < gr->ctxbuf_nr; i++) {
+               nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+               nvkm_memory_unref(&grc->mem[i]);
+       }
+
+       nvkm_vmm_unref(&grc->vmm);
+       return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+       .dtor = r535_gr_chan_dtor,
+};
+
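+/* Describe a channel's context buffers to RM and have it promote them.
+ * With "golden" set, the global buffers are allocated too; otherwise the
+ * existing global allocations are reused and only per-channel buffers are
+ * created.
+ */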
+static int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+                   struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+                   struct nvkm_gsp_object *chan)
+{
+       struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+                                   NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       ctrl->engineType = 1;
+       ctrl->hChanClient = vmm->rm.client.object.handle;
+       ctrl->hObject = chan->handle;
+
+       for (int i = 0; i < gr->ctxbuf_nr; i++) {
+               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+                       &ctrl->promoteEntry[ctrl->entryCount];
+               const bool alloc = golden || !gr->ctxbuf[i].global;
+               int ret;
+
+               entry->bufferId = gr->ctxbuf[i].bufferId;
+               entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+               if (alloc) {
+                       ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+                                             NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+                                             gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+                                             gr->ctxbuf[i].init, &pmem[i]);
+                       if (WARN_ON(ret))
+                               return ret;
+
+                       if (gr->ctxbuf[i].bufferId ==
+                                       NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+                               entry->bNonmapped = 1;
+               } else {
+                       if (gr->ctxbuf[i].bufferId ==
+                               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+                               continue;
+
+                       pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+               }
+
+               if (!entry->bNonmapped) {
+                       struct gf100_vmm_map_v0 args = {
+                               .priv = 1,
+                               .ro   = gr->ctxbuf[i].ro,
+                       };
+
+                       mutex_lock(&vmm->mutex.vmm);
+                       ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+                                                 nvkm_memory_size(pmem[i]), &pvma[i]);
+                       mutex_unlock(&vmm->mutex.vmm);
+                       if (ret)
+                               return ret;
+
+                       ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+                       if (ret)
+                               return ret;
+
+                       entry->gpuVirtAddr = pvma[i]->addr;
+               }
+
+               if (entry->bInitialize) {
+                       entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+                       entry->size = gr->ctxbuf[i].size;
+                       entry->physAttr = 4;
+               }
+
+               nvkm_debug(subdev,
+                          "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+                          entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+                          entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+               ctrl->entryCount++;
+       }
+
+       return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+static int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+                struct nvkm_object **pobject)
+{
+       struct r535_gr *gr = r535_gr(base);
+       struct r535_gr_chan *grc;
+       int ret;
+
+       if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+       grc->gr = gr;
+       grc->vmm = nvkm_vmm_ref(chan->vmm);
+       grc->chan = chan;
+       *pobject = &grc->object;
+
+       ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+       struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+       return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
+static int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+       NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+       struct r535_gr *gr = container_of(base, typeof(*gr), base);
+       struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_mmu *mmu = device->mmu;
+       struct {
+               struct nvkm_memory *inst;
+               struct nvkm_vmm *vmm;
+               struct nvkm_gsp_object chan;
+               struct nvkm_vma *vma[R535_GR_MAX_CTXBUFS];
+       } golden = {};
+       int ret;
+
+       /* Allocate a channel to use for golden context init. */
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+       if (ret)
+               goto done;
+
+       ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+       if (ret)
+               goto done;
+
+       ret = mmu->func->promote_vmm(golden.vmm);
+       if (ret)
+               goto done;
+
+       {
+               NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+               args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
+                                            device->fifo->func->chan.user.oclass,
+                                            sizeof(*args), &golden.chan);
+               if (IS_ERR(args)) {
+                       ret = PTR_ERR(args);
+                       goto done;
+               }
+
+               args->gpFifoOffset = 0;
+               args->gpFifoEntries = 0x1000 / 8;
+               args->flags =
+                       NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
+                       NVDEF(NVOS04, FLAGS, VPR, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
+                       NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
+                       NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
+                       NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
+                       NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
+                       NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
+                       NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
+                       NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
+                       NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
+                       NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+               args->hVASpace = golden.vmm->rm.object.handle;
+               args->engineType = 1;
+               args->instanceMem.base = nvkm_memory_addr(golden.inst);
+               args->instanceMem.size = 0x1000;
+               args->instanceMem.addressSpace = 2;
+               args->instanceMem.cacheAttrib = 1;
+               args->ramfcMem.base = nvkm_memory_addr(golden.inst);
+               args->ramfcMem.size = 0x200;
+               args->ramfcMem.addressSpace = 2;
+               args->ramfcMem.cacheAttrib = 1;
+               args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
+               args->userdMem.size = 0x200;
+               args->userdMem.addressSpace = 2;
+               args->userdMem.cacheAttrib = 1;
+               args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
+               args->mthdbufMem.size = 0x5000;
+               args->mthdbufMem.addressSpace = 2;
+               args->mthdbufMem.cacheAttrib = 1;
+               args->internalFlags =
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+               ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
+               if (ret)
+                       goto done;
+       }
+
+       /* Fetch context buffer info from RM and allocate each of them here to use
+        * during golden context init (or later as a global context buffer).
+        *
+        * Also build the information that'll be used to create channel contexts.
+        */
+       info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+                                  sizeof(*info));
+       if (WARN_ON(IS_ERR(info))) {
+               ret = PTR_ERR(info);
+               goto done;
+       }
+
+       for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
+               static const struct {
+                       u32     id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+                       u32     id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+                       bool global;
+                       bool   init;
+                       bool     ro;
+               } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+                       .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+                       .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+                       /*                                       global   init     ro */
+                       _A(           GRAPHICS,             MAIN, false,  true, false),
+                       _B(                                PATCH, false,  true, false),
+                       _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB,  true, false, false),
+                       _B(                             PAGEPOOL,  true, false, false),
+                       _B(                         ATTRIBUTE_CB,  true, false, false),
+                       _B(                        RTV_CB_GLOBAL,  true, false, false),
+                       _B(                           FECS_EVENT,  true,  true, false),
+                       _B(                      PRIV_ACCESS_MAP,  true,  true,  true),
+#undef _B
+#undef _A
+               };
+               u32 size = info->engineContextBuffersInfo[0].engine[i].size;
+               u8 align, page;
+               int id;
+
+               for (id = 0; id < ARRAY_SIZE(map); id++) {
+                       if (map[id].id0 == i)
+                               break;
+               }
+
+               nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+                          size, (id < ARRAY_SIZE(map)) ? "*" : "");
+               if (id >= ARRAY_SIZE(map))
+                       continue;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+                       size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
+               if      (size >= 1 << 21) page = 21;
+               else if (size >= 1 << 16) page = 16;
+               else                      page = 12;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+                       align = order_base_2(size);
+               else
+                       align = page;
+
+               if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+                       continue;
+
+               gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+               gr->ctxbuf[gr->ctxbuf_nr].size     = size;
+               gr->ctxbuf[gr->ctxbuf_nr].page     = page;
+               gr->ctxbuf[gr->ctxbuf_nr].align    = align;
+               gr->ctxbuf[gr->ctxbuf_nr].global   = map[id].global;
+               gr->ctxbuf[gr->ctxbuf_nr].init     = map[id].init;
+               gr->ctxbuf[gr->ctxbuf_nr].ro       = map[id].ro;
+               gr->ctxbuf_nr++;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+                       if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+                               continue;
+
+                       gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+                       gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+                               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+                       gr->ctxbuf_nr++;
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+
+       /* Promote golden context to RM. */
+       ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+       if (ret)
+               goto done;
+
+       /* Allocate 3D class on channel to trigger golden context init in RM. */
+       {
+               int i;
+
+               for (i = 0; gr->base.func->sclass[i].ctor; i++) {
+                       if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
+                               struct nvkm_gsp_object threed;
+
+                               ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
+                                                       gr->base.func->sclass[i].oclass, 0,
+                                                       &threed);
+                               if (ret)
+                                       goto done;
+
+                               nvkm_gsp_rm_free(&threed);
+                               break;
+                       }
+               }
+
+               if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+       }
+
+done:
+       nvkm_gsp_rm_free(&golden.chan);
+       for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+               nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+       nvkm_vmm_unref(&golden.vmm);
+       nvkm_memory_unref(&golden.inst);
+       return ret;
+}
+
+static void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+       struct r535_gr *gr = r535_gr(base);
+
+       while (gr->ctxbuf_nr)
+               nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+       kfree(gr->base.func);
+       return gr;
+}
+
+int
+r535_gr_new(const struct gf100_gr_func *hw,
+           struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
+{
+       struct nvkm_gr_func *rm;
+       struct r535_gr *gr;
+       int nclass;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_gr_dtor;
+       rm->oneinit = r535_gr_oneinit;
+       rm->units = r535_gr_units;
+       rm->chan_new = r535_gr_chan_new;
+
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_gr_obj_ctor;
+       }
+
+       if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       *pgr = &gr->base;
+
+       return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
new file mode 100644 (file)
index 0000000..f42879b
--- /dev/null
@@ -0,0 +1,2252 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "priv.h"
+
+#include <core/pci.h>
+#include <subdev/timer.h>
+#include <subdev/vfn.h>
+#include <engine/fifo/chan.h>
+#include <engine/sec2.h>
+#include <nvif/log.h>
+
+#include <nvfw/fw.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
+#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
+#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+#include <linux/parser.h>
+
+extern struct dentry *nouveau_debugfs_root;
+
+const struct nvkm_gsp_rm
+r535_gsp_rm = {
+       .api = &r535_rm,
+};
+
+static void
+r535_gsp_msgq_work(struct work_struct *work)
+{
+       struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);
+
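+       /* Process a pending GSP-RM message; cmdq.mutex serialises us with RPC submission. */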
+       mutex_lock(&gsp->cmdq.mutex);
+       if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+               r535_gsp_msg_recv(gsp, 0, 0);
+       mutex_unlock(&gsp->cmdq.mutex);
+}
+
+static irqreturn_t
+r535_gsp_intr(struct nvkm_inth *inth)
+{
+       struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth);
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008);
+       u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 +
+                                                 gsp->falcon.func->riscv_irqmask);
+       u32 stat = intr & inte;
+
+       if (!stat) {
+               nvkm_debug(subdev, "intr %08x inte %08x\n", intr, inte);
+               return IRQ_NONE;
+       }
+
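+       /* Bit 0x40 signals a pending message in the GSP->host message queue. */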
+       if (stat & 0x00000040) {
+               nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040);
+               schedule_work(&gsp->msgq.work);
+               stat &= ~0x00000040;
+       }
+
+       if (stat) {
+               nvkm_error(subdev, "intr %08x\n", stat);
+               nvkm_falcon_wr32(&gsp->falcon, 0x014, stat);
+               nvkm_falcon_wr32(&gsp->falcon, 0x004, stat);
+       }
+
+       nvkm_falcon_intr_retrigger(&gsp->falcon);
+       return IRQ_HANDLED;
+}
+
+static int
+r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
+{
+       NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+       int ret = 0;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+       if (WARN_ON(ret)) {
+               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+               return ret;
+       }
+
+       for (unsigned int i = 0; i < ctrl->tableLen; i++) {
+               enum nvkm_subdev_type type;
+               int inst;
+
+               nvkm_debug(&gsp->subdev,
+                          "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i,
+                          ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
+                          ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
+
+               switch (ctrl->table[i].engineIdx) {
+               case MC_ENGINE_IDX_GSP:
+                       type = NVKM_SUBDEV_GSP;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_DISP:
+                       type = NVKM_ENGINE_DISP;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+                       type = NVKM_ENGINE_CE;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
+                       break;
+               case MC_ENGINE_IDX_GR0:
+                       type = NVKM_ENGINE_GR;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+                       type = NVKM_ENGINE_NVDEC;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
+                       break;
+               case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+                       type = NVKM_ENGINE_NVENC;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
+                       break;
+               case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+                       type = NVKM_ENGINE_NVJPG;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
+                       break;
+               case MC_ENGINE_IDX_OFA0:
+                       type = NVKM_ENGINE_OFA;
+                       inst = 0;
+                       break;
+               default:
+                       continue;
+               }
+
+               if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
+                       ret = -ENOSPC;
+                       break;
+               }
+
+               gsp->intr[gsp->intr_nr].type = type;
+               gsp->intr[gsp->intr_nr].inst = inst;
+               gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
+               gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;
+               gsp->intr_nr++;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       return ret;
+}
+
+static int
+r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+{
+       GspStaticConfigInfo *rpc;
+       int last_usable = -1;
+
+       rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       gsp->internal.client.object.client = &gsp->internal.client;
+       gsp->internal.client.object.parent = NULL;
+       gsp->internal.client.object.handle = rpc->hInternalClient;
+       gsp->internal.client.gsp = gsp;
+
+       gsp->internal.device.object.client = &gsp->internal.client;
+       gsp->internal.device.object.parent = &gsp->internal.client.object;
+       gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+       gsp->internal.device.subdevice.client = &gsp->internal.client;
+       gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+       gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+       gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+       gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+       for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
+               NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
+                       &rpc->fbRegionInfoParams.fbRegion[i];
+
+               nvkm_debug(&gsp->subdev, "fb region %d: "
+                          "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
+                          reg->base, reg->limit, reg->reserved, reg->performance,
+                          reg->supportCompressed, reg->supportISO, reg->bProtected);
+
+               if (!reg->reserved && !reg->bProtected) {
+                       if (reg->supportCompressed && reg->supportISO &&
+                           !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) {
+                               const u64 size = (reg->limit + 1) - reg->base;
+
+                               gsp->fb.region[gsp->fb.region_nr].addr = reg->base;
+                               gsp->fb.region[gsp->fb.region_nr].size = size;
+                               gsp->fb.region_nr++;
+                       }
+
+                       last_usable = i;
+               }
+       }
+
+       if (last_usable >= 0) {
+               u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+
+               gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
+       }
+
+       for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
+               if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
+                       gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask);
+                       gsp->gr.gpcs++;
+               }
+       }
+
+       nvkm_gsp_rpc_done(gsp, rpc);
+       return 0;
+}
+
+static void
+nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
+{
+       if (mem->data) {
+               /*
+                * Poison the buffer to catch any unexpected access from
+                * GSP-RM if the buffer was prematurely freed.
+                */
+               memset(mem->data, 0xFF, mem->size);
+
+               dma_free_coherent(mem->dev, mem->size, mem->data, mem->addr);
+               put_device(mem->dev);
+
+               memset(mem, 0, sizeof(*mem));
+       }
+}
+
+/**
+ * nvkm_gsp_mem_ctor - constructor for nvkm_gsp_mem objects
+ * @gsp: gsp pointer
+ * @size: number of bytes to allocate
+ * @mem: nvkm_gsp_mem object to initialize
+ *
+ * Allocates a block of memory for use with GSP.
+ *
+ * This memory block can potentially outlive the driver's remove() callback,
+ * so we take a device reference to ensure its lifetime. The reference is
+ * dropped in the destructor.
+ */
+static int
+nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
+{
+       mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
+       if (WARN_ON(!mem->data))
+               return -ENOMEM;
+
+       mem->size = size;
+       mem->dev = get_device(gsp->subdev.device->dev);
+
+       return 0;
+}
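+
+/*
+ * A minimal usage sketch for the ctor/dtor pair above, assuming a
+ * hypothetical caller that needs a one-page DMA buffer:
+ *
+ *     struct nvkm_gsp_mem mem;
+ *     int ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &mem);
+ *
+ *     if (ret)
+ *             return ret;
+ *     ...                          // use mem.data (CPU) / mem.addr (DMA)
+ *     nvkm_gsp_mem_dtor(&mem);     // poison, free, drop the device ref
+ */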
+
+static int
+r535_gsp_postinit(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       int ret;
+
+       ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);
+
+       ret = r535_gsp_intr_get_table(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst);
+       if (WARN_ON(ret < 0))
+               return ret;
+
+       ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev,
+                           r535_gsp_intr, &gsp->subdev.inth);
+       if (WARN_ON(ret))
+               return ret;
+
+       nvkm_inth_allow(&gsp->subdev.inth);
+       nvkm_wr32(device, 0x110004, 0x00000040);
+
+       /* Release the DMA buffers that were needed only for boot and init */
+       nvkm_gsp_mem_dtor(&gsp->boot.fw);
+       nvkm_gsp_mem_dtor(&gsp->libos);
+
+       return ret;
+}
+
+static int
+r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
+{
+       rpc_unloading_guest_driver_v1F_07 *rpc;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       if (suspend) {
+               rpc->bInPMTransition = 1;
+               rpc->bGc6Entering = 0;
+               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+       } else {
+               rpc->bInPMTransition = 0;
+               rpc->bGc6Entering = 0;
+               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
+       }
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+}
+
+enum registry_type {
+       REGISTRY_TABLE_ENTRY_TYPE_DWORD  = 1, /* 32-bit unsigned integer */
+       REGISTRY_TABLE_ENTRY_TYPE_BINARY = 2, /* Binary blob */
+       REGISTRY_TABLE_ENTRY_TYPE_STRING = 3, /* Null-terminated string */
+};
+
+/* An arbitrary limit to the length of a registry key */
+#define REGISTRY_MAX_KEY_LENGTH                64
+
+/**
+ * struct registry_list_entry - linked list member for a registry key/value
+ * @head: list_head struct
+ * @type: dword, binary, or string
+ * @klen: the length of name of the key
+ * @vlen: the length of the value
+ * @key: the key name
+ * @dword: the data, if REGISTRY_TABLE_ENTRY_TYPE_DWORD
+ * @binary: the data, if TYPE_BINARY or TYPE_STRING
+ *
+ * Every registry key/value is represented internally by this struct.
+ *
+ * Type DWORD is a simple 32-bit unsigned integer, and its value is stored in
+ * @dword.
+ *
+ * Types BINARY and STRING are variable-length binary blobs.  The only real
+ * difference between BINARY and STRING is that STRING is null-terminated and
+ * is expected to contain only printable characters.
+ *
+ * Note: it is technically possible to have multiple keys with the same name
+ * but different types, but this is not useful since GSP-RM expects keys to
+ * have only one specific type.
+ */
+struct registry_list_entry {
+       struct list_head head;
+       enum registry_type type;
+       size_t klen;
+       char key[REGISTRY_MAX_KEY_LENGTH];
+       size_t vlen;
+       u32 dword;                      /* TYPE_DWORD */
+       u8 binary[] __counted_by(vlen); /* TYPE_BINARY or TYPE_STRING */
+};
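+
+/*
+ * Worked sketch of the two layouts, for a hypothetical pair of keys:
+ *
+ *     add_registry(gsp, "Foo", REGISTRY_TABLE_ENTRY_TYPE_DWORD, &val, 4)
+ *             -> klen = 4 ("Foo" plus NUL), vlen = 4, value held in @dword,
+ *                binary[] not allocated.
+ *     add_registry(gsp, "Bar", REGISTRY_TABLE_ENTRY_TYPE_STRING, "baz", 4)
+ *             -> klen = 4, vlen = 4 ("baz" plus NUL), value held in binary[].
+ */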
+
+/**
+ * add_registry - adds a registry entry
+ * @gsp: gsp pointer
+ * @key: name of the registry key
+ * @type: type of data
+ * @data: pointer to value
+ * @length: size of data, in bytes
+ *
+ * Adds a registry key/value pair to the registry database.
+ *
+ * This function collects the registry information in a linked list.  After
+ * all registry keys have been added, build_registry() is used to create the
+ * RPC data structure.
+ *
+ * registry_rpc_size is a running total of the size of all registry keys.
+ * It's used to avoid an O(n) calculation of the size when the RPC is built.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+static int add_registry(struct nvkm_gsp *gsp, const char *key,
+                       enum registry_type type, const void *data, size_t length)
+{
+       struct registry_list_entry *reg;
+       const size_t nlen = strnlen(key, REGISTRY_MAX_KEY_LENGTH) + 1;
+       size_t alloc_size; /* extra bytes to alloc for binary or string value */
+
+       if (nlen > REGISTRY_MAX_KEY_LENGTH)
+               return -EINVAL;
+
+       alloc_size = (type == REGISTRY_TABLE_ENTRY_TYPE_DWORD) ? 0 : length;
+
+       reg = kmalloc(sizeof(*reg) + alloc_size, GFP_KERNEL);
+       if (!reg)
+               return -ENOMEM;
+
+       switch (type) {
+       case REGISTRY_TABLE_ENTRY_TYPE_DWORD:
+               reg->dword = *(const u32 *)(data);
+               break;
+       case REGISTRY_TABLE_ENTRY_TYPE_BINARY:
+       case REGISTRY_TABLE_ENTRY_TYPE_STRING:
+               memcpy(reg->binary, data, alloc_size);
+               break;
+       default:
+               nvkm_error(&gsp->subdev, "unrecognized registry type %u for '%s'\n",
+                          type, key);
+               kfree(reg);
+               return -EINVAL;
+       }
+
+       memcpy(reg->key, key, nlen);
+       reg->klen = nlen;
+       reg->vlen = length;
+       reg->type = type;
+
+       list_add_tail(&reg->head, &gsp->registry_list);
+       gsp->registry_rpc_size += sizeof(PACKED_REGISTRY_ENTRY) + nlen + alloc_size;
+
+       return 0;
+}
+
+static int add_registry_num(struct nvkm_gsp *gsp, const char *key, u32 value)
+{
+       return add_registry(gsp, key, REGISTRY_TABLE_ENTRY_TYPE_DWORD,
+                           &value, sizeof(u32));
+}
+
+static int add_registry_string(struct nvkm_gsp *gsp, const char *key, const char *value)
+{
+       return add_registry(gsp, key, REGISTRY_TABLE_ENTRY_TYPE_STRING,
+                           value, strlen(value) + 1);
+}
+
+/**
+ * build_registry - create the registry RPC data
+ * @gsp: gsp pointer
+ * @registry: pointer to the RPC payload to fill
+ *
+ * After all registry key/value pairs have been added, call this function to
+ * build the RPC.
+ *
+ * The registry RPC looks like this:
+ *
+ * +-----------------+
+ * |NvU32 size;      |
+ * |NvU32 numEntries;|
+ * +-----------------+
+ * +----------------------------------------+
+ * |PACKED_REGISTRY_ENTRY                   |
+ * +----------------------------------------+
+ * |Null-terminated key (string) for entry 0|
+ * +----------------------------------------+
+ * |Binary/string data value for entry 0    | (only if necessary)
+ * +----------------------------------------+
+ *
+ * +----------------------------------------+
+ * |PACKED_REGISTRY_ENTRY                   |
+ * +----------------------------------------+
+ * |Null-terminated key (string) for entry 1|
+ * +----------------------------------------+
+ * |Binary/string data value for entry 1    | (only if necessary)
+ * +----------------------------------------+
+ * ... (and so on, one copy for each entry)
+ *
+ *
+ * The 'data' field of an entry is either a 32-bit integer (for type DWORD)
+ * or an offset into the PACKED_REGISTRY_TABLE (for types BINARY and STRING).
+ *
+ * All memory allocated by add_registry() is released.
+ */
+static void build_registry(struct nvkm_gsp *gsp, PACKED_REGISTRY_TABLE *registry)
+{
+       struct registry_list_entry *reg, *n;
+       size_t str_offset;
+       unsigned int i = 0;
+
+       registry->numEntries = list_count_nodes(&gsp->registry_list);
+       str_offset = struct_size(registry, entries, registry->numEntries);
+
+       list_for_each_entry_safe(reg, n, &gsp->registry_list, head) {
+               registry->entries[i].type = reg->type;
+               registry->entries[i].length = reg->vlen;
+
+               /* Append the key name to the table */
+               registry->entries[i].nameOffset = str_offset;
+               memcpy((void *)registry + str_offset, reg->key, reg->klen);
+               str_offset += reg->klen;
+
+               switch (reg->type) {
+               case REGISTRY_TABLE_ENTRY_TYPE_DWORD:
+                       registry->entries[i].data = reg->dword;
+                       break;
+               case REGISTRY_TABLE_ENTRY_TYPE_BINARY:
+               case REGISTRY_TABLE_ENTRY_TYPE_STRING:
+                       /* If the type is binary or string, also append the value */
+                       memcpy((void *)registry + str_offset, reg->binary, reg->vlen);
+                       registry->entries[i].data = str_offset;
+                       str_offset += reg->vlen;
+                       break;
+               default:
+                       break;
+               }
+
+               i++;
+               list_del(&reg->head);
+               kfree(reg);
+       }
+
+       /* Double-check that we calculated the sizes correctly */
+       WARN_ON(gsp->registry_rpc_size != str_offset);
+
+       registry->size = gsp->registry_rpc_size;
+}
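+
+/*
+ * Worked example of the size bookkeeping, for a single hypothetical DWORD
+ * key "Foo": add_registry() adds sizeof(PACKED_REGISTRY_ENTRY) + 4 (key plus
+ * NUL) to the initial sizeof(PACKED_REGISTRY_TABLE), and build_registry()
+ * must finish with str_offset equal to that total, which the WARN_ON above
+ * verifies.  A BINARY/STRING key would add its vlen on top.
+ */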
+
+/**
+ * clean_registry - clean up registry memory in case of error
+ * @gsp: gsp pointer
+ *
+ * Call this function to clean up all memory allocated by add_registry()
+ * when an error occurs and build_registry() will not be called.
+ */
+static void clean_registry(struct nvkm_gsp *gsp)
+{
+       struct registry_list_entry *reg, *n;
+
+       list_for_each_entry_safe(reg, n, &gsp->registry_list, head) {
+               list_del(&reg->head);
+               kfree(reg);
+       }
+
+       gsp->registry_rpc_size = sizeof(PACKED_REGISTRY_TABLE);
+}
+
+MODULE_PARM_DESC(NVreg_RegistryDwords,
+                "A semicolon-separated list of key=integer pairs of GSP-RM registry keys");
+static char *NVreg_RegistryDwords;
+module_param(NVreg_RegistryDwords, charp, 0400);
+
+/* dword only */
+struct nv_gsp_registry_entries {
+       const char *name;
+       u32 value;
+};
+
+/*
+ * r535_registry_entries - required registry entries for GSP-RM
+ *
+ * This array lists registry entries that are required for GSP-RM to
+ * function correctly.
+ *
+ * RMSecBusResetEnable - enables PCI secondary bus reset
+ * RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration
+ *   registers on any PCI reset.
+ */
+static const struct nv_gsp_registry_entries r535_registry_entries[] = {
+       { "RMSecBusResetEnable", 1 },
+       { "RMForcePcieConfigSave", 1 },
+};
+#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
+
+/**
+ * strip - strips all characters in 'reject' from 's'
+ * @s: string to strip
+ * @reject: string of characters to remove
+ *
+ * 's' is modified.
+ *
+ * Returns the length of the new string, including the NUL terminator.
+ */
+static size_t strip(char *s, const char *reject)
+{
+       char *p = s, *p2 = s;
+       size_t length = 0;
+       char c;
+
+       do {
+               while ((c = *p2) && strchr(reject, c))
+                       p2++;
+
+               *p++ = c = *p2++;
+               length++;
+       } while (c);
+
+       return length;
+}
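+
+/*
+ * For example, strip(s, " \t\n") rewrites "Foo = 1; Bar = 2" in place as
+ * "Foo=1;Bar=2" and returns 12 (11 characters plus the NUL terminator).
+ */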
+
+/**
+ * r535_gsp_rpc_set_registry - build registry RPC and call GSP-RM
+ * @gsp: gsp pointer
+ *
+ * The GSP-RM registry is a set of key/value pairs that configure some aspects
+ * of GSP-RM. The keys are strings, and the values are 32-bit integers or
+ * null-terminated strings.
+ *
+ * The registry is built from a combination of a static hard-coded list (see
+ * above) and entries passed on the driver's command line.
+ */
+static int
+r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
+{
+       PACKED_REGISTRY_TABLE *rpc;
+       unsigned int i;
+       int ret;
+
+       INIT_LIST_HEAD(&gsp->registry_list);
+       gsp->registry_rpc_size = sizeof(PACKED_REGISTRY_TABLE);
+
+       for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
+               ret = add_registry_num(gsp, r535_registry_entries[i].name,
+                                      r535_registry_entries[i].value);
+               if (ret)
+                       goto fail;
+       }
+
+       /*
+        * The NVreg_RegistryDwords parameter is a string of key=value
+        * pairs separated by semicolons. We need to extract and trim each
+        * substring, and then parse the substring to extract the key and
+        * value.
+        */
+       if (NVreg_RegistryDwords) {
+               char *p = kstrdup(NVreg_RegistryDwords, GFP_KERNEL);
+               char *start, *next = p, *equal;
+
+               if (!p) {
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+
+               /* Remove any whitespace from the parameter string */
+               strip(p, " \t\n");
+
+               while ((start = strsep(&next, ";"))) {
+                       long value;
+
+                       equal = strchr(start, '=');
+                       if (!equal || equal == start || equal[1] == 0) {
+                               nvkm_error(&gsp->subdev,
+                                          "ignoring invalid registry string '%s'\n",
+                                          start);
+                               continue;
+                       }
+
+                       /* Truncate the key=value string to just key */
+                       *equal = 0;
+
+                       ret = kstrtol(equal + 1, 0, &value);
+                       if (!ret) {
+                               ret = add_registry_num(gsp, start, value);
+                       } else {
+                               /* Not a number, so treat it as a string */
+                               ret = add_registry_string(gsp, start, equal + 1);
+                       }
+
+                       if (ret) {
+                               nvkm_error(&gsp->subdev,
+                                          "ignoring invalid registry key/value '%s=%s'\n",
+                                          start, equal + 1);
+                               continue;
+                       }
+               }
+
+               kfree(p);
+       }
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, gsp->registry_rpc_size);
+       if (IS_ERR(rpc)) {
+               ret = PTR_ERR(rpc);
+               goto fail;
+       }
+
+       build_registry(gsp, rpc);
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
+
+fail:
+       clean_registry(gsp);
+       return ret;
+}
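+
+/*
+ * For example (hypothetical keys), booting with
+ * nouveau.NVreg_RegistryDwords="Foo=1;Bar=0x10;Baz=hello" adds Foo and Bar
+ * as DWORD entries (kstrtol() with base 0 accepts the 0x prefix) and Baz,
+ * which fails numeric parsing, as a null-terminated STRING entry.
+ */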
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+static void
+r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
+{
+       const guid_t NVOP_DSM_GUID =
+               GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
+                         0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
+       u64 NVOP_DSM_REV = 0x00000100;
+       union acpi_object argv4 = {
+               .buffer.type    = ACPI_TYPE_BUFFER,
+               .buffer.length  = 4,
+               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+       }, *obj;
+
+       caps->status = 0xffff;
+
+       if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))
+               return;
+
+       obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);
+       if (!obj)
+               return;
+
+       if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+           WARN_ON(obj->buffer.length != 4))
+               return;
+
+       caps->status = 0;
+       caps->optimusCaps = *(u32 *)obj->buffer.pointer;
+
+       ACPI_FREE(obj);
+
+       kfree(argv4.buffer.pointer);
+}
+
+static void
+r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
+{
+       const guid_t JT_DSM_GUID =
+               GUID_INIT(0xCBECA351L, 0x067B, 0x4924,
+                         0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34);
+       u64 JT_DSM_REV = 0x00000103;
+       u32 caps;
+       union acpi_object argv4 = {
+               .buffer.type    = ACPI_TYPE_BUFFER,
+               .buffer.length  = sizeof(caps),
+               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+       }, *obj;
+
+       jt->status = 0xffff;
+
+       obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);
+       if (!obj)
+               return;
+
+       if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+           WARN_ON(obj->buffer.length != 4))
+               return;
+
+       jt->status = 0;
+       jt->jtCaps = *(u32 *)obj->buffer.pointer;
+       jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
+       jt->bSBIOSCaps = 0;
+
+       ACPI_FREE(obj);
+
+       kfree(argv4.buffer.pointer);
+}
+
+static void
+r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
+                                                MUX_METHOD_DATA_ELEMENT *part)
+{
+       union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
+       struct acpi_object_list input = { 1, &mux_arg };
+       acpi_handle iter = NULL, handle_mux = NULL;
+       acpi_status status;
+       unsigned long long value;
+
+       mode->status = 0xffff;
+       part->status = 0xffff;
+
+       do {
+               status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter);
+               if (ACPI_FAILURE(status) || !iter)
+                       return;
+
+               status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
+               if (ACPI_FAILURE(status) || value != id)
+                       continue;
+
+               handle_mux = iter;
+       } while (!handle_mux);
+
+       if (!handle_mux)
+               return;
+
+       /* I -think- 0 means "acquire" according to nvidia's driver source */
+       input.pointer->integer.type = ACPI_TYPE_INTEGER;
+       input.pointer->integer.value = 0;
+
+       status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value);
+       if (ACPI_SUCCESS(status)) {
+               mode->acpiId = id;
+               mode->mode   = value;
+               mode->status = 0;
+       }
+
+       status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value);
+       if (ACPI_SUCCESS(status)) {
+               part->acpiId = id;
+               part->mode   = value;
+               part->status = 0;
+       }
+}
+
+static void
+r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux)
+{
+       mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]);
+
+       for (int i = 0; i < mux->tableLen; i++) {
+               r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i],
+                                                                &mux->acpiIdMuxPartTable[i]);
+       }
+}
+
+static void
+r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
+{
+       acpi_status status;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *_DOD;
+
+       dod->status = 0xffff;
+
+       status = acpi_evaluate_object(handle, "_DOD", NULL, &output);
+       if (ACPI_FAILURE(status))
+               return;
+
+       _DOD = output.pointer;
+
+       if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) ||
+           WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList)))
+               return;
+
+       for (int i = 0; i < _DOD->package.count; i++) {
+               if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER))
+                       return;
+
+               dod->acpiIdList[i] = _DOD->package.elements[i].integer.value;
+               dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
+       }
+
+       dod->status = 0;
+       kfree(output.pointer);
+}
+#endif
+
+static void
+r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+       acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+       if (!handle)
+               return;
+
+       acpi->bValid = 1;
+
+       r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+       if (acpi->dodMethodData.status == 0)
+               r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData);
+
+       r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+       r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+static int
+r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
+       GspSystemInfo *info;
+
+       if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+               return -ENOSYS;
+
+       info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+
+       info->gpuPhysAddr = device->func->resource_addr(device, 0);
+       info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
+       info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+       info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
+       info->maxUserVa = TASK_SIZE;
+       info->pciConfigMirrorBase = 0x088000;
+       info->pciConfigMirrorSize = 0x001000;
+       r535_gsp_acpi_info(gsp, &info->acpiMethodData);
+
+       return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+}
+
+static int
+r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       rpc_os_error_log_v17_00 *msg = repv;
+
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+
+       nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString);
+       return 0;
+}
+
+static int
+r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+       rpc_rc_triggered_v17_02 *msg = repv;
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_chan *chan;
+       unsigned long flags;
+
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+
+       nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
+                  msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+                  msg->partitionAttributionId);
+
+       chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid, &flags);
+       if (!chan) {
+               nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
+               return 0;
+       }
+
+       nvkm_chan_error(chan, false);
+       nvkm_chan_put(&chan, flags);
+       return 0;
+}
+
+static int
+r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+
+       WARN_ON(repc != 0);
+
+       nvkm_error(subdev, "mmu fault queued\n");
+       return 0;
+}
+
+static int
+r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_gsp_client *client;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       rpc_post_event_v17_00 *msg = repv;
+
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+       if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize))
+               return -EINVAL;
+
+       nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n",
+                  msg->hClient, msg->hEvent, msg->notifyIndex, msg->data,
+                  msg->status, msg->eventDataSize, msg->bNotifyList);
+
+       mutex_lock(&gsp->client_id.mutex);
+       client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff);
+       if (client) {
+               struct nvkm_gsp_event *event;
+               bool handled = false;
+
+               list_for_each_entry(event, &client->events, head) {
+                       if (event->object.handle == msg->hEvent) {
+                               event->func(event, msg->eventData, msg->eventDataSize);
+                               handled = true;
+                       }
+               }
+
+               if (!handled) {
+                       nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
+                                  msg->hClient, msg->hEvent);
+               }
+       } else {
+               nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);
+       }
+       mutex_unlock(&gsp->client_id.mutex);
+       return 0;
+}
+
+/**
+ * r535_gsp_msg_run_cpu_sequencer() - process I/O commands from the GSP
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to the sequencer RPC
+ * @repc: message size
+ *
+ * The GSP sequencer is a list of I/O commands that the GSP can send to
+ * the driver to perform for various purposes.  The most common usage is to
+ * perform a special mid-initialization reset.
+ */
+static int
+r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       rpc_run_cpu_sequencer_v17_00 *seq = repv;
+       int ptr = 0, ret;
+
+       nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex);
+
+       while (ptr < seq->cmdIndex) {
+               GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr];
+
+               ptr += 1;
+               ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);
+
+               switch (cmd->opCode) {
+               case GSP_SEQ_BUF_OPCODE_REG_WRITE: {
+                       u32 addr = cmd->payload.regWrite.addr;
+                       u32 data = cmd->payload.regWrite.val;
+
+                       nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data);
+                       nvkm_wr32(device, addr, data);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_MODIFY: {
+                       u32 addr = cmd->payload.regModify.addr;
+                       u32 mask = cmd->payload.regModify.mask;
+                       u32 data = cmd->payload.regModify.val;
+
+                       nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data);
+                       nvkm_mask(device, addr, mask, data);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_POLL: {
+                       u32 addr = cmd->payload.regPoll.addr;
+                       u32 mask = cmd->payload.regPoll.mask;
+                       u32 data = cmd->payload.regPoll.val;
+                       u32 usec = cmd->payload.regPoll.timeout ?: 4000000;
+                       //u32 error = cmd->payload.regPoll.error;
+
+                       nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec);
+                       nvkm_rd32(device, addr);
+                       nvkm_usec(device, usec,
+                               if ((nvkm_rd32(device, addr) & mask) == data)
+                                       break;
+                       );
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_DELAY_US: {
+                       u32 usec = cmd->payload.delayUs.val;
+
+                       nvkm_trace(subdev, "seq usec %d\n", usec);
+                       udelay(usec);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_STORE: {
+                       u32 addr = cmd->payload.regStore.addr;
+                       u32 slot = cmd->payload.regStore.index;
+
+                       seq->regSaveArea[slot] = nvkm_rd32(device, addr);
+                       nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot,
+                                  seq->regSaveArea[slot]);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_RESET:
+                       nvkm_trace(subdev, "seq core reset\n");
+                       nvkm_falcon_reset(&gsp->falcon);
+                       nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080);
+                       nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000);
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_START:
+                       nvkm_trace(subdev, "seq core start\n");
+                       if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040)
+                               nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002);
+                       else
+                               nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002);
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT:
+                       nvkm_trace(subdev, "seq core wait halt\n");
+                       nvkm_msec(device, 2000,
+                               if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010)
+                                       break;
+                       );
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_RESUME: {
+                       struct nvkm_sec2 *sec2 = device->sec2;
+                       u32 mbox0;
+
+                       nvkm_trace(subdev, "seq core resume\n");
+
+                       ret = gsp->func->reset(gsp);
+                       if (WARN_ON(ret))
+                               return ret;
+
+                       nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+                       nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+
+                       nvkm_falcon_start(&sec2->falcon);
+
+                       if (nvkm_msec(device, 2000,
+                               if (nvkm_rd32(device, 0x1180f8) & 0x04000000)
+                                       break;
+                       ) < 0)
+                               return -ETIMEDOUT;
+
+                       mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040);
+                       if (WARN_ON(mbox0)) {
+                               nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0);
+                               return -EIO;
+                       }
+
+                       nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
+
+                       if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+                               return -EIO;
+               }
+                       break;
+               default:
+                       nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
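+
+/*
+ * Sketch of how the stream above is walked, assuming a buffer holding a
+ * single REG_WRITE: the opcode occupies one dword, so ptr advances by
+ * 1 + GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opCode) (addr and val, i.e. two
+ * more dwords), and the loop ends once ptr reaches seq->cmdIndex.
+ */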
+
+static int
+r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+       GspFwWprMeta *meta;
+       int ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
+       if (ret)
+               return ret;
+
+       meta = gsp->wpr_meta.data;
+
+       meta->magic = GSP_FW_WPR_META_MAGIC;
+       meta->revision = GSP_FW_WPR_META_REVISION;
+
+       meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+       meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+       meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+       meta->sizeOfBootloader = gsp->boot.fw.size;
+       meta->bootloaderCodeOffset = gsp->boot.code_offset;
+       meta->bootloaderDataOffset = gsp->boot.data_offset;
+       meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+       meta->sysmemAddrOfSignature = gsp->sig.addr;
+       meta->sizeOfSignature = gsp->sig.size;
+
+       meta->gspFwRsvdStart = gsp->fb.heap.addr;
+       meta->nonWprHeapOffset = gsp->fb.heap.addr;
+       meta->nonWprHeapSize = gsp->fb.heap.size;
+       meta->gspFwWprStart = gsp->fb.wpr2.addr;
+       meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+       meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+       meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+       meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+       meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+       meta->frtsSize = gsp->fb.wpr2.frts.size;
+       meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+       meta->fbSize = gsp->fb.size;
+       meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+       meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+       meta->bootCount = 0;
+       meta->partitionRpcAddr = 0;
+       meta->partitionRpcRequestOffset = 0;
+       meta->partitionRpcReplyOffset = 0;
+       meta->verified = 0;
+       return 0;
+}
+
+static int
+r535_gsp_shared_init(struct nvkm_gsp *gsp)
+{
+       struct {
+               msgqTxHeader tx;
+               msgqRxHeader rx;
+       } *cmdq, *msgq;
+       int ret, i;
+
+       gsp->shm.cmdq.size = 0x40000;
+       gsp->shm.msgq.size = 0x40000;
+
+       gsp->shm.ptes.nr  = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;
+       gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
+       gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
+
+       ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +
+                                    gsp->shm.cmdq.size +
+                                    gsp->shm.msgq.size,
+                               &gsp->shm.mem);
+       if (ret)
+               return ret;
+
+       gsp->shm.ptes.ptr = gsp->shm.mem.data;
+       gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;
+       gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size;
+
+       for (i = 0; i < gsp->shm.ptes.nr; i++)
+               gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);
+
+       cmdq = gsp->shm.cmdq.ptr;
+       cmdq->tx.version = 0;
+       cmdq->tx.size = gsp->shm.cmdq.size;
+       cmdq->tx.entryOff = GSP_PAGE_SIZE;
+       cmdq->tx.msgSize = GSP_PAGE_SIZE;
+       cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize;
+       cmdq->tx.writePtr = 0;
+       cmdq->tx.flags = 1;
+       cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr);
+
+       msgq = gsp->shm.msgq.ptr;
+
+       gsp->cmdq.cnt = cmdq->tx.msgCount;
+       gsp->cmdq.wptr = &cmdq->tx.writePtr;
+       gsp->cmdq.rptr = &msgq->rx.readPtr;
+       gsp->msgq.cnt = cmdq->tx.msgCount;
+       gsp->msgq.wptr = &msgq->tx.writePtr;
+       gsp->msgq.rptr = &cmdq->rx.readPtr;
+       return 0;
+}
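+
+/*
+ * Worked example of the PTE sizing above: cmdq + msgq = 0x80000 bytes, or
+ * 128 GSP pages, whose PTEs occupy 128 * 8 = 1024 bytes and thus one more
+ * page, so shm.ptes.nr = 129 and shm.ptes.size rounds up to a single
+ * 0x1000-byte page.
+ */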
+
+int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+       GSP_ARGUMENTS_CACHED *args;
+       int ret;
+
+       if (!resume) {
+               ret = r535_gsp_shared_init(gsp);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+               if (ret)
+                       return ret;
+       }
+
+       args = gsp->rmargs.data;
+       args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+       args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+       args->messageQueueInitArguments.cmdQueueOffset =
+               (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+       args->messageQueueInitArguments.statQueueOffset =
+               (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
+       if (!resume) {
+               args->srInitArguments.oldLevel = 0;
+               args->srInitArguments.flags = 0;
+               args->srInitArguments.bInPMTransition = 0;
+       } else {
+               args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+               args->srInitArguments.flags = 0;
+               args->srInitArguments.bInPMTransition = 1;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * If the GSP-RM load fails, the GSP nvkm object and its logging debugfs
+ * entries are deleted, and it is no longer possible to debug the load
+ * failure. The keep_gsp_logging parameter tells Nouveau to copy the
+ * logging buffers to new debugfs entries, and these entries are retained
+ * until the driver unloads.
+ */
+static bool keep_gsp_logging;
+module_param(keep_gsp_logging, bool, 0444);
+MODULE_PARM_DESC(keep_gsp_logging,
+                "Migrate the GSP-RM logging debugfs entries upon exit");
+
+/*
+ * GSP-RM uses a pseudo-class mechanism to define a variety of per-"engine"
+ * data structures, and each engine has a "class ID" generated by a
+ * pre-processor. This is the class ID for the PMU.
+ */
+#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU         0xf3d722
+
+/**
+ * struct rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers
+ * @ucode_eng_desc: the engine descriptor
+ * @libos_print_buf_size: the size of the libos_print_buf[]
+ * @libos_print_buf: the actual buffer
+ *
+ * The engine descriptor is divided into bits 31:8 (the "class ID") and bits
+ * 7:0 (the "instance ID"). We only care about messages from the PMU.
+ */
+struct rpc_ucode_libos_print_v1e_08 {
+       u32 ucode_eng_desc;
+       u32 libos_print_buf_size;
+       u8 libos_print_buf[];
+};
+
+/**
+ * r535_gsp_msg_libos_print - capture log message from the PMU
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
+ *
+ * Called when we receive a UCODE_LIBOS_PRINT event RPC from GSP-RM. This RPC
+ * contains the contents of the libos print buffer from PMU. It is typically
+ * only written to when PMU encounters an error.
+ *
+ * Technically this RPC can be used to pass print buffers from any number of
+ * GSP-RM engines, but we only expect to receive them for the PMU.
+ *
+ * For the PMU, the buffer is 4K in size and the RPC always contains the full
+ * contents.
+ */
+static int
+r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct rpc_ucode_libos_print_v1e_08 *rpc = repv;
+       unsigned int class = rpc->ucode_eng_desc >> 8;
+
+       nvkm_debug(subdev, "received libos print from class 0x%x for %u bytes\n",
+                  class, rpc->libos_print_buf_size);
+
+       if (class != NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU) {
+               nvkm_warn(subdev,
+                         "received libos print from unknown class 0x%x\n",
+                         class);
+               return -ENOMSG;
+       }
+
+       if (rpc->libos_print_buf_size > GSP_PAGE_SIZE) {
+               nvkm_error(subdev, "libos print is too large (%u bytes)\n",
+                          rpc->libos_print_buf_size);
+               return -E2BIG;
+       }
+
+       memcpy(gsp->blob_pmu.data, rpc->libos_print_buf, rpc->libos_print_buf_size);
+
+       return 0;
+}
+
+/**
+ * create_debugfs - create a blob debugfs entry
+ * @gsp: gsp pointer
+ * @name: name of this dentry
+ * @blob: blob wrapper
+ *
+ * Creates a debugfs entry for a logging buffer with the name 'name'.
+ */
+static struct dentry *create_debugfs(struct nvkm_gsp *gsp, const char *name,
+                                    struct debugfs_blob_wrapper *blob)
+{
+       struct dentry *dent;
+
+       dent = debugfs_create_blob(name, 0444, gsp->debugfs.parent, blob);
+       if (IS_ERR(dent)) {
+               nvkm_error(&gsp->subdev,
+                          "failed to create %s debugfs entry\n", name);
+               return NULL;
+       }
+
+       /*
+        * For some reason, debugfs_create_blob doesn't set the size of the
+        * dentry, so do that here.  See [1]
+        *
+        * [1] https://lore.kernel.org/r/linux-fsdevel/20240207200619.3354549-1-ttabi@nvidia.com/
+        */
+       i_size_write(d_inode(dent), blob->size);
+
+       return dent;
+}
+
+/**
+ * r535_gsp_libos_debugfs_init - create logging debugfs entries
+ * @gsp: gsp pointer
+ *
+ * Create the debugfs entries. This exposes the log buffers to userspace so
+ * that an external tool can parse them.
+ *
+ * The 'logpmu' entry contains exception dumps from the PMU. It is written via
+ * an RPC sent from GSP-RM and is limited to 4KB. We create it here because
+ * it's only useful if there is a debugfs entry to expose it. If we get the
+ * PMU logging RPC and there is no debugfs entry, the RPC is just ignored.
+ *
+ * The blob_init, blob_intr, blob_rm, and blob_pmu objects can't be transient
+ * because debugfs_create_blob doesn't copy them.
+ *
+ * NOTE: OpenRM loads the logging elf image and prints the log messages
+ * in real-time. We may add that capability in the future, but that
+ * requires loading ELF images that are not distributed with the driver and
+ * adding the parsing code to Nouveau.
+ *
+ * Ideally, this should be part of nouveau_debugfs_init(), but that function
+ * is called too late. We really want to create these debugfs entries before
+ * r535_gsp_booter_load() is called, so that if GSP-RM fails to initialize,
+ * there could still be a log to capture.
+ */
+static void
+r535_gsp_libos_debugfs_init(struct nvkm_gsp *gsp)
+{
+       struct device *dev = gsp->subdev.device->dev;
+
+       /* Create a new debugfs directory with a name unique to this GPU. */
+       gsp->debugfs.parent = debugfs_create_dir(dev_name(dev), nouveau_debugfs_root);
+       if (IS_ERR(gsp->debugfs.parent)) {
+               nvkm_error(&gsp->subdev,
+                          "failed to create %s debugfs root\n", dev_name(dev));
+               return;
+       }
+
+       gsp->blob_init.data = gsp->loginit.data;
+       gsp->blob_init.size = gsp->loginit.size;
+       gsp->blob_intr.data = gsp->logintr.data;
+       gsp->blob_intr.size = gsp->logintr.size;
+       gsp->blob_rm.data = gsp->logrm.data;
+       gsp->blob_rm.size = gsp->logrm.size;
+
+       gsp->debugfs.init = create_debugfs(gsp, "loginit", &gsp->blob_init);
+       if (!gsp->debugfs.init)
+               goto error;
+
+       gsp->debugfs.intr = create_debugfs(gsp, "logintr", &gsp->blob_intr);
+       if (!gsp->debugfs.intr)
+               goto error;
+
+       gsp->debugfs.rm = create_debugfs(gsp, "logrm", &gsp->blob_rm);
+       if (!gsp->debugfs.rm)
+               goto error;
+
+       /*
+        * Since the PMU buffer is copied from an RPC, it doesn't need to be
+        * a DMA buffer.
+        */
+       gsp->blob_pmu.size = GSP_PAGE_SIZE;
+       gsp->blob_pmu.data = kzalloc(gsp->blob_pmu.size, GFP_KERNEL);
+       if (!gsp->blob_pmu.data)
+               goto error;
+
+       gsp->debugfs.pmu = create_debugfs(gsp, "logpmu", &gsp->blob_pmu);
+       if (!gsp->debugfs.pmu) {
+               kfree(gsp->blob_pmu.data);
+               goto error;
+       }
+
+       i_size_write(d_inode(gsp->debugfs.init), gsp->blob_init.size);
+       i_size_write(d_inode(gsp->debugfs.intr), gsp->blob_intr.size);
+       i_size_write(d_inode(gsp->debugfs.rm), gsp->blob_rm.size);
+       i_size_write(d_inode(gsp->debugfs.pmu), gsp->blob_pmu.size);
+
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
+                             r535_gsp_msg_libos_print, gsp);
+
+       nvkm_debug(&gsp->subdev, "created debugfs GSP-RM logging entries\n");
+
+       if (keep_gsp_logging) {
+               nvkm_info(&gsp->subdev,
+                         "logging buffers will be retained on failure\n");
+       }
+
+       return;
+
+error:
+       debugfs_remove(gsp->debugfs.parent);
+       gsp->debugfs.parent = NULL;
+}
+
+#endif
+
+static inline u64
+r535_gsp_libos_id8(const char *name)
+{
+       u64 id = 0;
+
+       for (int i = 0; i < sizeof(id) && *name; i++, name++)
+               id = (id << 8) | *name;
+
+       return id;
+}
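+
+/*
+ * For example, r535_gsp_libos_id8("LOGINIT") packs up to eight characters
+ * into a u64, most-significant byte first: 0x004c4f47494e4954.
+ */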
+
+/**
+ * create_pte_array() - creates a PTE array of a physically contiguous buffer
+ * @ptes: pointer to the array
+ * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
+ * @size: size of the buffer
+ *
+ * GSP-RM sometimes expects physically-contiguous buffers to have an array of
+ * "PTEs" for each page in that buffer.  Although in theory that allows for
+ * the buffer to be physically discontiguous, GSP-RM does not currently
+ * support that.
+ *
+ * In this case, the PTEs are DMA addresses of each page of the buffer.  Since
+ * the buffer is physically contiguous, calculating all the PTEs is simple
+ * math.
+ *
+ * See memdescGetPhysAddrsForGpu()
+ */
+static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
+{
+       unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
+       unsigned int i;
+
+       for (i = 0; i < num_pages; i++)
+               ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
+}
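+
+/*
+ * For example, create_pte_array(ptes, 0x10000000, 3 * GSP_PAGE_SIZE) writes
+ * { 0x10000000, 0x10001000, 0x10002000 }, one entry per 4KiB GSP page.
+ */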
+
+/**
+ * r535_gsp_libos_init() - create the libos arguments structure
+ * @gsp: gsp pointer
+ *
+ * The logging buffers are byte queues that contain encoded printf-like
+ * messages from GSP-RM.  They need to be decoded by a special application
+ * that can parse the buffers.
+ *
+ * The 'loginit' buffer contains logs from early GSP-RM init and
+ * exception dumps.  The 'logrm' buffer contains the subsequent logs. Both are
+ * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
+ *
+ * The physical address map for the log buffer is stored in the buffer
+ * itself, starting with offset 1. Offset 0 contains the "put" pointer (pp).
+ * Initially, pp is equal to 0. If the buffer has valid logging data in it,
+ * then pp points to the index into the buffer where the next logging entry will
+ * be written. Therefore, the logging data is valid if:
+ *   1 <= pp < sizeof(buffer)/sizeof(u64)
+ *
+ * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
+ * configured for a larger page size (e.g. 64K pages), we need to give
+ * the GSP an array of 4K pages. Fortunately, since the buffer is
+ * physically contiguous, it's simple math to calculate the addresses.
+ *
+ * The buffers must be a multiple of GSP_PAGE_SIZE.  GSP-RM also currently
+ * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
+ * buffers to be physically contiguous anyway.
+ *
+ * The memory allocated for the arguments must remain until the GSP sends the
+ * init_done RPC.
+ *
+ * See _kgspInitLibosLoggingStructures (allocates memory for buffers)
+ * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
+ */
+static int
+r535_gsp_libos_init(struct nvkm_gsp *gsp)
+{
+       LibosMemoryRegionInitArgument *args;
+       int ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos);
+       if (ret)
+               return ret;
+
+       args = gsp->libos.data;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit);
+       if (ret)
+               return ret;
+
+       args[0].id8  = r535_gsp_libos_id8("LOGINIT");
+       args[0].pa   = gsp->loginit.addr;
+       args[0].size = gsp->loginit.size;
+       args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[0].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size);
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr);
+       if (ret)
+               return ret;
+
+       args[1].id8  = r535_gsp_libos_id8("LOGINTR");
+       args[1].pa   = gsp->logintr.addr;
+       args[1].size = gsp->logintr.size;
+       args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[1].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size);
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm);
+       if (ret)
+               return ret;
+
+       args[2].id8  = r535_gsp_libos_id8("LOGRM");
+       args[2].pa   = gsp->logrm.addr;
+       args[2].size = gsp->logrm.size;
+       args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[2].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size);
+
+       ret = r535_gsp_rmargs_init(gsp, false);
+       if (ret)
+               return ret;
+
+       args[3].id8  = r535_gsp_libos_id8("RMARGS");
+       args[3].pa   = gsp->rmargs.addr;
+       args[3].size = gsp->rmargs.size;
+       args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[3].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+
+#ifdef CONFIG_DEBUG_FS
+       r535_gsp_libos_debugfs_init(gsp);
+#endif
+
+       return 0;
+}
+
+void
+nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt)
+{
+       struct scatterlist *sgl;
+       int i;
+
+       dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+
+       for_each_sgtable_sg(sgt, sgl, i) {
+               struct page *page = sg_page(sgl);
+
+               __free_page(page);
+       }
+
+       sg_free_table(sgt);
+}
+
+int
+nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
+{
+       const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
+       struct scatterlist *sgl;
+       int ret, i;
+
+       ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
+       if (ret)
+               return ret;
+
+       for_each_sgtable_sg(sgt, sgl, i) {
+               struct page *page = alloc_page(GFP_KERNEL);
+
+               if (!page) {
+                       nvkm_gsp_sg_free(device, sgt);
+                       return -ENOMEM;
+               }
+
+               sg_set_page(sgl, page, PAGE_SIZE, 0);
+       }
+
+       ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+       if (ret)
+               nvkm_gsp_sg_free(device, sgt);
+
+       return ret;
+}
+
+static void
+nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
+{
+       nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
+       nvkm_gsp_mem_dtor(&rx3->lvl1);
+       nvkm_gsp_mem_dtor(&rx3->lvl0);
+}
+
+/**
+ * nvkm_gsp_radix3_sg - build a radix3 table from an S/G list
+ * @gsp: gsp pointer
+ * @sgt: S/G list to traverse
+ * @size: size of the image, in bytes
+ * @rx3: radix3 array to update
+ *
+ * The GSP uses a three-level page table, called radix3, to map the firmware.
+ * Each 64-bit "pointer" in the table is either the bus address of an entry in
+ * the next table (for levels 0 and 1) or the bus address of the next page in
+ * the GSP firmware image itself.
+ *
+ * Level 0 contains a single entry in one page that points to the first page
+ * of level 1.
+ *
+ * Level 1, since it's also only one page in size, contains up to 512 entries,
+ * one for each page in Level 2.
+ *
+ * Level 2 can be up to 512 pages in size, and each of those entries points to
+ * the next page of the firmware image.  Since there can be up to 512*512
+ * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
+ *
+ * Internally, the GSP has its window into system memory, but the base
+ * physical address of the aperture is not 0.  In fact, it varies depending on
+ * the GPU architecture.  Since the GPU is a PCI device, this window is
+ * accessed via DMA and is therefore bound by IOMMU translation.  The end
+ * result is that GSP-RM must translate the bus addresses in the table to GSP
+ * physical addresses.  All this should happen transparently.
+ *
+ * Returns 0 on success, or negative error code
+ *
+ * See kgspCreateRadix3_IMPL
+ */
+static int
+nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
+                  struct nvkm_gsp_radix3 *rx3)
+{
+       struct sg_dma_page_iter sg_dma_iter;
+       struct scatterlist *sg;
+       size_t bufsize;
+       u64 *pte;
+       int ret, i, page_idx = 0;
+
+       ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1);
+       if (ret)
+               goto lvl1_fail;
+
+       // Allocate level 2
+       bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+       ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2);
+       if (ret)
+               goto lvl2_fail;
+
+       // Write the bus address of level 1 to level 0
+       pte = rx3->lvl0.data;
+       *pte = rx3->lvl1.addr;
+
+       // Write the bus address of each page in level 2 to level 1
+       pte = rx3->lvl1.data;
+       for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0)
+               *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+
+       // Finally, write the bus address of each page in sgt to level 2
+       for_each_sgtable_sg(&rx3->lvl2, sg, i) {
+               void *sgl_end;
+
+               pte = sg_virt(sg);
+               sgl_end = (void *)pte + sg->length;
+
+               for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) {
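+               /*
+                * Resume the walk of the firmware sgt at page_idx, so the
+                * PTE stream continues across level 2 pages.
+                */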
+                       *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+                       page_idx++;
+
+                       // Go to the next scatterlist for level 2 if we've reached the end
+                       if ((void *)pte >= sgl_end)
+                               break;
+               }
+       }
+
+       if (ret) {
+lvl2_fail:
+               nvkm_gsp_mem_dtor(&rx3->lvl1);
+lvl1_fail:
+               nvkm_gsp_mem_dtor(&rx3->lvl0);
+       }
+
+       return ret;
+}
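+
+/*
+ * Illustrative sketch, not part of the moved code: the size arithmetic the
+ * comment above describes.  A 4KiB page holds 512 u64 entries, so level 1
+ * references up to 512 level 2 pages, and level 2 up to 512 * 512 image
+ * pages -- hence the 1GB firmware limit:
+ *
+ *	static u64 radix3_lvl2_pages(u64 image_size)
+ *	{
+ *		u64 ptes = DIV_ROUND_UP(image_size, GSP_PAGE_SIZE);
+ *
+ *		return DIV_ROUND_UP(ptes * sizeof(u64), GSP_PAGE_SIZE);
+ *	}
+ */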
+
+int
+r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+       int ret;
+
+       if (suspend) {
+               GspFwWprMeta *meta = gsp->wpr_meta.data;
+               u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+               GspFwSRMeta *sr;
+
+               ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta);
+               if (ret)
+                       return ret;
+
+               sr = gsp->sr.meta.data;
+               sr->magic = GSP_FW_SR_META_MAGIC;
+               sr->revision = GSP_FW_SR_META_REVISION;
+               sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
+               sr->sizeOfSuspendResumeData = len;
+       }
+
+       ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
+       if (WARN_ON(ret))
+               return ret;
+
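+       /* Wait up to 2s for GSP-RM to acknowledge the unload in falcon register 0x040. */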
+       nvkm_msec(gsp->subdev.device, 2000,
+               if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000)
+                       break;
+       );
+
+       gsp->running = false;
+       return 0;
+}
+
+int
+r535_gsp_init(struct nvkm_gsp *gsp)
+{
+       int ret;
+
+       nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
+
+       if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+               return -EIO;
+
+       ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
+       if (ret)
+               goto done;
+
+       gsp->running = true;
+
+done:
+       if (gsp->sr.meta.data) {
+               nvkm_gsp_mem_dtor(&gsp->sr.meta);
+               nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+               nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+               return ret;
+       }
+
+       if (ret == 0)
+               ret = r535_gsp_postinit(gsp);
+
+       return ret;
+}
+
+static int
+r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
+{
+       const struct firmware *fw = gsp->fws.bl;
+       const struct nvfw_bin_hdr *hdr;
+       RM_RISCV_UCODE_DESC *desc;
+       int ret;
+
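+       /* Locate the RISC-V ucode descriptor within the bootloader image's binary header. */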
+       hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
+       desc = (void *)fw->data + hdr->header_offset;
+
+       ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
+       if (ret)
+               return ret;
+
+       memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);
+
+       gsp->boot.code_offset = desc->monitorCodeOffset;
+       gsp->boot.data_offset = desc->monitorDataOffset;
+       gsp->boot.manifest_offset = desc->manifestOffset;
+       gsp->boot.app_version = desc->appVersion;
+       return 0;
+}
+
+static const struct nvkm_firmware_func
+r535_gsp_fw = {
+       .type = NVKM_FIRMWARE_IMG_SGT,
+};
+
+static int
+r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
+{
+       const u8 *img = gsp->fws.rm->data;
+       const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
+       const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
+       const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];
+
+       for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
+               if (!strcmp(&names[shdr->sh_name], name)) {
+                       *pdata = &img[shdr->sh_offset];
+                       *psize = shdr->sh_size;
+                       return 0;
+               }
+       }
+
+       nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
+       return -ENOENT;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+struct r535_gsp_log {
+       struct nvif_log log;
+
+       /*
+        * Logging buffers in debugfs. The wrapper objects need to remain
+        * in memory until the dentry is deleted.
+        */
+       struct dentry *debugfs_logging_dir;
+       struct debugfs_blob_wrapper blob_init;
+       struct debugfs_blob_wrapper blob_intr;
+       struct debugfs_blob_wrapper blob_rm;
+       struct debugfs_blob_wrapper blob_pmu;
+};
+
+/**
+ * r535_debugfs_shutdown - delete GSP-RM logging buffers for one GPU
+ * @_log: nvif_log struct for this GPU
+ *
+ * Called when the driver is shutting down, to clean up the retained GSP-RM
+ * logging buffers.
+ */
+static void r535_debugfs_shutdown(struct nvif_log *_log)
+{
+       struct r535_gsp_log *log = container_of(_log, struct r535_gsp_log, log);
+
+       debugfs_remove(log->debugfs_logging_dir);
+
+       kfree(log->blob_init.data);
+       kfree(log->blob_intr.data);
+       kfree(log->blob_rm.data);
+       kfree(log->blob_pmu.data);
+
+       /* We also need to delete the list object */
+       kfree(log);
+}
+
+/**
+ * is_empty - return true if the logging buffer was never written to
+ * @b: blob wrapper with ->data field pointing to logging buffer
+ *
+ * The first 64-bit field of loginit, logintr, and logrm is the 'put'
+ * pointer, which is initialized to 0. It's a dword-based index into the
+ * circular buffer, indicating where the next printf write will be made.
+ *
+ * If the pointer is still 0 when GSP-RM is shut down, that means that the
+ * buffer was never written to, so it can be ignored.
+ *
+ * This test also works for logpmu, even though it doesn't have a put pointer.
+ */
+static bool is_empty(const struct debugfs_blob_wrapper *b)
+{
+       u64 *put = b->data;
+
+       return put ? (*put == 0) : true;
+}
+
+/**
+ * r535_gsp_copy_log - preserve the logging buffers in a blob
+ * @parent: the top-level dentry for this GPU
+ * @name: name of debugfs entry to create
+ * @s: original wrapper object to copy from
+ * @t: new wrapper object to copy to
+ *
+ * When GSP shuts down, the nvkm_gsp object and all its memory is deleted.
+ * To preserve the logging buffers, the buffers need to be copied, but only
+ * if they actually have data.
+ */
+static int r535_gsp_copy_log(struct dentry *parent,
+                            const char *name,
+                            const struct debugfs_blob_wrapper *s,
+                            struct debugfs_blob_wrapper *t)
+{
+       struct dentry *dent;
+       void *p;
+
+       if (is_empty(s))
+               return 0;
+
+       /* The original buffers will be deleted */
+       p = kmemdup(s->data, s->size, GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       t->data = p;
+       t->size = s->size;
+
+       dent = debugfs_create_blob(name, 0444, parent, t);
+       if (IS_ERR(dent)) {
+               kfree(p);
+               memset(t, 0, sizeof(*t));
+               return PTR_ERR(dent);
+       }
+
+       i_size_write(d_inode(dent), t->size);
+
+       return 0;
+}
+
+/**
+ * r535_gsp_retain_logging - copy logging buffers to new debugfs root
+ * @gsp: gsp pointer
+ *
+ * If keep_gsp_logging is enabled, then we want to preserve the GSP-RM logging
+ * buffers and their debugfs entries, but all those objects would normally
+ * be deleted if GSP-RM fails to load.
+ *
+ * To preserve the logging buffers, we need to:
+ *
+ * 1) Allocate new buffers and copy the logs into them, so that the original
+ * DMA buffers can be released.
+ *
+ * 2) Preserve the directories.  We don't need to save individual dentries
+ * because deleting the parent directory at driver shutdown removes its
+ * children along with it.
+ *
+ * If anything fails in this process, then all the dentries need to be
+ * deleted.  We don't need to deallocate the original logging buffers because
+ * the caller will do that regardless.
+ */
+static void r535_gsp_retain_logging(struct nvkm_gsp *gsp)
+{
+       struct device *dev = gsp->subdev.device->dev;
+       struct r535_gsp_log *log = NULL;
+       int ret;
+
+       if (!keep_gsp_logging || !gsp->debugfs.parent) {
+               /* Nothing to do */
+               goto exit;
+       }
+
+       /* Check to make sure at least one buffer has data. */
+       if (is_empty(&gsp->blob_init) && is_empty(&gsp->blob_intr) &&
+           is_empty(&gsp->blob_rm) && is_empty(&gsp->blob_pmu)) {
+               nvkm_warn(&gsp->subdev, "all logging buffers are empty\n");
+               goto exit;
+       }
+
+       log = kzalloc(sizeof(*log), GFP_KERNEL);
+       if (!log)
+               goto error;
+
+       /*
+        * Since the nvkm_gsp object is going away, the debugfs_blob_wrapper
+        * objects are also being deleted, which means the dentries will no
+        * longer be valid.  Delete the existing entries so that we can create
+        * new ones with the same name.
+        */
+       debugfs_remove(gsp->debugfs.init);
+       debugfs_remove(gsp->debugfs.intr);
+       debugfs_remove(gsp->debugfs.rm);
+       debugfs_remove(gsp->debugfs.pmu);
+
+       ret = r535_gsp_copy_log(gsp->debugfs.parent, "loginit", &gsp->blob_init, &log->blob_init);
+       if (ret)
+               goto error;
+
+       ret = r535_gsp_copy_log(gsp->debugfs.parent, "logintr", &gsp->blob_intr, &log->blob_intr);
+       if (ret)
+               goto error;
+
+       ret = r535_gsp_copy_log(gsp->debugfs.parent, "logrm", &gsp->blob_rm, &log->blob_rm);
+       if (ret)
+               goto error;
+
+       ret = r535_gsp_copy_log(gsp->debugfs.parent, "logpmu", &gsp->blob_pmu, &log->blob_pmu);
+       if (ret)
+               goto error;
+
+       /* The nvkm_gsp object is going away, so save the dentry */
+       log->debugfs_logging_dir = gsp->debugfs.parent;
+
+       log->log.shutdown = r535_debugfs_shutdown;
+       list_add(&log->log.entry, &gsp_logs.head);
+
+       nvkm_warn(&gsp->subdev,
+                 "logging buffers migrated to /sys/kernel/debug/nouveau/%s\n",
+                 dev_name(dev));
+
+       return;
+
+error:
+       nvkm_warn(&gsp->subdev, "failed to migrate logging buffers\n");
+
+exit:
+       debugfs_remove(gsp->debugfs.parent);
+
+       if (log) {
+               kfree(log->blob_init.data);
+               kfree(log->blob_intr.data);
+               kfree(log->blob_rm.data);
+               kfree(log->blob_pmu.data);
+               kfree(log);
+       }
+}
+
+#endif
+
+/**
+ * r535_gsp_libos_debugfs_fini - cleanup/retain log buffers on shutdown
+ * @gsp: gsp pointer
+ *
+ * If the log buffers are exposed via debugfs, the data for those entries
+ * needs to be cleaned up when the GSP device shuts down.
+ */
+static void
+r535_gsp_libos_debugfs_fini(struct nvkm_gsp __maybe_unused *gsp)
+{
+#ifdef CONFIG_DEBUG_FS
+       r535_gsp_retain_logging(gsp);
+
+       /*
+        * Unlike the other buffers, the PMU blob is a kmalloc'd buffer that
+        * exists only if the debugfs entries were created.
+        */
+       kfree(gsp->blob_pmu.data);
+       gsp->blob_pmu.data = NULL;
+#endif
+}
+
+void
+r535_gsp_dtor(struct nvkm_gsp *gsp)
+{
+       idr_destroy(&gsp->client_id.idr);
+       mutex_destroy(&gsp->client_id.mutex);
+
+       nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
+       nvkm_gsp_mem_dtor(&gsp->sig);
+       nvkm_firmware_dtor(&gsp->fw);
+
+       nvkm_falcon_fw_dtor(&gsp->booter.unload);
+       nvkm_falcon_fw_dtor(&gsp->booter.load);
+
+       mutex_destroy(&gsp->msgq.mutex);
+       mutex_destroy(&gsp->cmdq.mutex);
+
+       nvkm_gsp_dtor_fws(gsp);
+
+       nvkm_gsp_mem_dtor(&gsp->rmargs);
+       nvkm_gsp_mem_dtor(&gsp->wpr_meta);
+       nvkm_gsp_mem_dtor(&gsp->shm.mem);
+
+       r535_gsp_libos_debugfs_fini(gsp);
+
+       nvkm_gsp_mem_dtor(&gsp->loginit);
+       nvkm_gsp_mem_dtor(&gsp->logintr);
+       nvkm_gsp_mem_dtor(&gsp->logrm);
+}
+
+int
+r535_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       const u8 *data;
+       u64 size;
+       int ret;
+
+       mutex_init(&gsp->cmdq.mutex);
+       mutex_init(&gsp->msgq.mutex);
+
+       /* Load GSP firmware from ELF image into DMA-accessible memory. */
+       ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
+       if (ret)
+               return ret;
+
+       ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw);
+       if (ret)
+               return ret;
+
+       /* Load relevant signature from ELF image. */
+       ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig);
+       if (ret)
+               return ret;
+
+       memcpy(gsp->sig.data, data, size);
+
+       /* Build radix3 page table for ELF image. */
+       ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
+       if (ret)
+               return ret;
+
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
+                             r535_gsp_msg_run_cpu_sequencer, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
+                             r535_gsp_msg_rc_triggered, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
+                             r535_gsp_msg_mmu_fault_queued, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+       ret = r535_gsp_rm_boot_ctor(gsp);
+       if (ret)
+               return ret;
+
+       /* Release FW images - we've copied them to DMA buffers now. */
+       nvkm_gsp_dtor_fws(gsp);
+
+       /* Calculate FB layout. */
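+       /*
+        * Carved out top-down from the VBIOS image: FRTS, then the boot
+        * firmware, the GSP-RM ELF, the WPR heap, and the GspFwWprMeta
+        * header at the base of WPR2, with a 1MiB non-WPR heap below that.
+        */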
+       gsp->fb.wpr2.frts.size = 0x100000;
+       gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+       gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+       gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+       gsp->fb.wpr2.elf.size = gsp->fw.len;
+       gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+       {
+               u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+
+               gsp->fb.wpr2.heap.size =
+                       gsp->func->wpr_heap.os_carveout_size +
+                       gsp->func->wpr_heap.base_size +
+                       ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+                       ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+               gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
+       }
+
+       gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+       gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+       gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+       gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+       gsp->fb.heap.size = 0x100000;
+       gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
+
+       ret = nvkm_gsp_fwsec_frts(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_libos_init(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_wpr_meta_init(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_rpc_set_system_info(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_rpc_set_registry(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       mutex_init(&gsp->client_id.mutex);
+       idr_init(&gsp->client_id.idr);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
new file mode 100644 (file)
index 0000000..16c1928
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/nvdec/priv.h>
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvdec_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvdec_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvdec_obj = {
+       .dtor = r535_nvdec_obj_dtor,
+};
+
+static int
+r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvdec_obj *obj;
+       NV_BSP_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
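+       /* RM alloc RPC: get an argument buffer, fill it in, then send it with alloc_wr. */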
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvdec_dtor(struct nvkm_engine *engine)
+{
+       struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
+
+       kfree(nvdec->engine.func);
+       return nvdec;
+}
+
+int
+r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
+{
+       struct nvkm_engine_func *rm;
+       int nclass;
+
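+       /* Count the hardware implementation's classes; the RM wrapper mirrors them. */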
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvdec_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvdec_obj_ctor;
+       }
+
+       if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
new file mode 100644 (file)
index 0000000..b6808a5
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/nvenc/priv.h>
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvenc_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvenc_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvenc_obj = {
+       .dtor = r535_nvenc_obj_dtor,
+};
+
+static int
+r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvenc_obj *obj;
+       NV_MSENC_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvenc_dtor(struct nvkm_engine *engine)
+{
+       struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
+
+       kfree(nvenc->engine.func);
+       return nvenc;
+}
+
+int
+r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
+{
+       struct nvkm_engine_func *rm;
+       int nclass;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvenc_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvenc_obj_ctor;
+       }
+
+       if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
new file mode 100644 (file)
index 0000000..994232b
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/nvjpg/priv.h>
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvjpg_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvjpg_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvjpg_obj = {
+       .dtor = r535_nvjpg_obj_dtor,
+};
+
+static int
+r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                   struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvjpg_obj *obj;
+       NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvjpg_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvjpg_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
new file mode 100644 (file)
index 0000000..200201c
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <engine/ofa/priv.h>
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_ofa_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ofa_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_ofa_obj = {
+       .dtor = r535_ofa_obj_dtor,
+};
+
+static int
+r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_ofa_obj *obj;
+       NV_OFA_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ofa_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+            enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_ofa_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_ofa_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
new file mode 100644 (file)
index 0000000..94cad29
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/mmu/vmm.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+       NV_VASPACE_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp,
+                                         &vmm->rm.client, &vmm->rm.device);
+       if (ret)
+               return ret;
+
+       args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
+                                    sizeof(*args), &vmm->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+
+       ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
+       if (ret)
+               return ret;
+
+       {
+               NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+
+               mutex_lock(&vmm->mutex.vmm);
+               ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
+                                         &vmm->rm.rsvd);
+               mutex_unlock(&vmm->mutex.vmm);
+               if (ret)
+                       return ret;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
+                                           NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
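+               /*
+                * Hand the CPU-built top page-directory levels to GSP-RM.
+                * The pageShift values 0x2f, 0x26 and 0x1d are 47-, 38- and
+                * 29-bit spans; 1 << 29 matches the 512MiB pageSize used for
+                * the reserved region above.
+                */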
+               ctrl->pageSize = 0x20000000;
+               ctrl->virtAddrLo = vmm->rm.rsvd->addr;
+               ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
+               ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
+               ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
+               ctrl->levels[0].size = 0x20;
+               ctrl->levels[0].aperture = 1;
+               ctrl->levels[0].pageShift = 0x2f;
+               ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
+               ctrl->levels[1].size = 0x1000;
+               ctrl->levels[1].aperture = 1;
+               ctrl->levels[1].pageShift = 0x26;
+               if (vmm->pd->pde[0]->pde[0]) {
+                       ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
+                       ctrl->levels[2].size = 0x1000;
+                       ctrl->levels[2].aperture = 1;
+                       ctrl->levels[2].pageShift = 0x1d;
+               }
+
+               ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+       }
+
+       return ret;
+}
+
+static void
+r535_mmu_dtor(struct nvkm_mmu *mmu)
+{
+       kfree(mmu->func);
+}
+
+int
+r535_mmu_new(const struct nvkm_mmu_func *hw,
+            struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+            struct nvkm_mmu **pmmu)
+{
+       struct nvkm_mmu_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_mmu_dtor;
+       rm->dma_bits = hw->dma_bits;
+       rm->mmu = hw->mmu;
+       rm->mem = hw->mem;
+       rm->vmm = hw->vmm;
+       rm->kind = hw->kind;
+       rm->kind_sys = hw->kind_sys;
+       rm->promote_vmm = r535_mmu_promote_vmm;
+
+       ret = nvkm_mmu_new_(rm, device, type, inst, pmmu);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 553d540f27365a625828c519a2597de7a45083af..06cbe19ce3766e9f182be86268a6fea800b56ec5 100644 (file)
@@ -4,5 +4,3 @@ nvkm-y += nvkm/subdev/instmem/nv04.o
 nvkm-y += nvkm/subdev/instmem/nv40.o
 nvkm-y += nvkm/subdev/instmem/nv50.o
 nvkm-y += nvkm/subdev/instmem/gk20a.o
-
-nvkm-y += nvkm/subdev/instmem/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
deleted file mode 100644 (file)
index 35ba179..0000000
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
-
-struct fbsr_item {
-       const char *type;
-       u64 addr;
-       u64 size;
-
-       struct list_head head;
-};
-
-struct fbsr {
-       struct list_head items;
-
-       u64 size;
-       int regions;
-
-       struct nvkm_gsp_client client;
-       struct nvkm_gsp_device device;
-
-       u64 hmemory;
-       u64 sys_offset;
-};
-
-static int
-fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
-            u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
-{
-       struct nvkm_gsp_client *client = device->object.client;
-       struct nvkm_gsp *gsp = client->gsp;
-       const u32 pages = size / GSP_PAGE_SIZE;
-       rpc_alloc_memory_v13_01 *rpc;
-       int ret;
-
-       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
-                              sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
-       if (IS_ERR(rpc))
-               return PTR_ERR(rpc);
-
-       rpc->hClient = client->object.handle;
-       rpc->hDevice = device->object.handle;
-       rpc->hMemory = handle;
-       if (aper == NVKM_MEM_TARGET_HOST) {
-               rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
-               rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
-                            NVDEF(NVOS02, FLAGS, LOCATION, PCI) |
-                            NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
-       } else {
-               rpc->hClass = NV01_MEMORY_LIST_FBMEM;
-               rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
-                            NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) |
-                            NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
-               rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
-       }
-       rpc->pteAdjust = 0;
-       rpc->length = size;
-       rpc->pageCount = pages;
-       rpc->pteDesc.idr = 0;
-       rpc->pteDesc.reserved1 = 0;
-       rpc->pteDesc.length = pages;
-
-       if (sgt) {
-               struct scatterlist *sgl;
-               int pte = 0, idx;
-
-               for_each_sgtable_dma_sg(sgt, sgl, idx) {
-                       for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
-                               rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
-
-               }
-       } else {
-               for (int i = 0; i < pages; i++)
-                       rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
-       }
-
-       ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
-       if (ret)
-               return ret;
-
-       object->client = device->object.client;
-       object->parent = &device->object;
-       object->handle = handle;
-       return 0;
-}
-
-static int
-fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
-{
-       NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl;
-       struct nvkm_gsp *gsp = fbsr->client.gsp;
-       struct nvkm_gsp_object memlist;
-       int ret;
-
-       ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
-                          item->addr, item->size, NULL, &memlist);
-       if (ret)
-               return ret;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
-                                   NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO,
-                                   sizeof(*ctrl));
-       if (IS_ERR(ctrl)) {
-               ret = PTR_ERR(ctrl);
-               goto done;
-       }
-
-       ctrl->fbsrType = FBSR_TYPE_DMA;
-       ctrl->hClient = fbsr->client.object.handle;
-       ctrl->hVidMem = fbsr->hmemory++;
-       ctrl->vidOffset = 0;
-       ctrl->sysOffset = fbsr->sys_offset;
-       ctrl->size = item->size;
-
-       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
-done:
-       nvkm_gsp_rm_free(&memlist);
-       if (ret)
-               return ret;
-
-       fbsr->sys_offset += item->size;
-       return 0;
-}
-
-static int
-fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
-{
-       NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
-       struct nvkm_gsp *gsp = fbsr->client.gsp;
-       struct nvkm_gsp_object memlist;
-       int ret;
-
-       ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
-                          0, fbsr->size, sgt, &memlist);
-       if (ret)
-               return ret;
-
-       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
-                                   NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
-       if (IS_ERR(ctrl))
-               return PTR_ERR(ctrl);
-
-       ctrl->fbsrType = FBSR_TYPE_DMA;
-       ctrl->numRegions = fbsr->regions;
-       ctrl->hClient = fbsr->client.object.handle;
-       ctrl->hSysMem = fbsr->hmemory++;
-       ctrl->gspFbAllocsSysOffset = items_size;
-
-       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
-       if (ret)
-               return ret;
-
-       nvkm_gsp_rm_free(&memlist);
-       return 0;
-}
-
-static bool
-fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size)
-{
-       struct fbsr_item *item;
-
-       if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
-               return false;
-
-       item->type = type;
-       item->addr = addr;
-       item->size = size;
-       list_add_tail(&item->head, &fbsr->items);
-       return true;
-}
-
-static bool
-fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
-{
-       return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
-}
-
-static void
-r535_instmem_resume(struct nvkm_instmem *imem)
-{
-       /* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
-       if (imem->rm.fbsr_valid) {
-               nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
-               imem->rm.fbsr_valid = false;
-       }
-}
-
-static int
-r535_instmem_suspend(struct nvkm_instmem *imem)
-{
-       struct nvkm_subdev *subdev = &imem->subdev;
-       struct nvkm_device *device = subdev->device;
-       struct nvkm_gsp *gsp = device->gsp;
-       struct nvkm_instobj *iobj;
-       struct fbsr fbsr = {};
-       struct fbsr_item *item, *temp;
-       u64 items_size;
-       int ret;
-
-       INIT_LIST_HEAD(&fbsr.items);
-       fbsr.hmemory = 0xcaf00003;
-
-       /* Create a list of all regions we need RM to save during suspend. */
-       list_for_each_entry(iobj, &imem->list, head) {
-               if (iobj->preserve) {
-                       if (!fbsr_inst(&fbsr, "inst", &iobj->memory))
-                               return -ENOMEM;
-               }
-       }
-
-       list_for_each_entry(iobj, &imem->boot, head) {
-               if (!fbsr_inst(&fbsr, "boot", &iobj->memory))
-                       return -ENOMEM;
-       }
-
-       if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size))
-               return -ENOMEM;
-
-       /* Determine memory requirements. */
-       list_for_each_entry(item, &fbsr.items, head) {
-               nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n",
-                          item->addr, item->size, item->type);
-               fbsr.size += item->size;
-               fbsr.regions++;
-       }
-
-       items_size = fbsr.size;
-       nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size);
-
-       fbsr.size += gsp->fb.rsvd_size;
-       fbsr.size += gsp->fb.bios.vga_workspace.size;
-       nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
-
-       ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
-       if (ret)
-               goto done;
-
-       /* Tell RM about the sysmem which will hold VRAM contents across suspend. */
-       ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device);
-       if (ret)
-               goto done_sgt;
-
-       ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
-       if (WARN_ON(ret))
-               goto done_sgt;
-
-       /* Send VRAM regions that need saving. */
-       list_for_each_entry(item, &fbsr.items, head) {
-               ret = fbsr_send(&fbsr, item);
-               if (WARN_ON(ret))
-                       goto done_sgt;
-       }
-
-       imem->rm.fbsr_valid = true;
-
-       /* Cleanup everything except the sysmem backup, which will be removed after resume. */
-done_sgt:
-       if (ret) /* ... unless we failed already. */
-               nvkm_gsp_sg_free(device, &imem->rm.fbsr);
-done:
-       list_for_each_entry_safe(item, temp, &fbsr.items, head) {
-               list_del(&item->head);
-               kfree(item);
-       }
-
-       nvkm_gsp_device_dtor(&fbsr.device);
-       nvkm_gsp_client_dtor(&fbsr.client);
-       return ret;
-}
-
-static void *
-r535_instmem_dtor(struct nvkm_instmem *imem)
-{
-       kfree(imem->func);
-       return imem;
-}
-
-int
-r535_instmem_new(const struct nvkm_instmem_func *hw,
-                struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
-                struct nvkm_instmem **pinstmem)
-{
-       struct nvkm_instmem_func *rm;
-       int ret;
-
-       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_instmem_dtor;
-       rm->fini = hw->fini;
-       rm->suspend = r535_instmem_suspend;
-       rm->resume  = r535_instmem_resume;
-       rm->memory_new = hw->memory_new;
-       rm->memory_wrap = hw->memory_wrap;
-       rm->zero = false;
-
-       ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
-       if (ret)
-               kfree(rm);
-
-       return ret;
-}
index 7ba35ea59c06d001109e59408f167f84103a8003..a602b0cb5b31d9089781cd5961b0afad294f1b09 100644 (file)
@@ -16,8 +16,6 @@ nvkm-y += nvkm/subdev/mmu/gp10b.o
 nvkm-y += nvkm/subdev/mmu/gv100.o
 nvkm-y += nvkm/subdev/mmu/tu102.o
 
-nvkm-y += nvkm/subdev/mmu/r535.o
-
 nvkm-y += nvkm/subdev/mmu/mem.o
 nvkm-y += nvkm/subdev/mmu/memnv04.o
 nvkm-y += nvkm/subdev/mmu/memnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
deleted file mode 100644 (file)
index d3e9545..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "vmm.h"
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-static int
-r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
-{
-       NV_VASPACE_ALLOCATION_PARAMETERS *args;
-       int ret;
-
-       ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp,
-                                         &vmm->rm.client, &vmm->rm.device);
-       if (ret)
-               return ret;
-
-       args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
-                                    sizeof(*args), &vmm->rm.object);
-       if (IS_ERR(args))
-               return PTR_ERR(args);
-
-       args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
-
-       ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
-       if (ret)
-               return ret;
-
-       {
-               NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
-
-               mutex_lock(&vmm->mutex.vmm);
-               ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
-                                         &vmm->rm.rsvd);
-               mutex_unlock(&vmm->mutex.vmm);
-               if (ret)
-                       return ret;
-
-               ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
-                                           NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
-                                           sizeof(*ctrl));
-               if (IS_ERR(ctrl))
-                       return PTR_ERR(ctrl);
-
-               ctrl->pageSize = 0x20000000;
-               ctrl->virtAddrLo = vmm->rm.rsvd->addr;
-               ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
-               ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
-               ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
-               ctrl->levels[0].size = 0x20;
-               ctrl->levels[0].aperture = 1;
-               ctrl->levels[0].pageShift = 0x2f;
-               ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
-               ctrl->levels[1].size = 0x1000;
-               ctrl->levels[1].aperture = 1;
-               ctrl->levels[1].pageShift = 0x26;
-               if (vmm->pd->pde[0]->pde[0]) {
-                       ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
-                       ctrl->levels[2].size = 0x1000;
-                       ctrl->levels[2].aperture = 1;
-                       ctrl->levels[2].pageShift = 0x1d;
-               }
-
-               ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
-       }
-
-       return ret;
-}
-
-static void
-r535_mmu_dtor(struct nvkm_mmu *mmu)
-{
-       kfree(mmu->func);
-}
-
-int
-r535_mmu_new(const struct nvkm_mmu_func *hw,
-            struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
-            struct nvkm_mmu **pmmu)
-{
-       struct nvkm_mmu_func *rm;
-       int ret;
-
-       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
-               return -ENOMEM;
-
-       rm->dtor = r535_mmu_dtor;
-       rm->dma_bits = hw->dma_bits;
-       rm->mmu = hw->mmu;
-       rm->mem = hw->mem;
-       rm->vmm = hw->vmm;
-       rm->kind = hw->kind;
-       rm->kind_sys = hw->kind_sys;
-       rm->promote_vmm = r535_mmu_promote_vmm;
-
-       ret = nvkm_mmu_new_(rm, device, type, inst, pmmu);
-       if (ret)
-               kfree(rm);
-
-       return ret;
-}