Merge tag 'drm-intel-gt-next-2022-09-16' of git://anongit.freedesktop.org/drm/drm...
authorDave Airlie <airlied@redhat.com>
Tue, 20 Sep 2022 21:35:00 +0000 (07:35 +1000)
committerDave Airlie <airlied@redhat.com>
Tue, 20 Sep 2022 21:42:47 +0000 (07:42 +1000)
Cross-subsystem Changes:

- MEI subsystem pieces for XeHP SDV GSC support
  These are Acked-by Greg.

Driver Changes:

- Release mmaps on RPM suspend on discrete GPUs (Anshuman)
- Update GuC version to 7.5 on DG1, DG2 and ADL
- Revert "drm/i915/dg2: extend Wa_1409120013 to DG2" (Lucas)
- MTL enabling incl. standalone media (Matt R, Lucas)
- Explicitly clear BB_OFFSET for new contexts on Gen8+ (Chris)
- Fix throttling / perf limit reason decoding (Ashutosh)
- XeHP SDV GSC support (Vitaly, Alexander, Tomas)

- Fix issues with overriding firmware file paths (John)
- Invert if-else ladders to check latest version first (Lucas)
- Cancel GuC engine busyness worker synchronously (Umesh)

- Skip applying copy engine fuses outside PVC (Lucas)
- Eliminate Gen10 frequency read function (Lucas)
- Static code checker fixes (Gaosheng)
- Selftest improvements (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YyQ4Jgl3cpGL1/As@jlahtine-mobl.ger.corp.intel.com
20 files changed:
1  2 
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/i915_driver.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/misc/mei/hbm.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/pci-me.c

index 522ef9b4aff329625086f8ef5cf9ed9053a2f716,6672c4dadc2ce04b6d2af05096fdd8c05858789b..e83e4cd469681a29442bf60974791b5d0591311b
@@@ -123,6 -123,7 +123,7 @@@ gt-y += 
        gt/intel_ring.o \
        gt/intel_ring_submission.o \
        gt/intel_rps.o \
+       gt/intel_sa_media.o \
        gt/intel_sseu.o \
        gt/intel_sseu_debugfs.o \
        gt/intel_timeline.o \
@@@ -221,7 -222,6 +222,7 @@@ i915-y += 
        display/intel_combo_phy.o \
        display/intel_connector.o \
        display/intel_crtc.o \
 +      display/intel_crtc_state_dump.o \
        display/intel_cursor.o \
        display/intel_display.o \
        display/intel_display_power.o \
        display/intel_hdcp.o \
        display/intel_hotplug.o \
        display/intel_lpe_audio.o \
 +      display/intel_modeset_verify.o \
 +      display/intel_modeset_setup.o \
        display/intel_overlay.o \
        display/intel_pch_display.o \
        display/intel_pch_refclk.o \
index 4f4c9461a23b3983cd92adb59ed5021ec96acdf1,ed4a67ddab1948f9698663d9b86c33f169126be7..acc561c0f0aa2a2a72f98502cfe3ab2f708e4b74
  #include "gt/intel_region_lmem.h"
  #include "i915_drv.h"
  #include "i915_gem_stolen.h"
 +#include "i915_pci.h"
  #include "i915_reg.h"
  #include "i915_utils.h"
  #include "i915_vgpu.h"
  #include "intel_mchbar_regs.h"
 +#include "intel_pci_config.h"
  
  /*
   * The BIOS typically reserves some of the system's memory for the exclusive
@@@ -430,48 -428,29 +430,29 @@@ static int i915_gem_init_stolen(struct 
        reserved_base = stolen_top;
        reserved_size = 0;
  
-       switch (GRAPHICS_VER(i915)) {
-       case 2:
-       case 3:
-               break;
-       case 4:
-               if (!IS_G4X(i915))
-                       break;
-               fallthrough;
-       case 5:
-               g4x_get_stolen_reserved(i915, uncore,
+       if (GRAPHICS_VER(i915) >= 11) {
+               icl_get_stolen_reserved(i915, uncore,
                                        &reserved_base, &reserved_size);
-               break;
-       case 6:
-               gen6_get_stolen_reserved(i915, uncore,
-                                        &reserved_base, &reserved_size);
-               break;
-       case 7:
-               if (IS_VALLEYVIEW(i915))
-                       vlv_get_stolen_reserved(i915, uncore,
-                                               &reserved_base, &reserved_size);
-               else
-                       gen7_get_stolen_reserved(i915, uncore,
-                                                &reserved_base, &reserved_size);
-               break;
-       case 8:
-       case 9:
+       } else if (GRAPHICS_VER(i915) >= 8) {
                if (IS_LP(i915))
                        chv_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
                else
                        bdw_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
-               break;
-       default:
-               MISSING_CASE(GRAPHICS_VER(i915));
-               fallthrough;
-       case 11:
-       case 12:
-               icl_get_stolen_reserved(i915, uncore,
-                                       &reserved_base,
-                                       &reserved_size);
-               break;
+       } else if (GRAPHICS_VER(i915) >= 7) {
+               if (IS_VALLEYVIEW(i915))
+                       vlv_get_stolen_reserved(i915, uncore,
+                                               &reserved_base, &reserved_size);
+               else
+                       gen7_get_stolen_reserved(i915, uncore,
+                                                &reserved_base, &reserved_size);
+       } else if (GRAPHICS_VER(i915) >= 6) {
+               gen6_get_stolen_reserved(i915, uncore,
+                                        &reserved_base, &reserved_size);
+       } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
+               g4x_get_stolen_reserved(i915, uncore,
+                                       &reserved_base, &reserved_size);
        }
  
        /*
@@@ -829,13 -808,10 +810,13 @@@ i915_gem_stolen_lmem_setup(struct drm_i
        if (WARN_ON_ONCE(instance))
                return ERR_PTR(-ENODEV);
  
 +      if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
 +              return ERR_PTR(-ENXIO);
 +
        /* Use DSM base address instead for stolen memory */
        dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
        if (IS_DG1(uncore->i915)) {
 -              lmem_size = pci_resource_len(pdev, 2);
 +              lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
                if (WARN_ON(lmem_size < dsm_base))
                        return ERR_PTR(-ENODEV);
        } else {
        }
  
        dsm_size = lmem_size - dsm_base;
 -      if (pci_resource_len(pdev, 2) < lmem_size) {
 +      if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
                io_start = 0;
                io_size = 0;
        } else {
 -              io_start = pci_resource_start(pdev, 2) + dsm_base;
 +              io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
                io_size = dsm_size;
        }
  
index f64a3deb12fce16ae8a4404ba454299c45316cd3,dea6e378946f738eb45b1ddbfcc95a05a245ae59..0544b0a4a43ad16e39ac05bdc0a9779b542d2993
@@@ -361,6 -361,7 +361,6 @@@ static bool i915_ttm_eviction_valuable(
                                       const struct ttm_place *place)
  {
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 -      struct ttm_resource *res = bo->resource;
  
        if (!obj)
                return false;
        if (!i915_gem_object_evictable(obj))
                return false;
  
 -      switch (res->mem_type) {
 -      case I915_PL_LMEM0: {
 -              struct ttm_resource_manager *man =
 -                      ttm_manager_type(bo->bdev, res->mem_type);
 -              struct i915_ttm_buddy_resource *bman_res =
 -                      to_ttm_buddy_resource(res);
 -              struct drm_buddy *mm = bman_res->mm;
 -              struct drm_buddy_block *block;
 -
 -              if (!place->fpfn && !place->lpfn)
 -                      return true;
 -
 -              GEM_BUG_ON(!place->lpfn);
 -
 -              /*
 -               * If we just want something mappable then we can quickly check
 -               * if the current victim resource is using any of the CPU
 -               * visible portion.
 -               */
 -              if (!place->fpfn &&
 -                  place->lpfn == i915_ttm_buddy_man_visible_size(man))
 -                      return bman_res->used_visible_size > 0;
 -
 -              /* Real range allocation */
 -              list_for_each_entry(block, &bman_res->blocks, link) {
 -                      unsigned long fpfn =
 -                              drm_buddy_block_offset(block) >> PAGE_SHIFT;
 -                      unsigned long lpfn = fpfn +
 -                              (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
 -
 -                      if (place->fpfn < lpfn && place->lpfn > fpfn)
 -                              return true;
 -              }
 -              return false;
 -      } default:
 -              break;
 -      }
 -
 -      return true;
 +      return ttm_bo_eviction_valuable(bo, place);
  }
  
  static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@@ -509,9 -548,18 +509,18 @@@ static int i915_ttm_shrink(struct drm_i
  static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
  {
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+       intel_wakeref_t wakeref = 0;
  
        if (likely(obj)) {
+               /* ttm_bo_release() already has dma_resv_lock */
+               if (i915_ttm_cpu_maps_iomem(bo->resource))
+                       wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
                __i915_gem_object_pages_fini(obj);
+               if (wakeref)
+                       intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
                i915_ttm_free_cached_io_rsgt(obj);
        }
  }
@@@ -981,6 -1029,7 +990,7 @@@ static vm_fault_t vm_fault_ttm(struct v
        struct ttm_buffer_object *bo = area->vm_private_data;
        struct drm_device *dev = bo->base.dev;
        struct drm_i915_gem_object *obj;
+       intel_wakeref_t wakeref = 0;
        vm_fault_t ret;
        int idx;
  
                return VM_FAULT_SIGBUS;
        }
  
+       if (i915_ttm_cpu_maps_iomem(bo->resource))
+               wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
        if (!i915_ttm_resource_mappable(bo->resource)) {
                int err = -ENODEV;
                int i;
                if (err) {
                        drm_dbg(dev, "Unable to make resource CPU accessible\n");
                        dma_resv_unlock(bo->base.resv);
-                       return VM_FAULT_SIGBUS;
+                       ret = VM_FAULT_SIGBUS;
+                       goto out_rpm;
                }
        }
  
        } else {
                ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
        }
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-               return ret;
+               goto out_rpm;
+       /* ttm_bo_vm_reserve() already has dma_resv_lock */
+       if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
+               obj->userfault_count = 1;
+               mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+               list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
+               mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+       }
+       if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+               intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
+                                  msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
  
        i915_ttm_adjust_lru(obj);
  
        dma_resv_unlock(bo->base.resv);
+ out_rpm:
+       if (wakeref)
+               intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
        return ret;
  }
  
@@@ -1203,8 -1274,9 +1235,8 @@@ int __i915_gem_ttm_object_init(struct i
         * Similarly, in delayed_destroy, we can't call ttm_bo_put()
         * until successful initialization.
         */
 -      ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
 -                                 bo_type, &i915_sys_placement,
 -                                 page_size >> PAGE_SHIFT,
 +      ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
 +                                 &i915_sys_placement, page_size >> PAGE_SHIFT,
                                   &ctx, NULL, NULL, i915_ttm_bo_destroy);
        if (ret)
                return i915_ttm_err_to_gem(ret);
index cf4a326f5f481071c3de9961a44b3e2fb8266eb4,f920d54841325729222996e86b516593759d144c..ea775e601686d1e271258df9ddc346f4e1a37051
@@@ -727,7 -727,7 +727,7 @@@ static void detect_bit_6_swizzle(struc
                 * bit17 dependent, and so we need to also prevent the pages
                 * from being moved.
                 */
 -              i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
 +              i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
        }
@@@ -842,7 -842,6 +842,6 @@@ void intel_ggtt_init_fences(struct i915
  
        INIT_LIST_HEAD(&ggtt->fence_list);
        INIT_LIST_HEAD(&ggtt->userfault_list);
-       intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
  
        detect_bit_6_swizzle(ggtt);
  
index e4bac2431e41607de83512f4e6620acaa4c498d4,926a1515cd3b09bce69531327b77e2e84050da9f..d0b03a928b9acaaae274907fa73e4431186470e6
  #include "intel_gt_requests.h"
  #include "intel_migrate.h"
  #include "intel_mocs.h"
 +#include "intel_pci_config.h"
  #include "intel_pm.h"
  #include "intel_rc6.h"
  #include "intel_renderstate.h"
  #include "intel_rps.h"
+ #include "intel_sa_media.h"
  #include "intel_gt_sysfs.h"
  #include "intel_uncore.h"
  #include "shmem_utils.h"
  
static void __intel_gt_init_early(struct intel_gt *gt)
void intel_gt_common_init_early(struct intel_gt *gt)
  {
-       spin_lock_init(&gt->irq_lock);
+       spin_lock_init(gt->irq_lock);
  
+       INIT_LIST_HEAD(&gt->lmem_userfault_list);
+       mutex_init(&gt->lmem_userfault_lock);
        INIT_LIST_HEAD(&gt->closed_vma);
        spin_lock_init(&gt->closed_lock);
  
  }
  
  /* Preliminary initialization of Tile 0 */
void intel_root_gt_init_early(struct drm_i915_private *i915)
int intel_root_gt_init_early(struct drm_i915_private *i915)
  {
        struct intel_gt *gt = to_gt(i915);
  
        gt->i915 = i915;
        gt->uncore = &i915->uncore;
+       gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
+       if (!gt->irq_lock)
+               return -ENOMEM;
  
-       __intel_gt_init_early(gt);
+       intel_gt_common_init_early(gt);
+       return 0;
  }
  
  static int intel_gt_probe_lmem(struct intel_gt *gt)
@@@ -781,26 -788,25 +789,25 @@@ static int intel_gt_tile_setup(struct i
        int ret;
  
        if (!gt_is_root(gt)) {
-               struct intel_uncore_mmio_debug *mmio_debug;
                struct intel_uncore *uncore;
+               spinlock_t *irq_lock;
  
-               uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
+               uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
                if (!uncore)
                        return -ENOMEM;
  
-               mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
-               if (!mmio_debug) {
-                       kfree(uncore);
+               irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+               if (!irq_lock)
                        return -ENOMEM;
-               }
  
                gt->uncore = uncore;
-               gt->uncore->debug = mmio_debug;
+               gt->irq_lock = irq_lock;
  
-               __intel_gt_init_early(gt);
+               intel_gt_common_init_early(gt);
        }
  
        intel_uncore_init_early(gt->uncore, gt);
+       intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
  
        ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
        if (ret)
        return 0;
  }
  
- static void
- intel_gt_tile_cleanup(struct intel_gt *gt)
- {
-       intel_uncore_cleanup_mmio(gt->uncore);
-       if (!gt_is_root(gt)) {
-               kfree(gt->uncore->debug);
-               kfree(gt->uncore);
-               kfree(gt);
-       }
- }
  int intel_gt_probe_all(struct drm_i915_private *i915)
  {
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
        struct intel_gt *gt = &i915->gt0;
+       const struct intel_gt_definition *gtdef;
        phys_addr_t phys_addr;
        unsigned int mmio_bar;
+       unsigned int i;
        int ret;
  
 -      mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
 +      mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
        phys_addr = pci_resource_start(pdev, mmio_bar);
  
        /*
         * and it has been already initialized early during probe
         * in i915_driver_probe()
         */
 -      gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;
+       gt->i915 = i915;
+       gt->name = "Primary GT";
++      gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
+       drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
        ret = intel_gt_tile_setup(gt, phys_addr);
        if (ret)
                return ret;
  
        i915->gt[0] = gt;
  
-       /* TODO: add more tiles */
+       if (!HAS_EXTRA_GT_LIST(i915))
+               return 0;
+       for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
+            gtdef->name != NULL;
+            i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
+               gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
+               if (!gt) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               gt->i915 = i915;
+               gt->name = gtdef->name;
+               gt->type = gtdef->type;
+               gt->info.engine_mask = gtdef->engine_mask;
+               gt->info.id = i;
+               drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+               if (GEM_WARN_ON(range_overflows_t(resource_size_t,
+                                                 gtdef->mapping_base,
+                                                 SZ_16M,
+                                                 pci_resource_len(pdev, mmio_bar)))) {
+                       ret = -ENODEV;
+                       goto err;
+               }
+               switch (gtdef->type) {
+               case GT_TILE:
+                       ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
+                       break;
+               case GT_MEDIA:
+                       ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
+                                                    gtdef->gsi_offset);
+                       break;
+               case GT_PRIMARY:
+                       /* Primary GT should not appear in extra GT list */
+               default:
+                       MISSING_CASE(gtdef->type);
+                       ret = -ENODEV;
+               }
+               if (ret)
+                       goto err;
+               i915->gt[i] = gt;
+       }
        return 0;
+ err:
+       i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+       intel_gt_release_all(i915);
+       return ret;
  }
  
  int intel_gt_tiles_init(struct drm_i915_private *i915)
@@@ -869,10 -925,8 +926,8 @@@ void intel_gt_release_all(struct drm_i9
        struct intel_gt *gt;
        unsigned int id;
  
-       for_each_gt(gt, i915, id) {
-               intel_gt_tile_cleanup(gt);
+       for_each_gt(gt, i915, id)
                i915->gt[id] = NULL;
-       }
  }
  
  void intel_gt_info_print(const struct intel_gt_info *info,
index 053a7dab55061667b49614cd5dbd57dd51eae77a,01f42777b6e32f347f8816cf219337fbd08d923d..d94e183d716a547f0ef82e18f3c1ef0746a53a93
@@@ -105,6 -105,12 +105,12 @@@ static const char irst_name[] = "INT339
  
  static const struct drm_driver i915_drm_driver;
  
+ static void i915_release_bridge_dev(struct drm_device *dev,
+                                   void *bridge)
+ {
+       pci_dev_put(bridge);
+ }
  static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
  {
        int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
                drm_err(&dev_priv->drm, "bridge device not found\n");
                return -EIO;
        }
-       return 0;
+       return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
+                                       dev_priv->bridge_dev);
  }
  
  /* Allocate space for the MCH regs if needed, return nonzero on error */
@@@ -252,8 -260,8 +260,8 @@@ static int i915_workqueues_init(struct 
        if (dev_priv->wq == NULL)
                goto out_err;
  
 -      dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
 -      if (dev_priv->hotplug.dp_wq == NULL)
 +      dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
 +      if (dev_priv->display.hotplug.dp_wq == NULL)
                goto out_free_wq;
  
        return 0;
@@@ -268,7 -276,7 +276,7 @@@ out_err
  
  static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
  {
 -      destroy_workqueue(dev_priv->hotplug.dp_wq);
 +      destroy_workqueue(dev_priv->display.hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
  }
  
@@@ -302,8 -310,13 +310,13 @@@ static void intel_detect_preproduction_
  
  static void sanitize_gpu(struct drm_i915_private *i915)
  {
-       if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
-               __intel_gt_reset(to_gt(i915), ALL_ENGINES);
+       if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
+               struct intel_gt *gt;
+               unsigned int i;
+               for_each_gt(gt, i915, i)
+                       __intel_gt_reset(gt, ALL_ENGINES);
+       }
  }
  
  /**
@@@ -326,7 -339,7 +339,7 @@@ static int i915_driver_early_probe(stru
        intel_device_info_subplatform_init(dev_priv);
        intel_step_init(dev_priv);
  
-       intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
+       intel_uncore_mmio_debug_init_early(dev_priv);
  
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->sb_lock);
        cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
  
 -      mutex_init(&dev_priv->audio.mutex);
 -      mutex_init(&dev_priv->wm.wm_mutex);
 -      mutex_init(&dev_priv->pps_mutex);
 +      mutex_init(&dev_priv->display.audio.mutex);
 +      mutex_init(&dev_priv->display.wm.wm_mutex);
 +      mutex_init(&dev_priv->display.pps.mutex);
        mutex_init(&dev_priv->hdcp_comp_mutex);
  
        i915_memcpy_init_early(dev_priv);
  
        intel_wopcm_init_early(&dev_priv->wopcm);
  
-       intel_root_gt_init_early(dev_priv);
+       ret = intel_root_gt_init_early(dev_priv);
+       if (ret < 0)
+               goto err_rootgt;
  
        i915_drm_clients_init(&dev_priv->clients, dev_priv);
  
@@@ -382,6 -397,7 +397,7 @@@ err_gem
        i915_gem_cleanup_early(dev_priv);
        intel_gt_driver_late_release_all(dev_priv);
        i915_drm_clients_fini(&dev_priv->clients);
+ err_rootgt:
        intel_region_ttm_device_fini(dev_priv);
  err_ttm:
        vlv_suspend_cleanup(dev_priv);
@@@ -423,7 -439,8 +439,8 @@@ static void i915_driver_late_release(st
   */
  static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
  {
-       int ret;
+       struct intel_gt *gt;
+       int ret, i;
  
        if (i915_inject_probe_failure(dev_priv))
                return -ENODEV;
        if (ret < 0)
                return ret;
  
-       ret = intel_uncore_init_mmio(&dev_priv->uncore);
-       if (ret)
-               return ret;
+       for_each_gt(gt, dev_priv, i) {
+               ret = intel_uncore_init_mmio(gt->uncore);
+               if (ret)
+                       return ret;
+               ret = drmm_add_action_or_reset(&dev_priv->drm,
+                                              intel_uncore_fini_mmio,
+                                              gt->uncore);
+               if (ret)
+                       return ret;
+       }
  
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev_priv);
        intel_device_info_runtime_init(dev_priv);
  
-       ret = intel_gt_init_mmio(to_gt(dev_priv));
-       if (ret)
-               goto err_uncore;
+       for_each_gt(gt, dev_priv, i) {
+               ret = intel_gt_init_mmio(gt);
+               if (ret)
+                       goto err_uncore;
+       }
  
        /* As early as possible, scrub existing GPU state before clobbering */
        sanitize_gpu(dev_priv);
  
  err_uncore:
        intel_teardown_mchbar(dev_priv);
-       intel_uncore_fini_mmio(&dev_priv->uncore);
-       pci_dev_put(dev_priv->bridge_dev);
  
        return ret;
  }
  static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
  {
        intel_teardown_mchbar(dev_priv);
-       intel_uncore_fini_mmio(&dev_priv->uncore);
-       pci_dev_put(dev_priv->bridge_dev);
  }
  
  /**
@@@ -549,7 -572,6 +572,7 @@@ static int i915_pcode_init(struct drm_i
  static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
  {
        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
 +      struct pci_dev *root_pdev;
        int ret;
  
        if (i915_inject_probe_failure(dev_priv))
  
        intel_bw_init_hw(dev_priv);
  
 +      /*
 +       * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
 +       * This should be totally removed when we handle the pci states properly
 +       * on runtime PM and on s2idle cases.
 +       */
 +      root_pdev = pcie_find_root_port(pdev);
 +      if (root_pdev)
 +              pci_d3cold_disable(root_pdev);
 +
        return 0;
  
  err_msi:
@@@ -693,16 -706,11 +716,16 @@@ err_perf
  static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
  {
        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
 +      struct pci_dev *root_pdev;
  
        i915_perf_fini(dev_priv);
  
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);
 +
 +      root_pdev = pcie_find_root_port(pdev);
 +      if (root_pdev)
 +              pci_d3cold_enable(root_pdev);
  }
  
  /**
  static void i915_driver_register(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = &dev_priv->drm;
+       struct intel_gt *gt;
+       unsigned int i;
  
        i915_gem_driver_register(dev_priv);
        i915_pmu_register(dev_priv);
        /* Depends on sysfs having been initialized */
        i915_perf_register(dev_priv);
  
-       intel_gt_driver_register(to_gt(dev_priv));
+       for_each_gt(gt, dev_priv, i)
+               intel_gt_driver_register(gt);
  
        intel_display_driver_register(dev_priv);
  
   */
  static void i915_driver_unregister(struct drm_i915_private *dev_priv)
  {
+       struct intel_gt *gt;
+       unsigned int i;
        i915_switcheroo_unregister(dev_priv);
  
        intel_unregister_dsm_handler();
  
        intel_display_driver_unregister(dev_priv);
  
-       intel_gt_driver_unregister(to_gt(dev_priv));
+       for_each_gt(gt, dev_priv, i)
+               intel_gt_driver_unregister(gt);
  
        i915_perf_unregister(dev_priv);
        i915_pmu_unregister(dev_priv);
@@@ -784,6 -799,8 +814,8 @@@ static void i915_welcome_messages(struc
  {
        if (drm_debug_enabled(DRM_UT_DRIVER)) {
                struct drm_printer p = drm_debug_printer("i915 device info:");
+               struct intel_gt *gt;
+               unsigned int i;
  
                drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
                           INTEL_DEVID(dev_priv),
                                             INTEL_INFO(dev_priv)->platform),
                           GRAPHICS_VER(dev_priv));
  
 -              intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
 -              intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
 +              intel_device_info_print(INTEL_INFO(dev_priv),
 +                                      RUNTIME_INFO(dev_priv), &p);
                i915_print_iommu_status(dev_priv, &p);
-               intel_gt_info_print(&to_gt(dev_priv)->info, &p);
+               for_each_gt(gt, dev_priv, i)
+                       intel_gt_info_print(&gt->info, &p);
        }
  
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@@ -814,7 -832,6 +847,7 @@@ i915_driver_create(struct pci_dev *pdev
        const struct intel_device_info *match_info =
                (struct intel_device_info *)ent->driver_data;
        struct intel_device_info *device_info;
 +      struct intel_runtime_info *runtime;
        struct drm_i915_private *i915;
  
        i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
        /* Setup the write-once "constant" device info */
        device_info = mkwrite_device_info(i915);
        memcpy(device_info, match_info, sizeof(*device_info));
 -      RUNTIME_INFO(i915)->device_id = pdev->device;
 +
 +      /* Initialize initial runtime info from static const data and pdev. */
 +      runtime = RUNTIME_INFO(i915);
 +      memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
 +      runtime->device_id = pdev->device;
  
        return i915;
  }
   */
  int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
 -      const struct intel_device_info *match_info =
 -              (struct intel_device_info *)ent->driver_data;
        struct drm_i915_private *i915;
        int ret;
  
                return PTR_ERR(i915);
  
        /* Disable nuclear pageflip by default on pre-ILK */
 -      if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5)
 +      if (!i915->params.nuclear_pageflip && DISPLAY_VER(i915) < 5)
                i915->drm.driver_features &= ~DRIVER_ATOMIC;
  
        ret = pci_enable_device(pdev);
@@@ -1088,6 -1103,8 +1121,6 @@@ void i915_driver_shutdown(struct drm_i9
        intel_runtime_pm_disable(&i915->runtime_pm);
        intel_power_domains_disable(i915);
  
 -      i915_gem_suspend(i915);
 -
        if (HAS_DISPLAY(i915)) {
                drm_kms_helper_poll_disable(&i915->drm);
  
  
        intel_dmc_ucode_suspend(i915);
  
 +      i915_gem_suspend(i915);
 +
        /*
         * The only requirement is to reboot with display DC states disabled,
         * for now leaving all display power wells in the INIT power domain
@@@ -1189,8 -1204,6 +1222,8 @@@ static int i915_drm_suspend(struct drm_
  
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
 +      i915_gem_drain_freed_objects(dev_priv);
 +
        return 0;
  }
  
@@@ -1211,13 -1224,15 +1244,15 @@@ static int i915_drm_suspend_late(struc
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-       int ret;
+       struct intel_gt *gt;
+       int ret, i;
  
        disable_rpm_wakeref_asserts(rpm);
  
        i915_gem_suspend_late(dev_priv);
  
-       intel_uncore_suspend(&dev_priv->uncore);
+       for_each_gt(gt, dev_priv, i)
+               intel_uncore_suspend(gt->uncore);
  
        intel_power_domains_suspend(dev_priv,
                                    get_suspend_mode(dev_priv, hibernation));
                goto out;
        }
  
 -      /*
 -       * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
 -       * This should be totally removed when we handle the pci states properly
 -       * on runtime PM and on s2idle cases.
 -       */
 -      if (suspend_to_idle(dev_priv))
 -              pci_d3cold_disable(pdev);
 -
        pci_disable_device(pdev);
        /*
         * During hibernation on some platforms the BIOS may try to access
@@@ -1349,7 -1372,8 +1384,8 @@@ static int i915_drm_resume_early(struc
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-       int ret;
+       struct intel_gt *gt;
+       int ret, i;
  
        /*
         * We have a resume ordering issue with the snd-hda driver also
  
        pci_set_master(pdev);
  
 -      pci_d3cold_enable(pdev);
 -
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
        ret = vlv_resume_prepare(dev_priv, false);
                drm_err(&dev_priv->drm,
                        "Resume prepare failed: %d, continuing anyway\n", ret);
  
-       intel_uncore_resume_early(&dev_priv->uncore);
-       intel_gt_check_and_clear_faults(to_gt(dev_priv));
+       for_each_gt(gt, dev_priv, i) {
+               intel_uncore_resume_early(gt->uncore);
+               intel_gt_check_and_clear_faults(gt);
+       }
  
        intel_display_power_resume_early(dev_priv);
  
@@@ -1585,7 -1612,9 +1622,8 @@@ static int intel_runtime_suspend(struc
  {
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-       int ret;
 -      struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+       struct intel_gt *gt;
+       int ret, i;
  
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
                return -ENODEV;
         */
        i915_gem_runtime_suspend(dev_priv);
  
-       intel_gt_runtime_suspend(to_gt(dev_priv));
+       for_each_gt(gt, dev_priv, i)
+               intel_gt_runtime_suspend(gt);
  
        intel_runtime_pm_disable_interrupts(dev_priv);
  
-       intel_uncore_suspend(&dev_priv->uncore);
+       for_each_gt(gt, dev_priv, i)
+               intel_uncore_suspend(gt->uncore);
  
        intel_display_power_suspend(dev_priv);
  
                drm_err(&dev_priv->drm,
                        "Unclaimed access detected prior to suspending\n");
  
 -      /*
 -       * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
 -       * This should be totally removed when we handle the pci states properly
 -       * on runtime PM and on s2idle cases.
 -       */
 -      pci_d3cold_disable(pdev);
        rpm->suspended = true;
  
        /*
@@@ -1668,7 -1705,9 +1708,8 @@@ static int intel_runtime_resume(struct 
  {
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-       int ret;
 -      struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+       struct intel_gt *gt;
+       int ret, i;
  
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
                return -ENODEV;
  
        intel_opregion_notify_adapter(dev_priv, PCI_D0);
        rpm->suspended = false;
 -      pci_d3cold_enable(pdev);
        if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
                drm_dbg(&dev_priv->drm,
                        "Unclaimed access during suspend, bios?\n");
  
        ret = vlv_resume_prepare(dev_priv, true);
  
-       intel_uncore_runtime_resume(&dev_priv->uncore);
+       for_each_gt(gt, dev_priv, i)
+               intel_uncore_runtime_resume(gt->uncore);
  
        intel_runtime_pm_enable_interrupts(dev_priv);
  
         * No point of rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
-       intel_gt_runtime_resume(to_gt(dev_priv));
+       for_each_gt(gt, dev_priv, i)
+               intel_gt_runtime_resume(gt);
  
        /*
         * On VLV/CHV display interrupts are part of the display
index ba8a16d0fe7ca02d169b3c1d3eb95882db448775,b11c212f1b0d6a3a321cce90c8cae3da03c91a63..4828f9d2460d8d44f849601e769ee4345cc05f43
  #include <drm/drm_connector.h>
  #include <drm/ttm/ttm_device.h>
  
 -#include "display/intel_bios.h"
  #include "display/intel_cdclk.h"
  #include "display/intel_display.h"
 +#include "display/intel_display_core.h"
  #include "display/intel_display_power.h"
 -#include "display/intel_dmc.h"
 -#include "display/intel_dpll_mgr.h"
  #include "display/intel_dsb.h"
  #include "display/intel_fbc.h"
  #include "display/intel_frontbuffer.h"
  #include "display/intel_global_state.h"
 -#include "display/intel_gmbus.h"
  #include "display/intel_opregion.h"
  
  #include "gem/i915_gem_context_types.h"
  #include "intel_device_info.h"
  #include "intel_memory_region.h"
  #include "intel_pch.h"
 -#include "intel_pm_types.h"
  #include "intel_runtime_pm.h"
  #include "intel_step.h"
  #include "intel_uncore.h"
  #include "intel_wopcm.h"
  
 -struct dpll;
  struct drm_i915_clock_gating_funcs;
  struct drm_i915_gem_object;
  struct drm_i915_private;
 -struct intel_atomic_state;
 -struct intel_audio_funcs;
  struct intel_cdclk_config;
 -struct intel_cdclk_funcs;
  struct intel_cdclk_state;
  struct intel_cdclk_vals;
 -struct intel_color_funcs;
  struct intel_connector;
 -struct intel_crtc;
  struct intel_dp;
 -struct intel_dpll_funcs;
  struct intel_encoder;
 -struct intel_fbdev;
 -struct intel_fdi_funcs;
 -struct intel_gmbus;
 -struct intel_hotplug_funcs;
 -struct intel_initial_plane_config;
  struct intel_limit;
 -struct intel_overlay;
  struct intel_overlay_error_state;
  struct vlv_s0ix_state;
  
  /* Threshold == 5 for long IRQs, 50 for short */
  #define HPD_STORM_DEFAULT_THRESHOLD 50
  
 -struct i915_hotplug {
 -      struct delayed_work hotplug_work;
 -
 -      const u32 *hpd, *pch_hpd;
 -
 -      struct {
 -              unsigned long last_jiffies;
 -              int count;
 -              enum {
 -                      HPD_ENABLED = 0,
 -                      HPD_DISABLED = 1,
 -                      HPD_MARK_DISABLED = 2
 -              } state;
 -      } stats[HPD_NUM_PINS];
 -      u32 event_bits;
 -      u32 retry_bits;
 -      struct delayed_work reenable_work;
 -
 -      u32 long_port_mask;
 -      u32 short_port_mask;
 -      struct work_struct dig_port_work;
 -
 -      struct work_struct poll_init_work;
 -      bool poll_enabled;
 -
 -      unsigned int hpd_storm_threshold;
 -      /* Whether or not to count short HPD IRQs in HPD storms */
 -      u8 hpd_short_storm_enabled;
 -
 -      /*
 -       * if we get a HPD irq from DP and a HPD irq from non-DP
 -       * the non-DP HPD could block the workqueue on a mode config
 -       * mutex getting, that userspace may have taken. However
 -       * userspace is waiting on the DP workqueue to run which is
 -       * blocked behind the non-DP one.
 -       */
 -      struct workqueue_struct *dp_wq;
 -};
 -
  #define I915_GEM_GPU_DOMAINS \
        (I915_GEM_DOMAIN_RENDER | \
         I915_GEM_DOMAIN_SAMPLER | \
@@@ -105,13 -161,49 +105,13 @@@ struct sdvo_device_mapping 
        u8 ddc_pin;
  };
  
 -/* functions used for watermark calcs for display. */
 -struct drm_i915_wm_disp_funcs {
 -      /* update_wm is for legacy wm management */
 -      void (*update_wm)(struct drm_i915_private *dev_priv);
 -      int (*compute_pipe_wm)(struct intel_atomic_state *state,
 -                             struct intel_crtc *crtc);
 -      int (*compute_intermediate_wm)(struct intel_atomic_state *state,
 -                                     struct intel_crtc *crtc);
 -      void (*initial_watermarks)(struct intel_atomic_state *state,
 -                                 struct intel_crtc *crtc);
 -      void (*atomic_update_watermarks)(struct intel_atomic_state *state,
 -                                       struct intel_crtc *crtc);
 -      void (*optimize_watermarks)(struct intel_atomic_state *state,
 -                                  struct intel_crtc *crtc);
 -      int (*compute_global_watermarks)(struct intel_atomic_state *state);
 -};
 -
 -struct drm_i915_display_funcs {
 -      /* Returns the active state of the crtc, and if the crtc is active,
 -       * fills out the pipe-config with the hw state. */
 -      bool (*get_pipe_config)(struct intel_crtc *,
 -                              struct intel_crtc_state *);
 -      void (*get_initial_plane_config)(struct intel_crtc *,
 -                                       struct intel_initial_plane_config *);
 -      void (*crtc_enable)(struct intel_atomic_state *state,
 -                          struct intel_crtc *crtc);
 -      void (*crtc_disable)(struct intel_atomic_state *state,
 -                           struct intel_crtc *crtc);
 -      void (*commit_modeset_enables)(struct intel_atomic_state *state);
 -};
 -
  #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
  
 -enum drrs_type {
 -      DRRS_TYPE_NONE,
 -      DRRS_TYPE_STATIC,
 -      DRRS_TYPE_SEAMLESS,
 -};
 +#define GEM_QUIRK_PIN_SWIZZLED_PAGES  BIT(0)
  
  #define QUIRK_LVDS_SSC_DISABLE (1<<1)
  #define QUIRK_INVERT_BRIGHTNESS (1<<2)
  #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 -#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
  #define QUIRK_INCREASE_T12_DELAY (1<<6)
  #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
  #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
@@@ -216,19 -308,76 +216,19 @@@ struct intel_vbt_data 
        /* bdb version */
        u16 version;
  
 -      struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 -      struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 -
        /* Feature bits */
        unsigned int int_tv_support:1;
 -      unsigned int lvds_dither:1;
        unsigned int int_crt_support:1;
        unsigned int lvds_use_ssc:1;
        unsigned int int_lvds_support:1;
        unsigned int display_clock_mode:1;
        unsigned int fdi_rx_polarity_inverted:1;
 -      unsigned int panel_type:4;
        int lvds_ssc_freq;
 -      unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
        enum drm_panel_orientation orientation;
  
        bool override_afc_startup;
        u8 override_afc_startup_val;
  
 -      u8 seamless_drrs_min_refresh_rate;
 -      enum drrs_type drrs_type;
 -
 -      struct {
 -              int rate;
 -              int lanes;
 -              int preemphasis;
 -              int vswing;
 -              int bpp;
 -              struct edp_power_seq pps;
 -              u8 drrs_msa_timing_delay;
 -              bool low_vswing;
 -              bool initialized;
 -              bool hobl;
 -      } edp;
 -
 -      struct {
 -              bool enable;
 -              bool full_link;
 -              bool require_aux_wakeup;
 -              int idle_frames;
 -              int tp1_wakeup_time_us;
 -              int tp2_tp3_wakeup_time_us;
 -              int psr2_tp2_tp3_wakeup_time_us;
 -      } psr;
 -
 -      struct {
 -              u16 pwm_freq_hz;
 -              u16 brightness_precision_bits;
 -              bool present;
 -              bool active_low_pwm;
 -              u8 min_brightness;      /* min_brightness/255 of max */
 -              u8 controller;          /* brightness controller number */
 -              enum intel_backlight_type type;
 -      } backlight;
 -
 -      /* MIPI DSI */
 -      struct {
 -              u16 panel_id;
 -              struct mipi_config *config;
 -              struct mipi_pps_data *pps;
 -              u16 bl_ports;
 -              u16 cabc_ports;
 -              u8 seq_version;
 -              u32 size;
 -              u8 *data;
 -              const u8 *sequence[MIPI_SEQ_MAX];
 -              u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
 -              enum drm_panel_orientation orientation;
 -      } dsi;
 -
        int crt_ddc_pin;
  
        struct list_head display_devices;
@@@ -263,11 -412,32 +263,11 @@@ struct i915_selftest_stash 
        struct ida mock_region_instances;
  };
  
 -/* intel_audio.c private */
 -struct intel_audio_private {
 -      /* Display internal audio functions */
 -      const struct intel_audio_funcs *funcs;
 -
 -      /* hda/i915 audio component */
 -      struct i915_audio_component *component;
 -      bool component_registered;
 -      /* mutex for audio/video sync */
 -      struct mutex mutex;
 -      int power_refcount;
 -      u32 freq_cntrl;
 -
 -      /* Used to save the pipe-to-encoder mapping for audio */
 -      struct intel_encoder *encoder_map[I915_MAX_PIPES];
 -
 -      /* necessary resource sharing with HDMI LPE audio driver. */
 -      struct {
 -              struct platform_device *platdev;
 -              int irq;
 -      } lpe;
 -};
 -
  struct drm_i915_private {
        struct drm_device drm;
  
 +      struct intel_display display;
 +
        /* FIXME: Device release actions should all be moved to drmm_ */
        bool do_release;
  
  
        struct intel_wopcm wopcm;
  
 -      struct intel_dmc dmc;
 -
 -      struct intel_gmbus *gmbus[GMBUS_NUM_PINS];
 -
 -      /** gmbus_mutex protects against concurrent usage of the single hw gmbus
 -       * controller on different i2c buses. */
 -      struct mutex gmbus_mutex;
 -
 -      /**
 -       * Base address of where the gmbus and gpio blocks are located (either
 -       * on PCH or on SoC for platforms without PCH).
 -       */
 -      u32 gpio_mmio_base;
 -
        /* MMIO base address for MIPI regs */
        u32 mipi_mmio_base;
  
 -      u32 pps_mmio_base;
 -
 -      wait_queue_head_t gmbus_wait_queue;
 -
        struct pci_dev *bridge_dev;
  
        struct rb_root uabi_engines;
        };
        u32 pipestat_irq_mask[I915_MAX_PIPES];
  
 -      struct i915_hotplug hotplug;
        struct intel_fbc *fbc[I915_MAX_FBCS];
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
  
        bool preserve_bios_swizzle;
  
 -      /* overlay */
 -      struct intel_overlay *overlay;
 -
        /* backlight registers and fields in struct intel_panel */
        struct mutex backlight_lock;
  
 -      /* protects panel power sequencer state */
 -      struct mutex pps_mutex;
 -
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_preferred_vco_freq;
        unsigned int max_cdclk_freq;
        /* pm private clock gating functions */
        const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
  
 -      /* pm display functions */
 -      const struct drm_i915_wm_disp_funcs *wm_disp;
 -
 -      /* irq display functions */
 -      const struct intel_hotplug_funcs *hotplug_funcs;
 -
 -      /* fdi display functions */
 -      const struct intel_fdi_funcs *fdi_funcs;
 -
 -      /* display pll funcs */
 -      const struct intel_dpll_funcs *dpll_funcs;
 -
 -      /* Display functions */
 -      const struct drm_i915_display_funcs *display;
 -
 -      /* Display internal color functions */
 -      const struct intel_color_funcs *color_funcs;
 -
 -      /* Display CDCLK functions */
 -      const struct intel_cdclk_funcs *cdclk_funcs;
 -
        /* PCH chipset type */
        enum intel_pch pch_type;
        unsigned short pch_id;
  
 +      unsigned long gem_quirks;
        unsigned long quirks;
  
        struct drm_atomic_state *modeset_restore_state;
  
        /* Kernel Modesetting */
  
 -      /**
 -       * dpll and cdclk state is protected by connection_mutex
 -       * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
 -       * Must be global rather than per dpll, because on some platforms plls
 -       * share registers.
 -       */
 -      struct {
 -              struct mutex lock;
 -
 -              int num_shared_dpll;
 -              struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 -              const struct intel_dpll_mgr *mgr;
 -
 -              struct {
 -                      int nssc;
 -                      int ssc;
 -              } ref_clks;
 -      } dpll;
 -
        struct list_head global_obj_list;
  
        struct i915_frontbuffer_tracking fb_tracking;
  
        struct i915_gpu_error gpu_error;
  
 -      /* list of fbdev register on this device */
 -      struct intel_fbdev *fbdev;
 -      struct work_struct fbdev_suspend_work;
 -
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
  
        struct i915_suspend_saved_registers regfile;
        struct vlv_s0ix_state *vlv_s0ix_state;
  
 -      enum {
 -              I915_SAGV_UNKNOWN = 0,
 -              I915_SAGV_DISABLED,
 -              I915_SAGV_ENABLED,
 -              I915_SAGV_NOT_CONTROLLED
 -      } sagv_status;
 -
 -      u32 sagv_block_time_us;
 -
 -      struct {
 -              /*
 -               * Raw watermark latency values:
 -               * in 0.1us units for WM0,
 -               * in 0.5us units for WM1+.
 -               */
 -              /* primary */
 -              u16 pri_latency[5];
 -              /* sprite */
 -              u16 spr_latency[5];
 -              /* cursor */
 -              u16 cur_latency[5];
 -              /*
 -               * Raw watermark memory latency values
 -               * for SKL for all 8 levels
 -               * in 1us units.
 -               */
 -              u16 skl_latency[8];
 -
 -              /* current hardware state */
 -              union {
 -                      struct ilk_wm_values hw;
 -                      struct vlv_wm_values vlv;
 -                      struct g4x_wm_values g4x;
 -              };
 -
 -              u8 max_level;
 -
 -              /*
 -               * Should be held around atomic WM register writing; also
 -               * protects * intel_crtc->wm.active and
 -               * crtc_state->wm.need_postvbl_update.
 -               */
 -              struct mutex wm_mutex;
 -      } wm;
 -
        struct dram_info {
                bool wm_lv_0_adjust_needed;
                u8 num_channels;
  
        struct kobject *sysfs_gt;
  
+       /* Quick lookup of media GT (current platforms only have one) */
+       struct intel_gt *media_gt;
        struct {
                struct i915_gem_contexts {
                        spinlock_t lock; /* locks list */
                struct file *mmap_singleton;
        } gem;
  
 -      /* Window2 specifies time required to program DSB (Window2) in number of scan lines */
 -      u8 window2_delay;
 -
        u8 pch_ssc_use;
  
        /* For i915gm/i945gm vblank irq workaround */
  
        bool ipc_enabled;
  
 -      struct intel_audio_private audio;
 -
        struct i915_pmu pmu;
  
        struct i915_drm_clients clients;
@@@ -604,6 -895,26 +607,6 @@@ static inline struct intel_gt *to_gt(st
  
  #define I915_GTT_OFFSET_NONE ((u32)-1)
  
 -/*
 - * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 - * considered to be the frontbuffer for the given plane interface-wise. This
 - * doesn't mean that the hw necessarily already scans it out, but that any
 - * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 - *
 - * We have one bit per pipe and per scanout plane type.
 - */
 -#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
 -#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
 -      BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
 -      BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
 -      BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
 -})
 -#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
 -      BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
 -#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
 -      GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
 -              INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
 -
  #define INTEL_INFO(dev_priv)  (&(dev_priv)->__info)
  #define RUNTIME_INFO(dev_priv)        (&(dev_priv)->__runtime)
  #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
  
  #define IP_VER(ver, rel)              ((ver) << 8 | (rel))
  
 -#define GRAPHICS_VER(i915)            (INTEL_INFO(i915)->graphics.ver)
 -#define GRAPHICS_VER_FULL(i915)               IP_VER(INTEL_INFO(i915)->graphics.ver, \
 -                                             INTEL_INFO(i915)->graphics.rel)
 +#define GRAPHICS_VER(i915)            (RUNTIME_INFO(i915)->graphics.ver)
 +#define GRAPHICS_VER_FULL(i915)               IP_VER(RUNTIME_INFO(i915)->graphics.ver, \
 +                                             RUNTIME_INFO(i915)->graphics.rel)
  #define IS_GRAPHICS_VER(i915, from, until) \
        (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
  
@@@ -966,7 -1277,7 +969,7 @@@ IS_SUBPLATFORM(const struct drm_i915_pr
  
  #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
  
 -#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
 +#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type)
  #define HAS_PPGTT(dev_priv) \
        (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
  #define HAS_FULL_PPGTT(dev_priv) \
  
  #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
        GEM_BUG_ON((sizes) == 0); \
 -      ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
 +      ((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \
  })
  
  #define HAS_OVERLAY(dev_priv)          (INTEL_INFO(dev_priv)->display.has_overlay)
  #define I915_HAS_HOTPLUG(dev_priv)    (INTEL_INFO(dev_priv)->display.has_hotplug)
  
  #define HAS_FW_BLC(dev_priv)  (DISPLAY_VER(dev_priv) > 2)
 -#define HAS_FBC(dev_priv)     (INTEL_INFO(dev_priv)->display.fbc_mask != 0)
 +#define HAS_FBC(dev_priv)     (RUNTIME_INFO(dev_priv)->fbc_mask != 0)
  #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
  
  #define HAS_IPS(dev_priv)     (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
  
  #define HAS_DP_MST(dev_priv)  (INTEL_INFO(dev_priv)->display.has_dp_mst)
 -#define HAS_DP20(dev_priv)    (IS_DG2(dev_priv))
 +#define HAS_DP20(dev_priv)    (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
  
  #define HAS_CDCLK_CRAWL(dev_priv)      (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
  #define HAS_DDI(dev_priv)              (INTEL_INFO(dev_priv)->display.has_ddi)
  #define HAS_PSR_HW_TRACKING(dev_priv) \
        (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
  #define HAS_PSR2_SEL_FETCH(dev_priv)   (DISPLAY_VER(dev_priv) >= 12)
 -#define HAS_TRANSCODER(dev_priv, trans)        ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)
 +#define HAS_TRANSCODER(dev_priv, trans)        ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
  
  #define HAS_RC6(dev_priv)              (INTEL_INFO(dev_priv)->has_rc6)
  #define HAS_RC6p(dev_priv)             (INTEL_INFO(dev_priv)->has_rc6p)
  
  #define HAS_RPS(dev_priv)     (INTEL_INFO(dev_priv)->has_rps)
  
 -#define HAS_DMC(dev_priv)     (INTEL_INFO(dev_priv)->display.has_dmc)
 +#define HAS_DMC(dev_priv)     (RUNTIME_INFO(dev_priv)->has_dmc)
  
  #define HAS_HECI_PXP(dev_priv) \
        (INTEL_INFO(dev_priv)->has_heci_pxp)
  
  #define HAS_IPC(dev_priv)              (INTEL_INFO(dev_priv)->display.has_ipc)
  
 -#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
 +#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
  #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
  
+ #define HAS_EXTRA_GT_LIST(dev_priv)   (INTEL_INFO(dev_priv)->extra_gt_list)
  /*
   * Platform has the dedicated compression control state for each lmem surfaces
   * stored in lmem to support the 3D and media compression formats.
  
  #define HAS_GT_UC(dev_priv)   (INTEL_INFO(dev_priv)->has_gt_uc)
  
 -#define HAS_POOLED_EU(dev_priv)       (INTEL_INFO(dev_priv)->has_pooled_eu)
 +#define HAS_POOLED_EU(dev_priv)       (RUNTIME_INFO(dev_priv)->has_pooled_eu)
  
  #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)   (INTEL_INFO(dev_priv)->has_global_mocs)
  
  #define GT_FREQUENCY_MULTIPLIER 50
  #define GEN9_FREQ_SCALER 3
  
 -#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))
 +#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
  
 -#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)
 +#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
  
  #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
  
  
  /* Only valid when HAS_DISPLAY() is true */
  #define INTEL_DISPLAY_ENABLED(dev_priv) \
 -      (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
 +      (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)),         \
 +       !(dev_priv)->params.disable_display &&                         \
 +       !intel_opregion_headless_sku(dev_priv))
  
  #define HAS_GUC_DEPRIVILEGE(dev_priv) \
        (INTEL_INFO(dev_priv)->has_guc_deprivilege)
  #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
                                              IS_ALDERLAKE_S(dev_priv))
  
 -#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))
 +#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
  
  #define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)
  
index 0d0e0cb189d8cb1ebafcc3c52aa5f7249eab6e67,c8e14ed9c2a96e35f88ca34ccc681cbd6b5270bf..53b63540c3c760b4da15d63a59072e03b92ac439
@@@ -842,6 -842,10 +842,10 @@@ void i915_gem_runtime_suspend(struct dr
                                 &to_gt(i915)->ggtt->userfault_list, userfault_link)
                __i915_gem_object_release_mmap_gtt(obj);
  
+       list_for_each_entry_safe(obj, on,
+                                &to_gt(i915)->lmem_userfault_list, userfault_link)
+               i915_gem_object_runtime_pm_release_mmap_offset(obj);
        /*
         * The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
@@@ -1035,7 -1039,7 +1039,7 @@@ i915_gem_madvise_ioctl(struct drm_devic
  
        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
 -          i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 +          i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_clear_tiling_quirk(obj);
@@@ -1091,7 -1095,8 +1095,7 @@@ int i915_gem_init(struct drm_i915_priva
  
        /* We need to fallback to 4K pages if host doesn't support huge gtt. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
 -              mkwrite_device_info(dev_priv)->page_sizes =
 -                      I915_GTT_PAGE_SIZE_4K;
 +              RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
  
        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
@@@ -1172,7 -1177,7 +1176,7 @@@ void i915_gem_driver_unregister(struct 
  
  void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
  {
-       intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
+       intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
  
        i915_gem_suspend_late(dev_priv);
        intel_gt_driver_remove(to_gt(dev_priv));
index c2f2d7b8d9645addb39be9118cfe9d8d91c4c1b7,5652acdcf910485d08d12bb6d7515c9c2b4b028e..14efd58e37d7c24034f8fa013ff4a0391b108c10
@@@ -65,7 -65,7 +65,7 @@@
  
  /*
   * Interrupt statistic for PMU. Increments the counter only if the
 - * interrupt originated from the the GPU so interrupts from a device which
 + * interrupt originated from the GPU so interrupts from a device which
   * shares the interrupt line are not accounted.
   */
  static inline void pmu_irq_stats(struct drm_i915_private *i915,
@@@ -185,7 -185,7 +185,7 @@@ static const u32 hpd_sde_dg1[HPD_NUM_PI
  
  static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
  {
 -      struct i915_hotplug *hpd = &dev_priv->hotplug;
 +      struct intel_hotplug *hpd = &dev_priv->display.hotplug;
  
        if (HAS_GMCH(dev_priv)) {
                if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
@@@ -1104,9 -1104,9 +1104,9 @@@ static void ivb_parity_work(struct work
  
  out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
  
        mutex_unlock(&dev_priv->drm.struct_mutex);
  }
@@@ -1272,7 -1272,7 +1272,7 @@@ static u32 intel_hpd_enabled_irqs(struc
        u32 enabled_irqs = 0;
  
        for_each_intel_encoder(&dev_priv->drm, encoder)
 -              if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
 +              if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
                        enabled_irqs |= hpd[encoder->hpd_pin];
  
        return enabled_irqs;
@@@ -1304,12 -1304,12 +1304,12 @@@ static u32 intel_hpd_hotplug_enables(st
  
  static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
  {
 -      wake_up_all(&dev_priv->gmbus_wait_queue);
 +      wake_up_all(&dev_priv->display.gmbus.wait_queue);
  }
  
  static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
  {
 -      wake_up_all(&dev_priv->gmbus_wait_queue);
 +      wake_up_all(&dev_priv->display.gmbus.wait_queue);
  }
  
  #if defined(CONFIG_DEBUG_FS)
@@@ -1637,7 -1637,7 +1637,7 @@@ static void i9xx_hpd_irq_handler(struc
        if (hotplug_trigger) {
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   hotplug_trigger, hotplug_trigger,
 -                                 dev_priv->hotplug.hpd,
 +                                 dev_priv->display.hotplug.hpd,
                                   i9xx_port_hotplug_long_detect);
  
                intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@@ -1841,7 -1841,7 +1841,7 @@@ static void ibx_hpd_irq_handler(struct 
  
        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                           hotplug_trigger, dig_hotplug_reg,
 -                         dev_priv->hotplug.pch_hpd,
 +                         dev_priv->display.hotplug.pch_hpd,
                           pch_port_hotplug_long_detect);
  
        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@@ -1986,7 -1986,7 +1986,7 @@@ static void icp_irq_handler(struct drm_
  
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   ddi_hotplug_trigger, dig_hotplug_reg,
 -                                 dev_priv->hotplug.pch_hpd,
 +                                 dev_priv->display.hotplug.pch_hpd,
                                   icp_ddi_port_hotplug_long_detect);
        }
  
  
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   tc_hotplug_trigger, dig_hotplug_reg,
 -                                 dev_priv->hotplug.pch_hpd,
 +                                 dev_priv->display.hotplug.pch_hpd,
                                   icp_tc_port_hotplug_long_detect);
        }
  
@@@ -2024,7 -2024,7 +2024,7 @@@ static void spt_irq_handler(struct drm_
  
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   hotplug_trigger, dig_hotplug_reg,
 -                                 dev_priv->hotplug.pch_hpd,
 +                                 dev_priv->display.hotplug.pch_hpd,
                                   spt_port_hotplug_long_detect);
        }
  
  
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   hotplug2_trigger, dig_hotplug_reg,
 -                                 dev_priv->hotplug.pch_hpd,
 +                                 dev_priv->display.hotplug.pch_hpd,
                                   spt_port_hotplug2_long_detect);
        }
  
@@@ -2057,7 -2057,7 +2057,7 @@@ static void ilk_hpd_irq_handler(struct 
  
        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                           hotplug_trigger, dig_hotplug_reg,
 -                         dev_priv->hotplug.hpd,
 +                         dev_priv->display.hotplug.hpd,
                           ilk_port_hotplug_long_detect);
  
        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@@ -2237,7 -2237,7 +2237,7 @@@ static void bxt_hpd_irq_handler(struct 
  
        intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                           hotplug_trigger, dig_hotplug_reg,
 -                         dev_priv->hotplug.hpd,
 +                         dev_priv->display.hotplug.hpd,
                           bxt_port_hotplug_long_detect);
  
        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@@ -2257,7 -2257,7 +2257,7 @@@ static void gen11_hpd_irq_handler(struc
  
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   trigger_tc, dig_hotplug_reg,
 -                                 dev_priv->hotplug.hpd,
 +                                 dev_priv->display.hotplug.hpd,
                                   gen11_port_hotplug_long_detect);
        }
  
  
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   trigger_tbt, dig_hotplug_reg,
 -                                 dev_priv->hotplug.hpd,
 +                                 dev_priv->display.hotplug.hpd,
                                   gen11_port_hotplug_long_detect);
        }
  
@@@ -2653,9 -2653,9 +2653,9 @@@ static irqreturn_t gen8_irq_handler(in
  }
  
  static u32
 -gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
 +gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
  {
 -      void __iomem * const regs = gt->uncore->regs;
 +      void __iomem * const regs = i915->uncore.regs;
        u32 iir;
  
        if (!(master_ctl & GEN11_GU_MISC_IRQ))
  }
  
  static void
 -gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
 +gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
  {
        if (iir & GEN11_GU_MISC_GSE)
 -              intel_opregion_asle_intr(gt->i915);
 +              intel_opregion_asle_intr(i915);
  }
  
  static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@@ -2736,11 -2736,11 +2736,11 @@@ static irqreturn_t gen11_irq_handler(in
        if (master_ctl & GEN11_DISPLAY_IRQ)
                gen11_display_irq_handler(i915);
  
 -      gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
 +      gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
  
        gen11_master_intr_enable(regs);
  
 -      gen11_gu_misc_irq_handler(gt, gu_misc_iir);
 +      gen11_gu_misc_irq_handler(i915, gu_misc_iir);
  
        pmu_irq_stats(i915, IRQ_HANDLED);
  
@@@ -2801,11 -2801,11 +2801,11 @@@ static irqreturn_t dg1_irq_handler(int 
        if (master_ctl & GEN11_DISPLAY_IRQ)
                gen11_display_irq_handler(i915);
  
 -      gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
 +      gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
  
        dg1_master_intr_enable(regs);
  
 -      gen11_gu_misc_irq_handler(gt, gu_misc_iir);
 +      gen11_gu_misc_irq_handler(i915, gu_misc_iir);
  
        pmu_irq_stats(i915, IRQ_HANDLED);
  
@@@ -3313,8 -3313,8 +3313,8 @@@ static void ibx_hpd_irq_setup(struct dr
  {
        u32 hotplug_irqs, enabled_irqs;
  
 -      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 -      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 +      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
 +      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
  
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
  
@@@ -3383,8 -3383,8 +3383,8 @@@ static void icp_hpd_irq_setup(struct dr
  {
        u32 hotplug_irqs, enabled_irqs;
  
 -      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 -      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 +      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
 +      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
  
        if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
                intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
@@@ -3460,8 -3460,8 +3460,8 @@@ static void gen11_hpd_irq_setup(struct 
        u32 hotplug_irqs, enabled_irqs;
        u32 val;
  
 -      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
 -      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
 +      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
 +      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
  
        val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
        val &= ~hotplug_irqs;
@@@ -3538,8 -3538,8 +3538,8 @@@ static void spt_hpd_irq_setup(struct dr
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
                intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
  
 -      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 -      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
 +      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
 +      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
  
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
  
@@@ -3578,8 -3578,8 +3578,8 @@@ static void ilk_hpd_irq_setup(struct dr
  {
        u32 hotplug_irqs, enabled_irqs;
  
 -      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
 -      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
 +      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
 +      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
  
        if (DISPLAY_VER(dev_priv) >= 8)
                bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@@ -3636,8 -3636,8 +3636,8 @@@ static void bxt_hpd_irq_setup(struct dr
  {
        u32 hotplug_irqs, enabled_irqs;
  
 -      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
 -      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
 +      enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
 +      hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
  
        bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
  
@@@ -4370,8 -4370,8 +4370,8 @@@ HPD_FUNCS(ilk)
  
  void intel_hpd_irq_setup(struct drm_i915_private *i915)
  {
 -      if (i915->display_irqs_enabled && i915->hotplug_funcs)
 -              i915->hotplug_funcs->hpd_irq_setup(i915);
 +      if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
 +              i915->display.funcs.hotplug->hpd_irq_setup(i915);
  }
  
  /**
@@@ -4413,33 -4413,33 +4413,33 @@@ void intel_irq_init(struct drm_i915_pri
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->display_irqs_enabled = false;
  
 -      dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
 +      dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
        /* If we have MST support, we want to avoid doing short HPD IRQ storm
         * detection, as short HPD storms will occur as a natural part of
         * sideband messaging with MST.
         * On older platforms however, IRQ storms can occur with both long and
         * short pulses, as seen on some G4x systems.
         */
 -      dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
 +      dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
  
        if (HAS_GMCH(dev_priv)) {
                if (I915_HAS_HOTPLUG(dev_priv))
 -                      dev_priv->hotplug_funcs = &i915_hpd_funcs;
 +                      dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
        } else {
                if (HAS_PCH_DG2(dev_priv))
 -                      dev_priv->hotplug_funcs = &icp_hpd_funcs;
 +                      dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
                else if (HAS_PCH_DG1(dev_priv))
 -                      dev_priv->hotplug_funcs = &dg1_hpd_funcs;
 +                      dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
                else if (DISPLAY_VER(dev_priv) >= 11)
 -                      dev_priv->hotplug_funcs = &gen11_hpd_funcs;
 +                      dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
                else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 -                      dev_priv->hotplug_funcs = &bxt_hpd_funcs;
 +                      dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
                else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 -                      dev_priv->hotplug_funcs = &icp_hpd_funcs;
 +                      dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
                else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
 -                      dev_priv->hotplug_funcs = &spt_hpd_funcs;
 +                      dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
                else
 -                      dev_priv->hotplug_funcs = &ilk_hpd_funcs;
 +                      dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
        }
  }
  
index 26b25d9434d662634b03b5caaa5771f9b5f47197,2899c7cbdfb525361c75c325184cf4834c319d67..19fc00bcd7b9ba9067c1e064bd4c9dfc9215b674
  #include <drm/drm_drv.h>
  #include <drm/i915_pciids.h>
  
+ #include "gt/intel_gt_regs.h"
+ #include "gt/intel_sa_media.h"
  #include "i915_driver.h"
  #include "i915_drv.h"
  #include "i915_pci.h"
  #include "i915_reg.h"
 +#include "intel_pci_config.h"
  
  #define PLATFORM(x) .platform = (x)
  #define GEN(x) \
 -      .graphics.ver = (x), \
 +      .__runtime.graphics.ver = (x), \
        .media.ver = (x), \
        .display.ver = (x)
  
  #define I845_PIPE_OFFSETS \
 -      .pipe_offsets = { \
 +      .display.pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
        }, \
 -      .trans_offsets = { \
 +      .display.trans_offsets = { \
                [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
        }
  
  #define I9XX_PIPE_OFFSETS \
 -      .pipe_offsets = { \
 +      .display.pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
                [TRANSCODER_B] = PIPE_B_OFFSET, \
        }, \
 -      .trans_offsets = { \
 +      .display.trans_offsets = { \
                [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
                [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
        }
  
  #define IVB_PIPE_OFFSETS \
 -      .pipe_offsets = { \
 +      .display.pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
                [TRANSCODER_B] = PIPE_B_OFFSET, \
                [TRANSCODER_C] = PIPE_C_OFFSET, \
        }, \
 -      .trans_offsets = { \
 +      .display.trans_offsets = { \
                [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
                [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
                [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
        }
  
  #define HSW_PIPE_OFFSETS \
 -      .pipe_offsets = { \
 +      .display.pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
                [TRANSCODER_B] = PIPE_B_OFFSET, \
                [TRANSCODER_C] = PIPE_C_OFFSET, \
                [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
        }, \
 -      .trans_offsets = { \
 +      .display.trans_offsets = { \
                [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
                [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
                [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
        }
  
  #define CHV_PIPE_OFFSETS \
 -      .pipe_offsets = { \
 +      .display.pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
                [TRANSCODER_B] = PIPE_B_OFFSET, \
                [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
        }, \
 -      .trans_offsets = { \
 +      .display.trans_offsets = { \
                [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
                [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
                [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
        }
  
  #define I845_CURSOR_OFFSETS \
 -      .cursor_offsets = { \
 +      .display.cursor_offsets = { \
                [PIPE_A] = CURSOR_A_OFFSET, \
        }
  
  #define I9XX_CURSOR_OFFSETS \
 -      .cursor_offsets = { \
 +      .display.cursor_offsets = { \
                [PIPE_A] = CURSOR_A_OFFSET, \
                [PIPE_B] = CURSOR_B_OFFSET, \
        }
  
  #define CHV_CURSOR_OFFSETS \
 -      .cursor_offsets = { \
 +      .display.cursor_offsets = { \
                [PIPE_A] = CURSOR_A_OFFSET, \
                [PIPE_B] = CURSOR_B_OFFSET, \
                [PIPE_C] = CHV_CURSOR_C_OFFSET, \
        }
  
  #define IVB_CURSOR_OFFSETS \
 -      .cursor_offsets = { \
 +      .display.cursor_offsets = { \
                [PIPE_A] = CURSOR_A_OFFSET, \
                [PIPE_B] = IVB_CURSOR_B_OFFSET, \
                [PIPE_C] = IVB_CURSOR_C_OFFSET, \
        }
  
  #define TGL_CURSOR_OFFSETS \
 -      .cursor_offsets = { \
 +      .display.cursor_offsets = { \
                [PIPE_A] = CURSOR_A_OFFSET, \
                [PIPE_B] = IVB_CURSOR_B_OFFSET, \
                [PIPE_C] = IVB_CURSOR_C_OFFSET, \
        }
  
  #define I9XX_COLORS \
 -      .color = { .gamma_lut_size = 256 }
 +      .display.color = { .gamma_lut_size = 256 }
  #define I965_COLORS \
 -      .color = { .gamma_lut_size = 129, \
 +      .display.color = { .gamma_lut_size = 129, \
                   .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
        }
  #define ILK_COLORS \
 -      .color = { .gamma_lut_size = 1024 }
 +      .display.color = { .gamma_lut_size = 1024 }
  #define IVB_COLORS \
 -      .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
 +      .display.color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
  #define CHV_COLORS \
 -      .color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
 -                 .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
 -                 .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
 +      .display.color = { \
 +              .degamma_lut_size = 65, .gamma_lut_size = 257, \
 +              .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
 +              .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
        }
  #define GLK_COLORS \
 -      .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
 -                 .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
 -                                      DRM_COLOR_LUT_EQUAL_CHANNELS, \
 +      .display.color = { \
 +              .degamma_lut_size = 33, .gamma_lut_size = 1024, \
 +              .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
 +                                   DRM_COLOR_LUT_EQUAL_CHANNELS, \
        }
  #define ICL_COLORS \
 -      .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \
 -                 .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
 -                                      DRM_COLOR_LUT_EQUAL_CHANNELS, \
 -                 .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
 +      .display.color = { \
 +              .degamma_lut_size = 33, .gamma_lut_size = 262145, \
 +              .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
 +                                   DRM_COLOR_LUT_EQUAL_CHANNELS, \
 +              .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
        }
  
  /* Keep in gen based order, and chronological order within a gen */
  
  #define GEN_DEFAULT_PAGE_SIZES \
 -      .page_sizes = I915_GTT_PAGE_SIZE_4K
 +      .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K
  
  #define GEN_DEFAULT_REGIONS \
 -      .memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
 +      .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
  
  #define I830_FEATURES \
        GEN(2), \
        .is_mobile = 1, \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_overlay = 1, \
        .display.cursor_needs_physical = 1, \
        .display.overlay_needs_physical = 1, \
        .has_3d_pipeline = 1, \
        .hws_needs_physical = 1, \
        .unfenced_needs_alignment = 1, \
 -      .platform_engine_mask = BIT(RCS0), \
 +      .__runtime.platform_engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
        .dma_mask_size = 32, \
  
  #define I845_FEATURES \
        GEN(2), \
 -      .display.pipe_mask = BIT(PIPE_A), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \
 +      .__runtime.pipe_mask = BIT(PIPE_A), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A), \
        .display.has_overlay = 1, \
        .display.overlay_needs_physical = 1, \
        .display.has_gmch = 1, \
        .gpu_reset_clobbers_display = true, \
        .hws_needs_physical = 1, \
        .unfenced_needs_alignment = 1, \
 -      .platform_engine_mask = BIT(RCS0), \
 +      .__runtime.platform_engine_mask = BIT(RCS0), \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
        .dma_mask_size = 32, \
@@@ -222,22 -221,22 +225,22 @@@ static const struct intel_device_info i
  static const struct intel_device_info i85x_info = {
        I830_FEATURES,
        PLATFORM(INTEL_I85X),
 -      .display.fbc_mask = BIT(INTEL_FBC_A),
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A),
  };
  
  static const struct intel_device_info i865g_info = {
        I845_FEATURES,
        PLATFORM(INTEL_I865G),
 -      .display.fbc_mask = BIT(INTEL_FBC_A),
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A),
  };
  
  #define GEN3_FEATURES \
        GEN(3), \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_gmch = 1, \
        .gpu_reset_clobbers_display = true, \
 -      .platform_engine_mask = BIT(RCS0), \
 +      .__runtime.platform_engine_mask = BIT(RCS0), \
        .has_3d_pipeline = 1, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@@ -267,7 -266,7 +270,7 @@@ static const struct intel_device_info i
        .display.has_overlay = 1,
        .display.overlay_needs_physical = 1,
        .display.supports_tv = 1,
 -      .display.fbc_mask = BIT(INTEL_FBC_A),
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A),
        .hws_needs_physical = 1,
        .unfenced_needs_alignment = 1,
  };
@@@ -292,7 -291,7 +295,7 @@@ static const struct intel_device_info i
        .display.has_overlay = 1,
        .display.overlay_needs_physical = 1,
        .display.supports_tv = 1,
 -      .display.fbc_mask = BIT(INTEL_FBC_A),
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A),
        .hws_needs_physical = 1,
        .unfenced_needs_alignment = 1,
  };
@@@ -324,12 -323,12 +327,12 @@@ static const struct intel_device_info p
  
  #define GEN4_FEATURES \
        GEN(4), \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_hotplug = 1, \
        .display.has_gmch = 1, \
        .gpu_reset_clobbers_display = true, \
 -      .platform_engine_mask = BIT(RCS0), \
 +      .__runtime.platform_engine_mask = BIT(RCS0), \
        .has_3d_pipeline = 1, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@@ -352,7 -351,7 +355,7 @@@ static const struct intel_device_info i
        GEN4_FEATURES,
        PLATFORM(INTEL_I965GM),
        .is_mobile = 1,
 -      .display.fbc_mask = BIT(INTEL_FBC_A),
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A),
        .display.has_overlay = 1,
        .display.supports_tv = 1,
        .hws_needs_physical = 1,
  static const struct intel_device_info g45_info = {
        GEN4_FEATURES,
        PLATFORM(INTEL_G45),
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
        .gpu_reset_clobbers_display = false,
  };
  
@@@ -370,18 -369,18 +373,18 @@@ static const struct intel_device_info g
        GEN4_FEATURES,
        PLATFORM(INTEL_GM45),
        .is_mobile = 1,
 -      .display.fbc_mask = BIT(INTEL_FBC_A),
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A),
        .display.supports_tv = 1,
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
        .gpu_reset_clobbers_display = false,
  };
  
  #define GEN5_FEATURES \
        GEN(5), \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_hotplug = 1, \
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
        .has_3d_pipeline = 1, \
        .has_snoop = true, \
        .has_coherent_ggtt = true, \
@@@ -404,16 -403,16 +407,16 @@@ static const struct intel_device_info i
        PLATFORM(INTEL_IRONLAKE),
        .is_mobile = 1,
        .has_rps = true,
 -      .display.fbc_mask = BIT(INTEL_FBC_A),
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A),
  };
  
  #define GEN6_FEATURES \
        GEN(6), \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
        .display.has_hotplug = 1, \
 -      .display.fbc_mask = BIT(INTEL_FBC_A), \
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
        .has_3d_pipeline = 1, \
        .has_coherent_ggtt = true, \
        .has_llc = 1, \
        .has_rc6p = 1, \
        .has_rps = true, \
        .dma_mask_size = 40, \
 -      .ppgtt_type = INTEL_PPGTT_ALIASING, \
 -      .ppgtt_size = 31, \
 +      .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
 +      .__runtime.ppgtt_size = 31, \
        I9XX_PIPE_OFFSETS, \
        I9XX_CURSOR_OFFSETS, \
        ILK_COLORS, \
@@@ -461,11 -460,11 +464,11 @@@ static const struct intel_device_info s
  
  #define GEN7_FEATURES  \
        GEN(7), \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
        .display.has_hotplug = 1, \
 -      .display.fbc_mask = BIT(INTEL_FBC_A), \
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
        .has_3d_pipeline = 1, \
        .has_coherent_ggtt = true, \
        .has_llc = 1, \
        .has_reset_engine = true, \
        .has_rps = true, \
        .dma_mask_size = 40, \
 -      .ppgtt_type = INTEL_PPGTT_ALIASING, \
 -      .ppgtt_size = 31, \
 +      .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
 +      .__runtime.ppgtt_size = 31, \
        IVB_PIPE_OFFSETS, \
        IVB_CURSOR_OFFSETS, \
        IVB_COLORS, \
@@@ -517,8 -516,8 +520,8 @@@ static const struct intel_device_info i
        GEN7_FEATURES,
        PLATFORM(INTEL_IVYBRIDGE),
        .gt = 2,
 -      .display.pipe_mask = 0, /* legal, last one wins */
 -      .display.cpu_transcoder_mask = 0,
 +      .__runtime.pipe_mask = 0, /* legal, last one wins */
 +      .__runtime.cpu_transcoder_mask = 0,
        .has_l3_dpf = 1,
  };
  
@@@ -526,8 -525,8 +529,8 @@@ static const struct intel_device_info v
        PLATFORM(INTEL_VALLEYVIEW),
        GEN(7),
        .is_lp = 1,
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
        .has_runtime_pm = 1,
        .has_rc6 = 1,
        .has_reset_engine = true,
        .display.has_gmch = 1,
        .display.has_hotplug = 1,
        .dma_mask_size = 40,
 -      .ppgtt_type = INTEL_PPGTT_ALIASING,
 -      .ppgtt_size = 31,
 +      .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING,
 +      .__runtime.ppgtt_size = 31,
        .has_snoop = true,
        .has_coherent_ggtt = false,
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
 -      .display_mmio_offset = VLV_DISPLAY_BASE,
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
 +      .display.mmio_offset = VLV_DISPLAY_BASE,
        I9XX_PIPE_OFFSETS,
        I9XX_CURSOR_OFFSETS,
        I965_COLORS,
  
  #define G75_FEATURES  \
        GEN7_FEATURES, \
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
        .display.has_ddi = 1, \
        .display.has_fpga_dbg = 1, \
@@@ -585,8 -584,8 +588,8 @@@ static const struct intel_device_info h
        GEN(8), \
        .has_logical_ring_contexts = 1, \
        .dma_mask_size = 39, \
 -      .ppgtt_type = INTEL_PPGTT_FULL, \
 -      .ppgtt_size = 48, \
 +      .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \
 +      .__runtime.ppgtt_size = 48, \
        .has_64bit_reloc = 1
  
  #define BDW_PLATFORM \
@@@ -614,18 -613,18 +617,18 @@@ static const struct intel_device_info b
  static const struct intel_device_info bdw_gt3_info = {
        BDW_PLATFORM,
        .gt = 3,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
  };
  
  static const struct intel_device_info chv_info = {
        PLATFORM(INTEL_CHERRYVIEW),
        GEN(8),
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
        .display.has_hotplug = 1,
        .is_lp = 1,
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
        .has_64bit_reloc = 1,
        .has_runtime_pm = 1,
        .has_rc6 = 1,
        .has_logical_ring_contexts = 1,
        .display.has_gmch = 1,
        .dma_mask_size = 39,
 -      .ppgtt_type = INTEL_PPGTT_FULL,
 -      .ppgtt_size = 32,
 +      .__runtime.ppgtt_type = INTEL_PPGTT_FULL,
 +      .__runtime.ppgtt_size = 32,
        .has_reset_engine = 1,
        .has_snoop = true,
        .has_coherent_ggtt = false,
 -      .display_mmio_offset = VLV_DISPLAY_BASE,
 +      .display.mmio_offset = VLV_DISPLAY_BASE,
        CHV_PIPE_OFFSETS,
        CHV_CURSOR_OFFSETS,
        CHV_COLORS,
  };
  
  #define GEN9_DEFAULT_PAGE_SIZES \
 -      .page_sizes = I915_GTT_PAGE_SIZE_4K | \
 -                    I915_GTT_PAGE_SIZE_64K
 +      .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
 +              I915_GTT_PAGE_SIZE_64K
  
  #define GEN9_FEATURES \
        GEN8_FEATURES, \
        GEN(9), \
        GEN9_DEFAULT_PAGE_SIZES, \
 -      .display.has_dmc = 1, \
 +      .__runtime.has_dmc = 1, \
        .has_gt_uc = 1, \
 -      .display.has_hdcp = 1, \
 +      .__runtime.has_hdcp = 1, \
        .display.has_ipc = 1, \
        .display.has_psr = 1, \
        .display.has_psr_hw_tracking = 1, \
 -      .dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
 -      .dbuf.slice_mask = BIT(DBUF_S1)
 +      .display.dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
 +      .display.dbuf.slice_mask = BIT(DBUF_S1)
  
  #define SKL_PLATFORM \
        GEN9_FEATURES, \
@@@ -679,7 -678,7 +682,7 @@@ static const struct intel_device_info s
  
  #define SKL_GT3_PLUS_PLATFORM \
        SKL_PLATFORM, \
 -      .platform_engine_mask = \
 +      .__runtime.platform_engine_mask = \
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
  
  
@@@ -696,31 -695,31 +699,31 @@@ static const struct intel_device_info s
  #define GEN9_LP_FEATURES \
        GEN(9), \
        .is_lp = 1, \
 -      .dbuf.slice_mask = BIT(DBUF_S1), \
 +      .display.dbuf.slice_mask = BIT(DBUF_S1), \
        .display.has_hotplug = 1, \
 -      .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
                BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
        .has_3d_pipeline = 1, \
        .has_64bit_reloc = 1, \
        .display.has_ddi = 1, \
        .display.has_fpga_dbg = 1, \
 -      .display.fbc_mask = BIT(INTEL_FBC_A), \
 -      .display.has_hdcp = 1, \
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
 +      .__runtime.has_hdcp = 1, \
        .display.has_psr = 1, \
        .display.has_psr_hw_tracking = 1, \
        .has_runtime_pm = 1, \
 -      .display.has_dmc = 1, \
 +      .__runtime.has_dmc = 1, \
        .has_rc6 = 1, \
        .has_rps = true, \
        .display.has_dp_mst = 1, \
        .has_logical_ring_contexts = 1, \
        .has_gt_uc = 1, \
        .dma_mask_size = 39, \
 -      .ppgtt_type = INTEL_PPGTT_FULL, \
 -      .ppgtt_size = 48, \
 +      .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \
 +      .__runtime.ppgtt_size = 48, \
        .has_reset_engine = 1, \
        .has_snoop = true, \
        .has_coherent_ggtt = false, \
  static const struct intel_device_info bxt_info = {
        GEN9_LP_FEATURES,
        PLATFORM(INTEL_BROXTON),
 -      .dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */
 +      .display.dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */
  };
  
  static const struct intel_device_info glk_info = {
        GEN9_LP_FEATURES,
        PLATFORM(INTEL_GEMINILAKE),
        .display.ver = 10,
 -      .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
 +      .display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
        GLK_COLORS,
  };
  
@@@ -762,7 -761,7 +765,7 @@@ static const struct intel_device_info k
  static const struct intel_device_info kbl_gt3_info = {
        KBL_PLATFORM,
        .gt = 3,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
  };
  
@@@ -783,7 -782,7 +786,7 @@@ static const struct intel_device_info c
  static const struct intel_device_info cfl_gt3_info = {
        CFL_PLATFORM,
        .gt = 3,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
  };
  
@@@ -802,18 -801,18 +805,18 @@@ static const struct intel_device_info c
  };
  
  #define GEN11_DEFAULT_PAGE_SIZES \
 -      .page_sizes = I915_GTT_PAGE_SIZE_4K | \
 -                    I915_GTT_PAGE_SIZE_64K | \
 -                    I915_GTT_PAGE_SIZE_2M
 +      .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
 +              I915_GTT_PAGE_SIZE_64K |                \
 +              I915_GTT_PAGE_SIZE_2M
  
  #define GEN11_FEATURES \
        GEN9_FEATURES, \
        GEN11_DEFAULT_PAGE_SIZES, \
        .display.abox_mask = BIT(0), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
                BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
 -      .pipe_offsets = { \
 +      .display.pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
                [TRANSCODER_B] = PIPE_B_OFFSET, \
                [TRANSCODER_C] = PIPE_C_OFFSET, \
                [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
                [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
        }, \
 -      .trans_offsets = { \
 +      .display.trans_offsets = { \
                [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
                [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
                [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
        }, \
        GEN(11), \
        ICL_COLORS, \
 -      .dbuf.size = 2048, \
 -      .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
 -      .display.has_dsc = 1, \
 +      .display.dbuf.size = 2048, \
 +      .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
 +      .__runtime.has_dsc = 1, \
        .has_coherent_ggtt = false, \
        .has_logical_ring_elsq = 1
  
  static const struct intel_device_info icl_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_ICELAKE),
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
  };
  
  static const struct intel_device_info ehl_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_ELKHARTLAKE),
 -      .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
 -      .ppgtt_size = 36,
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
 +      .__runtime.ppgtt_size = 36,
  };
  
  static const struct intel_device_info jsl_info = {
        GEN11_FEATURES,
        PLATFORM(INTEL_JASPERLAKE),
 -      .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
 -      .ppgtt_size = 36,
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
 +      .__runtime.ppgtt_size = 36,
  };
  
  #define GEN12_FEATURES \
        GEN11_FEATURES, \
        GEN(12), \
        .display.abox_mask = GENMASK(2, 1), \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
                BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
 -      .pipe_offsets = { \
 +      .display.pipe_offsets = { \
                [TRANSCODER_A] = PIPE_A_OFFSET, \
                [TRANSCODER_B] = PIPE_B_OFFSET, \
                [TRANSCODER_C] = PIPE_C_OFFSET, \
                [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
                [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
        }, \
 -      .trans_offsets = { \
 +      .display.trans_offsets = { \
                [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
                [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
                [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
@@@ -891,7 -890,7 +894,7 @@@ static const struct intel_device_info t
        GEN12_FEATURES,
        PLATFORM(INTEL_TIGERLAKE),
        .display.has_modular_fia = 1,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
  };
  
@@@ -899,17 -898,17 +902,17 @@@ static const struct intel_device_info r
        GEN12_FEATURES,
        PLATFORM(INTEL_ROCKETLAKE),
        .display.abox_mask = BIT(0),
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C),
        .display.has_hti = 1,
        .display.has_psr_hw_tracking = 0,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
  };
  
  #define DGFX_FEATURES \
 -      .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
 +      .__runtime.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
        .has_llc = 0, \
        .has_pxp = 0, \
        .has_snoop = 1, \
  static const struct intel_device_info dg1_info = {
        GEN12_FEATURES,
        DGFX_FEATURES,
 -      .graphics.rel = 10,
 +      .__runtime.graphics.rel = 10,
        PLATFORM(INTEL_DG1),
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
        .require_force_probe = 1,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
                BIT(VCS0) | BIT(VCS2),
        /* Wa_16011227922 */
 -      .ppgtt_size = 47,
 +      .__runtime.ppgtt_size = 47,
  };
  
  static const struct intel_device_info adl_s_info = {
        GEN12_FEATURES,
        PLATFORM(INTEL_ALDERLAKE_S),
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
        .display.has_hti = 1,
        .display.has_psr_hw_tracking = 0,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
        .dma_mask_size = 39,
  };
  
 -#define XE_LPD_CURSOR_OFFSETS \
 -      .cursor_offsets = { \
 -              [PIPE_A] = CURSOR_A_OFFSET, \
 -              [PIPE_B] = IVB_CURSOR_B_OFFSET, \
 -              [PIPE_C] = IVB_CURSOR_C_OFFSET, \
 -              [PIPE_D] = TGL_CURSOR_D_OFFSET, \
 -      }
 -
  #define XE_LPD_FEATURES \
        .display.abox_mask = GENMASK(1, 0),                                     \
 -      .color = { .degamma_lut_size = 128, .gamma_lut_size = 1024,             \
 -                 .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING |          \
 -                                      DRM_COLOR_LUT_EQUAL_CHANNELS,           \
 +      .display.color = {                                                      \
 +              .degamma_lut_size = 128, .gamma_lut_size = 1024,                \
 +              .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING |             \
 +                                   DRM_COLOR_LUT_EQUAL_CHANNELS,              \
        },                                                                      \
 -      .dbuf.size = 4096,                                                      \
 -      .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) |         \
 +      .display.dbuf.size = 4096,                                              \
 +      .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
                BIT(DBUF_S4),                                                   \
        .display.has_ddi = 1,                                                   \
 -      .display.has_dmc = 1,                                                   \
 +      .__runtime.has_dmc = 1,                                                 \
        .display.has_dp_mst = 1,                                                \
        .display.has_dsb = 1,                                                   \
 -      .display.has_dsc = 1,                                                   \
 -      .display.fbc_mask = BIT(INTEL_FBC_A),                                   \
 +      .__runtime.has_dsc = 1,                                                 \
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A),                                 \
        .display.has_fpga_dbg = 1,                                              \
 -      .display.has_hdcp = 1,                                                  \
 +      .__runtime.has_hdcp = 1,                                                \
        .display.has_hotplug = 1,                                               \
        .display.has_ipc = 1,                                                   \
        .display.has_psr = 1,                                                   \
        .display.ver = 13,                                                      \
 -      .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),     \
 -      .pipe_offsets = {                                                       \
 +      .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),   \
 +      .display.pipe_offsets = {                                               \
                [TRANSCODER_A] = PIPE_A_OFFSET,                                 \
                [TRANSCODER_B] = PIPE_B_OFFSET,                                 \
                [TRANSCODER_C] = PIPE_C_OFFSET,                                 \
                [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET,                          \
                [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET,                          \
        },                                                                      \
 -      .trans_offsets = {                                                      \
 +      .display.trans_offsets = {                                              \
                [TRANSCODER_A] = TRANSCODER_A_OFFSET,                           \
                [TRANSCODER_B] = TRANSCODER_B_OFFSET,                           \
                [TRANSCODER_C] = TRANSCODER_C_OFFSET,                           \
                [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET,                    \
                [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET,                    \
        },                                                                      \
 -      XE_LPD_CURSOR_OFFSETS
 +      TGL_CURSOR_OFFSETS
  
  static const struct intel_device_info adl_p_info = {
        GEN12_FEATURES,
        XE_LPD_FEATURES,
        PLATFORM(INTEL_ALDERLAKE_P),
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                               BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
                               BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
        .display.has_cdclk_crawl = 1,
        .display.has_modular_fia = 1,
        .display.has_psr_hw_tracking = 0,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
 -      .ppgtt_size = 48,
 +      .__runtime.ppgtt_size = 48,
        .dma_mask_size = 39,
  };
  
  #undef GEN
  
  #define XE_HP_PAGE_SIZES \
 -      .page_sizes = I915_GTT_PAGE_SIZE_4K | \
 -                    I915_GTT_PAGE_SIZE_64K | \
 -                    I915_GTT_PAGE_SIZE_2M
 +      .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
 +              I915_GTT_PAGE_SIZE_64K |                \
 +              I915_GTT_PAGE_SIZE_2M
  
  #define XE_HP_FEATURES \
 -      .graphics.ver = 12, \
 -      .graphics.rel = 50, \
 +      .__runtime.graphics.ver = 12, \
 +      .__runtime.graphics.rel = 50, \
        XE_HP_PAGE_SIZES, \
        .dma_mask_size = 46, \
        .has_3d_pipeline = 1, \
        .has_reset_engine = 1, \
        .has_rps = 1, \
        .has_runtime_pm = 1, \
 -      .ppgtt_size = 48, \
 -      .ppgtt_type = INTEL_PPGTT_FULL
 +      .__runtime.ppgtt_size = 48, \
 +      .__runtime.ppgtt_type = INTEL_PPGTT_FULL
  
  #define XE_HPM_FEATURES \
        .media.ver = 12, \
@@@ -1040,7 -1046,7 +1043,7 @@@ static const struct intel_device_info x
        .has_64k_pages = 1,
        .needs_compact_pt = 1,
        .has_media_ratio_mode = 1,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) |
                BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
                BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
        XE_HP_FEATURES, \
        XE_HPM_FEATURES, \
        DGFX_FEATURES, \
 -      .graphics.rel = 55, \
 +      .__runtime.graphics.rel = 55, \
        .media.rel = 55, \
        PLATFORM(INTEL_DG2), \
        .has_4tile = 1, \
        .has_heci_pxp = 1, \
        .needs_compact_pt = 1, \
        .has_media_ratio_mode = 1, \
 -      .platform_engine_mask = \
 +      .__runtime.platform_engine_mask = \
                BIT(RCS0) | BIT(BCS0) | \
                BIT(VECS0) | BIT(VECS1) | \
                BIT(VCS0) | BIT(VCS2) | \
  static const struct intel_device_info dg2_info = {
        DG2_FEATURES,
        XE_LPD_FEATURES,
 -      .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
 +      .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
                               BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
        .require_force_probe = 1,
  };
@@@ -1097,12 -1103,12 +1100,12 @@@ static const struct intel_device_info p
        XE_HPC_FEATURES,
        XE_HPM_FEATURES,
        DGFX_FEATURES,
 -      .graphics.rel = 60,
 +      .__runtime.graphics.rel = 60,
        .media.rel = 60,
        PLATFORM(INTEL_PONTEVECCHIO),
        .display = { 0 },
        .has_flat_ccs = 0,
 -      .platform_engine_mask =
 +      .__runtime.platform_engine_mask =
                BIT(BCS0) |
                BIT(VCS0) |
                BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
  #define XE_LPDP_FEATURES      \
        XE_LPD_FEATURES,        \
        .display.ver = 14,      \
 -      .display.has_cdclk_crawl = 1
 +      .display.has_cdclk_crawl = 1, \
 +      .__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B)
  
+ static const struct intel_gt_definition xelpmp_extra_gt[] = {
+       {
+               .type = GT_MEDIA,
+               .name = "Standalone Media GT",
+               .gsi_offset = MTL_MEDIA_GSI_BASE,
+               .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+       },
+       {}
+ };
  __maybe_unused
  static const struct intel_device_info mtl_info = {
        XE_HP_FEATURES,
         * Real graphics IP version will be obtained from hardware GMD_ID
         * register.  Value provided here is just for sanity checking.
         */
 -      .graphics.ver = 12,
 -      .graphics.rel = 70,
 +      .__runtime.graphics.ver = 12,
 +      .__runtime.graphics.rel = 70,
        .media.ver = 13,
        PLATFORM(INTEL_METEORLAKE),
        .display.has_modular_fia = 1,
+       .extra_gt_list = xelpmp_extra_gt,
        .has_flat_ccs = 0,
        .has_snoop = 1,
 -      .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
 -      .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
 +      .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
 +      .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
        .require_force_probe = 1,
  };
  
@@@ -1265,27 -1281,6 +1279,27 @@@ static bool force_probe(u16 device_id, 
        return ret;
  }
  
 +bool i915_pci_resource_valid(struct pci_dev *pdev, int bar)
 +{
 +      if (!pci_resource_flags(pdev, bar))
 +              return false;
 +
 +      if (pci_resource_flags(pdev, bar) & IORESOURCE_UNSET)
 +              return false;
 +
 +      if (!pci_resource_len(pdev, bar))
 +              return false;
 +
 +      return true;
 +}
 +
 +static bool intel_mmio_bar_valid(struct pci_dev *pdev, struct intel_device_info *intel_info)
 +{
 +      int gttmmaddr_bar = intel_info->__runtime.graphics.ver == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
 +
 +      return i915_pci_resource_valid(pdev, gttmmaddr_bar);
 +}
 +
  static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        struct intel_device_info *intel_info =
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;
  
 +      if (!intel_mmio_bar_valid(pdev, intel_info))
 +              return -ENXIO;
 +
        /* Detect if we need to wait for other drivers early on */
        if (intel_modeset_probe_defer(pdev))
                return -EPROBE_DEFER;
index adfb279c07822d53d754bae298e9dcaa104f2d8e,f44eec7f7a9c5d90bc8981b3ae7b1e220d3c73fa..8b1c1b527e9b7fbdcd7310811670c32afe192e5b
   *  #define GEN8_BAR                    _MMIO(0xb888)
   */
  
 -#define DISPLAY_MMIO_BASE(dev_priv)   (INTEL_INFO(dev_priv)->display_mmio_offset)
 +#define DISPLAY_MMIO_BASE(dev_priv)   (INTEL_INFO(dev_priv)->display.mmio_offset)
  
  /*
   * Given the first two numbers __a and __b of arbitrarily many evenly spaced
   * Device info offset array based helpers for groups of registers with unevenly
   * spaced base offsets.
   */
 -#define _MMIO_PIPE2(pipe, reg)                _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
 -                                            INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
 -                                            DISPLAY_MMIO_BASE(dev_priv))
 -#define _TRANS2(tran, reg)            (INTEL_INFO(dev_priv)->trans_offsets[(tran)] - \
 -                                       INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
 -                                       DISPLAY_MMIO_BASE(dev_priv))
 -#define _MMIO_TRANS2(tran, reg)               _MMIO(_TRANS2(tran, reg))
 -#define _CURSOR2(pipe, reg)           _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
 -                                            INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
 -                                            DISPLAY_MMIO_BASE(dev_priv))
 +#define _MMIO_PIPE2(pipe, reg)                _MMIO(INTEL_INFO(dev_priv)->display.pipe_offsets[(pipe)] - \
 +                                            INTEL_INFO(dev_priv)->display.pipe_offsets[PIPE_A] + \
 +                                            DISPLAY_MMIO_BASE(dev_priv) + (reg))
 +#define _MMIO_TRANS2(tran, reg)               _MMIO(INTEL_INFO(dev_priv)->display.trans_offsets[(tran)] - \
 +                                            INTEL_INFO(dev_priv)->display.trans_offsets[TRANSCODER_A] + \
 +                                            DISPLAY_MMIO_BASE(dev_priv) + (reg))
 +#define _MMIO_CURSOR2(pipe, reg)      _MMIO(INTEL_INFO(dev_priv)->display.cursor_offsets[(pipe)] - \
 +                                            INTEL_INFO(dev_priv)->display.cursor_offsets[PIPE_A] + \
 +                                            DISPLAY_MMIO_BASE(dev_priv) + (reg))
  
  #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
  #define _MASKED_FIELD(mask, value) ({                                    \
  /*
   * GPIO regs
   */
 -#define GPIO(gpio)            _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \
 +#define GPIO(gpio)            _MMIO(dev_priv->display.gmbus.mmio_base + 0x5010 + \
                                      4 * (gpio))
  
  # define GPIO_CLOCK_DIR_MASK          (1 << 0)
  # define GPIO_DATA_VAL_IN             (1 << 12)
  # define GPIO_DATA_PULLUP_DISABLE     (1 << 13)
  
 -#define GMBUS0                        _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
 +#define GMBUS0                        _MMIO(dev_priv->display.gmbus.mmio_base + 0x5100) /* clock/port select */
  #define   GMBUS_AKSV_SELECT   (1 << 11)
  #define   GMBUS_RATE_100KHZ   (0 << 8)
  #define   GMBUS_RATE_50KHZ    (1 << 8)
  #define   GMBUS_HOLD_EXT      (1 << 7) /* 300ns hold time, rsvd on Pineview */
  #define   GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
  
 -#define GMBUS1                        _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
 +#define GMBUS1                        _MMIO(dev_priv->display.gmbus.mmio_base + 0x5104) /* command/status */
  #define   GMBUS_SW_CLR_INT    (1 << 31)
  #define   GMBUS_SW_RDY                (1 << 30)
  #define   GMBUS_ENT           (1 << 29) /* enable timeout */
  #define   GMBUS_SLAVE_ADDR_SHIFT 1
  #define   GMBUS_SLAVE_READ    (1 << 0)
  #define   GMBUS_SLAVE_WRITE   (0 << 0)
 -#define GMBUS2                        _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
 +#define GMBUS2                        _MMIO(dev_priv->display.gmbus.mmio_base + 0x5108) /* status */
  #define   GMBUS_INUSE         (1 << 15)
  #define   GMBUS_HW_WAIT_PHASE (1 << 14)
  #define   GMBUS_STALL_TIMEOUT (1 << 13)
  #define   GMBUS_HW_RDY                (1 << 11)
  #define   GMBUS_SATOER                (1 << 10)
  #define   GMBUS_ACTIVE                (1 << 9)
 -#define GMBUS3                        _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
 -#define GMBUS4                        _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
 +#define GMBUS3                        _MMIO(dev_priv->display.gmbus.mmio_base + 0x510c) /* data buffer bytes 3-0 */
 +#define GMBUS4                        _MMIO(dev_priv->display.gmbus.mmio_base + 0x5110) /* interrupt mask (Pineview+) */
  #define   GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
  #define   GMBUS_NAK_EN                (1 << 3)
  #define   GMBUS_IDLE_EN               (1 << 2)
  #define   GMBUS_HW_WAIT_EN    (1 << 1)
  #define   GMBUS_HW_RDY_EN     (1 << 0)
 -#define GMBUS5                        _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
 +#define GMBUS5                        _MMIO(dev_priv->display.gmbus.mmio_base + 0x5120) /* byte index */
  #define   GMBUS_2BYTE_INDEX_EN        (1 << 31)
  
  /*
  
  #define GT0_PERF_LIMIT_REASONS                _MMIO(0x1381a8)
  #define   GT0_PERF_LIMIT_REASONS_MASK 0xde3
- #define   PROCHOT_MASK                        REG_BIT(1)
- #define   THERMAL_LIMIT_MASK          REG_BIT(2)
- #define   RATL_MASK                   REG_BIT(6)
- #define   VR_THERMALERT_MASK          REG_BIT(7)
- #define   VR_TDC_MASK                 REG_BIT(8)
- #define   POWER_LIMIT_4_MASK          REG_BIT(9)
- #define   POWER_LIMIT_1_MASK          REG_BIT(11)
- #define   POWER_LIMIT_2_MASK          REG_BIT(12)
+ #define   PROCHOT_MASK                        REG_BIT(0)
+ #define   THERMAL_LIMIT_MASK          REG_BIT(1)
+ #define   RATL_MASK                   REG_BIT(5)
+ #define   VR_THERMALERT_MASK          REG_BIT(6)
+ #define   VR_TDC_MASK                 REG_BIT(7)
+ #define   POWER_LIMIT_4_MASK          REG_BIT(8)
+ #define   POWER_LIMIT_1_MASK          REG_BIT(10)
+ #define   POWER_LIMIT_2_MASK          REG_BIT(11)
  
  #define CHV_CLK_CTL1                  _MMIO(0x101100)
  #define VLV_CLK_CTL2                  _MMIO(0x101104)
  #define CLKGATE_DIS_PSL(pipe) \
        _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
  
 +#define _CLKGATE_DIS_PSL_EXT_A                0x4654C
 +#define _CLKGATE_DIS_PSL_EXT_B                0x46550
 +#define   PIPEDMC_GATING_DIS          REG_BIT(12)
 +
 +#define CLKGATE_DIS_PSL_EXT(pipe) \
 +      _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
 +
  /*
   * Display engine regs
   */
   */
  #define _SRD_CTL_A                            0x60800
  #define _SRD_CTL_EDP                          0x6f800
 -#define EDP_PSR_CTL(tran)                     _MMIO(_TRANS2(tran, _SRD_CTL_A))
 +#define EDP_PSR_CTL(tran)                     _MMIO_TRANS2(tran, _SRD_CTL_A)
  #define   EDP_PSR_ENABLE                      (1 << 31)
  #define   BDW_PSR_SINGLE_FRAME                        (1 << 30)
  #define   EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
  
  #define _SRD_AUX_DATA_A                               0x60814
  #define _SRD_AUX_DATA_EDP                     0x6f814
 -#define EDP_PSR_AUX_DATA(tran, i)             _MMIO(_TRANS2(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */
 +#define EDP_PSR_AUX_DATA(tran, i)             _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */
  
  #define _SRD_STATUS_A                         0x60840
  #define _SRD_STATUS_EDP                               0x6f840
 -#define EDP_PSR_STATUS(tran)                  _MMIO(_TRANS2(tran, _SRD_STATUS_A))
 +#define EDP_PSR_STATUS(tran)                  _MMIO_TRANS2(tran, _SRD_STATUS_A)
  #define   EDP_PSR_STATUS_STATE_MASK           (7 << 29)
  #define   EDP_PSR_STATUS_STATE_SHIFT          29
  #define   EDP_PSR_STATUS_STATE_IDLE           (0 << 29)
  
  #define _SRD_PERF_CNT_A                       0x60844
  #define _SRD_PERF_CNT_EDP             0x6f844
 -#define EDP_PSR_PERF_CNT(tran)                _MMIO(_TRANS2(tran, _SRD_PERF_CNT_A))
 +#define EDP_PSR_PERF_CNT(tran)                _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
  #define   EDP_PSR_PERF_CNT_MASK               0xffffff
  
  /* PSR_MASK on SKL+ */
  #define _SRD_DEBUG_A                          0x60860
  #define _SRD_DEBUG_EDP                                0x6f860
 -#define EDP_PSR_DEBUG(tran)                   _MMIO(_TRANS2(tran, _SRD_DEBUG_A))
 +#define EDP_PSR_DEBUG(tran)                   _MMIO_TRANS2(tran, _SRD_DEBUG_A)
  #define   EDP_PSR_DEBUG_MASK_MAX_SLEEP         (1 << 28)
  #define   EDP_PSR_DEBUG_MASK_LPSP              (1 << 27)
  #define   EDP_PSR_DEBUG_MASK_MEMUP             (1 << 26)
  
  #define _PSR2_SU_STATUS_A             0x60914
  #define _PSR2_SU_STATUS_EDP           0x6f914
 -#define _PSR2_SU_STATUS(tran, index)  _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4)
 +#define _PSR2_SU_STATUS(tran, index)  _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4)
  #define PSR2_SU_STATUS(tran, frame)   (_PSR2_SU_STATUS(tran, (frame) / 3))
  #define PSR2_SU_STATUS_SHIFT(frame)   (((frame) % 3) * 10)
  #define PSR2_SU_STATUS_MASK(frame)    (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
  #define VLV_PPS_BASE                  (VLV_DISPLAY_BASE + PPS_BASE)
  #define PCH_PPS_BASE                  0xC7200
  
 -#define _MMIO_PPS(pps_idx, reg)               _MMIO(dev_priv->pps_mmio_base - \
 +#define _MMIO_PPS(pps_idx, reg)               _MMIO(dev_priv->display.pps.mmio_base - \
                                              PPS_BASE + (reg) +        \
                                              (pps_idx) * 0x100)
  
  
  #define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
  
 -#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
 -#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
 -#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
 -                                       _VLV_BLC_PWM_CTL2_B)
 -
 -#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
 -#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
 -#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
 -                                      _VLV_BLC_PWM_CTL_B)
 -
 -#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
 -#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
 -#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
 -                                       _VLV_BLC_HIST_CTL_B)
 -
 -/* Backlight control */
 -#define BLC_PWM_CTL2  _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
 -#define   BLM_PWM_ENABLE              (1 << 31)
 -#define   BLM_COMBINATION_MODE                (1 << 30) /* gen4 only */
 -#define   BLM_PIPE_SELECT             (1 << 29)
 -#define   BLM_PIPE_SELECT_IVB         (3 << 29)
 -#define   BLM_PIPE_A                  (0 << 29)
 -#define   BLM_PIPE_B                  (1 << 29)
 -#define   BLM_PIPE_C                  (2 << 29) /* ivb + */
 -#define   BLM_TRANSCODER_A            BLM_PIPE_A /* hsw */
 -#define   BLM_TRANSCODER_B            BLM_PIPE_B
 -#define   BLM_TRANSCODER_C            BLM_PIPE_C
 -#define   BLM_TRANSCODER_EDP          (3 << 29)
 -#define   BLM_PIPE(pipe)              ((pipe) << 29)
 -#define   BLM_POLARITY_I965           (1 << 28) /* gen4 only */
 -#define   BLM_PHASE_IN_INTERUPT_STATUS        (1 << 26)
 -#define   BLM_PHASE_IN_ENABLE         (1 << 25)
 -#define   BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
 -#define   BLM_PHASE_IN_TIME_BASE_SHIFT        (16)
 -#define   BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
 -#define   BLM_PHASE_IN_COUNT_SHIFT    (8)
 -#define   BLM_PHASE_IN_COUNT_MASK     (0xff << 8)
 -#define   BLM_PHASE_IN_INCR_SHIFT     (0)
 -#define   BLM_PHASE_IN_INCR_MASK      (0xff << 0)
 -#define BLC_PWM_CTL   _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
 -/*
 - * This is the most significant 15 bits of the number of backlight cycles in a
 - * complete cycle of the modulated backlight control.
 - *
 - * The actual value is this field multiplied by two.
 - */
 -#define   BACKLIGHT_MODULATION_FREQ_SHIFT     (17)
 -#define   BACKLIGHT_MODULATION_FREQ_MASK      (0x7fff << 17)
 -#define   BLM_LEGACY_MODE                     (1 << 16) /* gen2 only */
 -/*
 - * This is the number of cycles out of the backlight modulation cycle for which
 - * the backlight is on.
 - *
 - * This field must be no greater than the number of cycles in the complete
 - * backlight modulation cycle.
 - */
 -#define   BACKLIGHT_DUTY_CYCLE_SHIFT          (0)
 -#define   BACKLIGHT_DUTY_CYCLE_MASK           (0xffff)
 -#define   BACKLIGHT_DUTY_CYCLE_MASK_PNV               (0xfffe)
 -#define   BLM_POLARITY_PNV                    (1 << 0) /* pnv only */
 -
 -#define BLC_HIST_CTL  _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
 -#define  BLM_HISTOGRAM_ENABLE                 (1 << 31)
 -
 -/* New registers for PCH-split platforms. Safe where new bits show up, the
 - * register layout machtes with gen4 BLC_PWM_CTL[12]. */
 -#define BLC_PWM_CPU_CTL2      _MMIO(0x48250)
 -#define BLC_PWM_CPU_CTL               _MMIO(0x48254)
 -
 -#define HSW_BLC_PWM2_CTL      _MMIO(0x48350)
 -
 -/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
 - * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
 -#define BLC_PWM_PCH_CTL1      _MMIO(0xc8250)
 -#define   BLM_PCH_PWM_ENABLE                  (1 << 31)
 -#define   BLM_PCH_OVERRIDE_ENABLE             (1 << 30)
 -#define   BLM_PCH_POLARITY                    (1 << 29)
 -#define BLC_PWM_PCH_CTL2      _MMIO(0xc8254)
 -
 -#define UTIL_PIN_CTL                  _MMIO(0x48400)
 -#define   UTIL_PIN_ENABLE             (1 << 31)
 -#define   UTIL_PIN_PIPE_MASK          (3 << 29)
 -#define   UTIL_PIN_PIPE(x)            ((x) << 29)
 -#define   UTIL_PIN_MODE_MASK          (0xf << 24)
 -#define   UTIL_PIN_MODE_DATA          (0 << 24)
 -#define   UTIL_PIN_MODE_PWM           (1 << 24)
 -#define   UTIL_PIN_MODE_VBLANK                (4 << 24)
 -#define   UTIL_PIN_MODE_VSYNC         (5 << 24)
 -#define   UTIL_PIN_MODE_EYE_LEVEL     (8 << 24)
 -#define   UTIL_PIN_OUTPUT_DATA                (1 << 23)
 -#define   UTIL_PIN_POLARITY           (1 << 22)
 -#define   UTIL_PIN_DIRECTION_INPUT    (1 << 19)
 -#define   UTIL_PIN_INPUT_DATA         (1 << 16)
 -
 -/* BXT backlight register definition. */
 -#define _BXT_BLC_PWM_CTL1                     0xC8250
 -#define   BXT_BLC_PWM_ENABLE                  (1 << 31)
 -#define   BXT_BLC_PWM_POLARITY                        (1 << 29)
 -#define _BXT_BLC_PWM_FREQ1                    0xC8254
 -#define _BXT_BLC_PWM_DUTY1                    0xC8258
 -
 -#define _BXT_BLC_PWM_CTL2                     0xC8350
 -#define _BXT_BLC_PWM_FREQ2                    0xC8354
 -#define _BXT_BLC_PWM_DUTY2                    0xC8358
 -
 -#define BXT_BLC_PWM_CTL(controller)    _MMIO_PIPE(controller,         \
 -                                      _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
 -#define BXT_BLC_PWM_FREQ(controller)   _MMIO_PIPE(controller, \
 -                                      _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
 -#define BXT_BLC_PWM_DUTY(controller)   _MMIO_PIPE(controller, \
 -                                      _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
 -
  #define PCH_GTC_CTL           _MMIO(0xe7000)
  #define   PCH_GTC_ENABLE      (1 << 31)
  
  #define _CURBBASE_IVB         0x71084
  #define _CURBPOS_IVB          0x71088
  
 -#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
 -#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
 -#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
 -#define CURSIZE(pipe) _CURSOR2(pipe, _CURASIZE)
 -#define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A)
 -#define CURSURFLIVE(pipe) _CURSOR2(pipe, _CURASURFLIVE)
 +#define CURCNTR(pipe) _MMIO_CURSOR2(pipe, _CURACNTR)
 +#define CURBASE(pipe) _MMIO_CURSOR2(pipe, _CURABASE)
 +#define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS)
 +#define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE)
 +#define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A)
 +#define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE)
  
  #define CURSOR_A_OFFSET 0x70080
  #define CURSOR_B_OFFSET 0x700c0
  #define DSPLINOFF(plane)      DSPADDR(plane)
  #define DSPOFFSET(plane)      _MMIO_PIPE2(plane, _DSPAOFFSET)
  #define DSPSURFLIVE(plane)    _MMIO_PIPE2(plane, _DSPASURFLIVE)
 -#define DSPGAMC(plane, i)     _MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
 +#define DSPGAMC(plane, i)     _MMIO_PIPE2(plane, _DSPAGAMC + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
  
  /* CHV pipe B blender and primary plane */
  #define _CHV_BLEND_A          0x60a00
                                                         _BW_BUDDY1_PAGE_MASK))
  
  #define HSW_NDE_RSTWRN_OPT    _MMIO(0x46408)
 -#define  RESET_PCH_HANDSHAKE_ENABLE   (1 << 4)
 +#define  MTL_RESET_PICA_HANDSHAKE_EN  REG_BIT(6)
 +#define  RESET_PCH_HANDSHAKE_ENABLE   REG_BIT(4)
  
  #define GEN8_CHICKEN_DCPR_1                   _MMIO(0x46430)
  #define   SKL_SELECT_ALTERNATE_DC_EXIT                REG_BIT(30)
                (((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
  #define   GEN7_L3CDERRST1_ENABLE      (1 << 7)
  
 -/* Audio */
 -#define G4X_AUD_VID_DID                       _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
 -#define   INTEL_AUDIO_DEVCL           0x808629FB
 -#define   INTEL_AUDIO_DEVBLC          0x80862801
 -#define   INTEL_AUDIO_DEVCTG          0x80862802
 -
 -#define G4X_AUD_CNTL_ST                       _MMIO(0x620B4)
 -#define   G4X_ELDV_DEVCL_DEVBLC               (1 << 13)
 -#define   G4X_ELDV_DEVCTG             (1 << 14)
 -#define   G4X_ELD_ADDR_MASK           (0xf << 5)
 -#define   G4X_ELD_ACK                 (1 << 4)
 -#define G4X_HDMIW_HDMIEDID            _MMIO(0x6210C)
 -
 -#define _IBX_HDMIW_HDMIEDID_A         0xE2050
 -#define _IBX_HDMIW_HDMIEDID_B         0xE2150
 -#define IBX_HDMIW_HDMIEDID(pipe)      _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
 -                                                _IBX_HDMIW_HDMIEDID_B)
 -#define _IBX_AUD_CNTL_ST_A            0xE20B4
 -#define _IBX_AUD_CNTL_ST_B            0xE21B4
 -#define IBX_AUD_CNTL_ST(pipe)         _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
 -                                                _IBX_AUD_CNTL_ST_B)
 -#define   IBX_ELD_BUFFER_SIZE_MASK    (0x1f << 10)
 -#define   IBX_ELD_ADDRESS_MASK                (0x1f << 5)
 -#define   IBX_ELD_ACK                 (1 << 4)
 -#define IBX_AUD_CNTL_ST2              _MMIO(0xE20C0)
 -#define   IBX_CP_READY(port)          ((1 << 1) << (((port) - 1) * 4))
 -#define   IBX_ELD_VALID(port)         ((1 << 0) << (((port) - 1) * 4))
 -
 -#define _CPT_HDMIW_HDMIEDID_A         0xE5050
 -#define _CPT_HDMIW_HDMIEDID_B         0xE5150
 -#define CPT_HDMIW_HDMIEDID(pipe)      _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
 -#define _CPT_AUD_CNTL_ST_A            0xE50B4
 -#define _CPT_AUD_CNTL_ST_B            0xE51B4
 -#define CPT_AUD_CNTL_ST(pipe)         _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
 -#define CPT_AUD_CNTRL_ST2             _MMIO(0xE50C0)
 -
 -#define _VLV_HDMIW_HDMIEDID_A         (VLV_DISPLAY_BASE + 0x62050)
 -#define _VLV_HDMIW_HDMIEDID_B         (VLV_DISPLAY_BASE + 0x62150)
 -#define VLV_HDMIW_HDMIEDID(pipe)      _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
 -#define _VLV_AUD_CNTL_ST_A            (VLV_DISPLAY_BASE + 0x620B4)
 -#define _VLV_AUD_CNTL_ST_B            (VLV_DISPLAY_BASE + 0x621B4)
 -#define VLV_AUD_CNTL_ST(pipe)         _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
 -#define VLV_AUD_CNTL_ST2              _MMIO(VLV_DISPLAY_BASE + 0x620C0)
 -
  /* These are the 4 32-bit write offset registers for each stream
   * output buffer.  It determines the offset from the
   * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
   */
  #define GEN7_SO_WRITE_OFFSET(n)               _MMIO(0x5280 + (n) * 4)
  
 -#define _IBX_AUD_CONFIG_A             0xe2000
 -#define _IBX_AUD_CONFIG_B             0xe2100
 -#define IBX_AUD_CFG(pipe)             _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
 -#define _CPT_AUD_CONFIG_A             0xe5000
 -#define _CPT_AUD_CONFIG_B             0xe5100
 -#define CPT_AUD_CFG(pipe)             _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
 -#define _VLV_AUD_CONFIG_A             (VLV_DISPLAY_BASE + 0x62000)
 -#define _VLV_AUD_CONFIG_B             (VLV_DISPLAY_BASE + 0x62100)
 -#define VLV_AUD_CFG(pipe)             _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
 -
 -#define   AUD_CONFIG_N_VALUE_INDEX            (1 << 29)
 -#define   AUD_CONFIG_N_PROG_ENABLE            (1 << 28)
 -#define   AUD_CONFIG_UPPER_N_SHIFT            20
 -#define   AUD_CONFIG_UPPER_N_MASK             (0xff << 20)
 -#define   AUD_CONFIG_LOWER_N_SHIFT            4
 -#define   AUD_CONFIG_LOWER_N_MASK             (0xfff << 4)
 -#define   AUD_CONFIG_N_MASK                   (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
 -#define   AUD_CONFIG_N(n) \
 -      (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) |   \
 -       (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT   16
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK    (0xf << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25175   (0 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25200   (1 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27000   (2 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27027   (3 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54000   (4 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54054   (5 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74176   (6 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74250   (7 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148352  (8 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148500  (9 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_296703  (10 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_297000  (11 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_593407  (12 << 16)
 -#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_594000  (13 << 16)
 -#define   AUD_CONFIG_DISABLE_NCTS             (1 << 3)
 -
 -/* HSW Audio */
 -#define _HSW_AUD_CONFIG_A             0x65000
 -#define _HSW_AUD_CONFIG_B             0x65100
 -#define HSW_AUD_CFG(trans)            _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
 -
 -#define _HSW_AUD_MISC_CTRL_A          0x65010
 -#define _HSW_AUD_MISC_CTRL_B          0x65110
 -#define HSW_AUD_MISC_CTRL(trans)      _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
 -
 -#define _HSW_AUD_M_CTS_ENABLE_A               0x65028
 -#define _HSW_AUD_M_CTS_ENABLE_B               0x65128
 -#define HSW_AUD_M_CTS_ENABLE(trans)   _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
 -#define   AUD_M_CTS_M_VALUE_INDEX     (1 << 21)
 -#define   AUD_M_CTS_M_PROG_ENABLE     (1 << 20)
 -#define   AUD_CONFIG_M_MASK           0xfffff
 -
 -#define _HSW_AUD_DIP_ELD_CTRL_ST_A    0x650b4
 -#define _HSW_AUD_DIP_ELD_CTRL_ST_B    0x651b4
 -#define HSW_AUD_DIP_ELD_CTRL(trans)   _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
 -
 -/* Audio Digital Converter */
 -#define _HSW_AUD_DIG_CNVT_1           0x65080
 -#define _HSW_AUD_DIG_CNVT_2           0x65180
 -#define AUD_DIG_CNVT(trans)           _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
 -#define DIP_PORT_SEL_MASK             0x3
 -
 -#define _HSW_AUD_EDID_DATA_A          0x65050
 -#define _HSW_AUD_EDID_DATA_B          0x65150
 -#define HSW_AUD_EDID_DATA(trans)      _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
 -
 -#define HSW_AUD_PIPE_CONV_CFG         _MMIO(0x6507c)
 -#define HSW_AUD_PIN_ELD_CP_VLD                _MMIO(0x650c0)
 -#define   AUDIO_INACTIVE(trans)               ((1 << 3) << ((trans) * 4))
 -#define   AUDIO_OUTPUT_ENABLE(trans)  ((1 << 2) << ((trans) * 4))
 -#define   AUDIO_CP_READY(trans)               ((1 << 1) << ((trans) * 4))
 -#define   AUDIO_ELD_VALID(trans)      ((1 << 0) << ((trans) * 4))
 -
 -#define _AUD_TCA_DP_2DOT0_CTRL                0x650bc
 -#define _AUD_TCB_DP_2DOT0_CTRL                0x651bc
 -#define AUD_DP_2DOT0_CTRL(trans)      _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
 -#define  AUD_ENABLE_SDP_SPLIT         REG_BIT(31)
 -
 -#define HSW_AUD_CHICKENBIT                    _MMIO(0x65f10)
 -#define   SKL_AUD_CODEC_WAKE_SIGNAL           (1 << 15)
 -
 -#define AUD_FREQ_CNTRL                        _MMIO(0x65900)
 -#define AUD_PIN_BUF_CTL               _MMIO(0x48414)
 -#define   AUD_PIN_BUF_ENABLE          REG_BIT(31)
 -
 -#define AUD_TS_CDCLK_M                        _MMIO(0x65ea0)
 -#define   AUD_TS_CDCLK_M_EN           REG_BIT(31)
 -#define AUD_TS_CDCLK_N                        _MMIO(0x65ea4)
 -
 -/* Display Audio Config Reg */
 -#define AUD_CONFIG_BE                 _MMIO(0x65ef0)
 -#define HBLANK_EARLY_ENABLE_ICL(pipe)         (0x1 << (20 - (pipe)))
 -#define HBLANK_EARLY_ENABLE_TGL(pipe)         (0x1 << (24 + (pipe)))
 -#define HBLANK_START_COUNT_MASK(pipe)         (0x7 << (3 + ((pipe) * 6)))
 -#define HBLANK_START_COUNT(pipe, val)         (((val) & 0x7) << (3 + ((pipe)) * 6))
 -#define NUMBER_SAMPLES_PER_LINE_MASK(pipe)    (0x3 << ((pipe) * 6))
 -#define NUMBER_SAMPLES_PER_LINE(pipe, val)    (((val) & 0x3) << ((pipe) * 6))
 -
 -#define HBLANK_START_COUNT_8  0
 -#define HBLANK_START_COUNT_16 1
 -#define HBLANK_START_COUNT_32 2
 -#define HBLANK_START_COUNT_64 3
 -#define HBLANK_START_COUNT_96 4
 -#define HBLANK_START_COUNT_128        5
 -
  /*
   * HSW - ICL power wells
   *
@@@ -6833,6 -7089,265 +6833,6 @@@ enum skl_power_gate 
  #define   ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
  #define   ICL_AUX_ANAOVRD1_ENABLE     (1 << 0)
  
 -/* HDCP Key Registers */
 -#define HDCP_KEY_CONF                 _MMIO(0x66c00)
 -#define  HDCP_AKSV_SEND_TRIGGER               BIT(31)
 -#define  HDCP_CLEAR_KEYS_TRIGGER      BIT(30)
 -#define  HDCP_KEY_LOAD_TRIGGER                BIT(8)
 -#define HDCP_KEY_STATUS                       _MMIO(0x66c04)
 -#define  HDCP_FUSE_IN_PROGRESS                BIT(7)
 -#define  HDCP_FUSE_ERROR              BIT(6)
 -#define  HDCP_FUSE_DONE                       BIT(5)
 -#define  HDCP_KEY_LOAD_STATUS         BIT(1)
 -#define  HDCP_KEY_LOAD_DONE           BIT(0)
 -#define HDCP_AKSV_LO                  _MMIO(0x66c10)
 -#define HDCP_AKSV_HI                  _MMIO(0x66c14)
 -
 -/* HDCP Repeater Registers */
 -#define HDCP_REP_CTL                  _MMIO(0x66d00)
 -#define  HDCP_TRANSA_REP_PRESENT      BIT(31)
 -#define  HDCP_TRANSB_REP_PRESENT      BIT(30)
 -#define  HDCP_TRANSC_REP_PRESENT      BIT(29)
 -#define  HDCP_TRANSD_REP_PRESENT      BIT(28)
 -#define  HDCP_DDIB_REP_PRESENT                BIT(30)
 -#define  HDCP_DDIA_REP_PRESENT                BIT(29)
 -#define  HDCP_DDIC_REP_PRESENT                BIT(28)
 -#define  HDCP_DDID_REP_PRESENT                BIT(27)
 -#define  HDCP_DDIF_REP_PRESENT                BIT(26)
 -#define  HDCP_DDIE_REP_PRESENT                BIT(25)
 -#define  HDCP_TRANSA_SHA1_M0          (1 << 20)
 -#define  HDCP_TRANSB_SHA1_M0          (2 << 20)
 -#define  HDCP_TRANSC_SHA1_M0          (3 << 20)
 -#define  HDCP_TRANSD_SHA1_M0          (4 << 20)
 -#define  HDCP_DDIB_SHA1_M0            (1 << 20)
 -#define  HDCP_DDIA_SHA1_M0            (2 << 20)
 -#define  HDCP_DDIC_SHA1_M0            (3 << 20)
 -#define  HDCP_DDID_SHA1_M0            (4 << 20)
 -#define  HDCP_DDIF_SHA1_M0            (5 << 20)
 -#define  HDCP_DDIE_SHA1_M0            (6 << 20) /* Bspec says 5? */
 -#define  HDCP_SHA1_BUSY                       BIT(16)
 -#define  HDCP_SHA1_READY              BIT(17)
 -#define  HDCP_SHA1_COMPLETE           BIT(18)
 -#define  HDCP_SHA1_V_MATCH            BIT(19)
 -#define  HDCP_SHA1_TEXT_32            (1 << 1)
 -#define  HDCP_SHA1_COMPLETE_HASH      (2 << 1)
 -#define  HDCP_SHA1_TEXT_24            (4 << 1)
 -#define  HDCP_SHA1_TEXT_16            (5 << 1)
 -#define  HDCP_SHA1_TEXT_8             (6 << 1)
 -#define  HDCP_SHA1_TEXT_0             (7 << 1)
 -#define HDCP_SHA_V_PRIME_H0           _MMIO(0x66d04)
 -#define HDCP_SHA_V_PRIME_H1           _MMIO(0x66d08)
 -#define HDCP_SHA_V_PRIME_H2           _MMIO(0x66d0C)
 -#define HDCP_SHA_V_PRIME_H3           _MMIO(0x66d10)
 -#define HDCP_SHA_V_PRIME_H4           _MMIO(0x66d14)
 -#define HDCP_SHA_V_PRIME(h)           _MMIO((0x66d04 + (h) * 4))
 -#define HDCP_SHA_TEXT                 _MMIO(0x66d18)
 -
 -/* HDCP Auth Registers */
 -#define _PORTA_HDCP_AUTHENC           0x66800
 -#define _PORTB_HDCP_AUTHENC           0x66500
 -#define _PORTC_HDCP_AUTHENC           0x66600
 -#define _PORTD_HDCP_AUTHENC           0x66700
 -#define _PORTE_HDCP_AUTHENC           0x66A00
 -#define _PORTF_HDCP_AUTHENC           0x66900
 -#define _PORT_HDCP_AUTHENC(port, x)   _MMIO(_PICK(port, \
 -                                        _PORTA_HDCP_AUTHENC, \
 -                                        _PORTB_HDCP_AUTHENC, \
 -                                        _PORTC_HDCP_AUTHENC, \
 -                                        _PORTD_HDCP_AUTHENC, \
 -                                        _PORTE_HDCP_AUTHENC, \
 -                                        _PORTF_HDCP_AUTHENC) + (x))
 -#define PORT_HDCP_CONF(port)          _PORT_HDCP_AUTHENC(port, 0x0)
 -#define _TRANSA_HDCP_CONF             0x66400
 -#define _TRANSB_HDCP_CONF             0x66500
 -#define TRANS_HDCP_CONF(trans)                _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
 -                                                  _TRANSB_HDCP_CONF)
 -#define HDCP_CONF(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP_CONF(trans) : \
 -                                       PORT_HDCP_CONF(port))
 -
 -#define  HDCP_CONF_CAPTURE_AN         BIT(0)
 -#define  HDCP_CONF_AUTH_AND_ENC               (BIT(1) | BIT(0))
 -#define PORT_HDCP_ANINIT(port)                _PORT_HDCP_AUTHENC(port, 0x4)
 -#define _TRANSA_HDCP_ANINIT           0x66404
 -#define _TRANSB_HDCP_ANINIT           0x66504
 -#define TRANS_HDCP_ANINIT(trans)      _MMIO_TRANS(trans, \
 -                                                  _TRANSA_HDCP_ANINIT, \
 -                                                  _TRANSB_HDCP_ANINIT)
 -#define HDCP_ANINIT(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP_ANINIT(trans) : \
 -                                       PORT_HDCP_ANINIT(port))
 -
 -#define PORT_HDCP_ANLO(port)          _PORT_HDCP_AUTHENC(port, 0x8)
 -#define _TRANSA_HDCP_ANLO             0x66408
 -#define _TRANSB_HDCP_ANLO             0x66508
 -#define TRANS_HDCP_ANLO(trans)                _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
 -                                                  _TRANSB_HDCP_ANLO)
 -#define HDCP_ANLO(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP_ANLO(trans) : \
 -                                       PORT_HDCP_ANLO(port))
 -
 -#define PORT_HDCP_ANHI(port)          _PORT_HDCP_AUTHENC(port, 0xC)
 -#define _TRANSA_HDCP_ANHI             0x6640C
 -#define _TRANSB_HDCP_ANHI             0x6650C
 -#define TRANS_HDCP_ANHI(trans)                _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
 -                                                  _TRANSB_HDCP_ANHI)
 -#define HDCP_ANHI(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP_ANHI(trans) : \
 -                                       PORT_HDCP_ANHI(port))
 -
 -#define PORT_HDCP_BKSVLO(port)                _PORT_HDCP_AUTHENC(port, 0x10)
 -#define _TRANSA_HDCP_BKSVLO           0x66410
 -#define _TRANSB_HDCP_BKSVLO           0x66510
 -#define TRANS_HDCP_BKSVLO(trans)      _MMIO_TRANS(trans, \
 -                                                  _TRANSA_HDCP_BKSVLO, \
 -                                                  _TRANSB_HDCP_BKSVLO)
 -#define HDCP_BKSVLO(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP_BKSVLO(trans) : \
 -                                       PORT_HDCP_BKSVLO(port))
 -
 -#define PORT_HDCP_BKSVHI(port)                _PORT_HDCP_AUTHENC(port, 0x14)
 -#define _TRANSA_HDCP_BKSVHI           0x66414
 -#define _TRANSB_HDCP_BKSVHI           0x66514
 -#define TRANS_HDCP_BKSVHI(trans)      _MMIO_TRANS(trans, \
 -                                                  _TRANSA_HDCP_BKSVHI, \
 -                                                  _TRANSB_HDCP_BKSVHI)
 -#define HDCP_BKSVHI(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP_BKSVHI(trans) : \
 -                                       PORT_HDCP_BKSVHI(port))
 -
 -#define PORT_HDCP_RPRIME(port)                _PORT_HDCP_AUTHENC(port, 0x18)
 -#define _TRANSA_HDCP_RPRIME           0x66418
 -#define _TRANSB_HDCP_RPRIME           0x66518
 -#define TRANS_HDCP_RPRIME(trans)      _MMIO_TRANS(trans, \
 -                                                  _TRANSA_HDCP_RPRIME, \
 -                                                  _TRANSB_HDCP_RPRIME)
 -#define HDCP_RPRIME(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP_RPRIME(trans) : \
 -                                       PORT_HDCP_RPRIME(port))
 -
 -#define PORT_HDCP_STATUS(port)                _PORT_HDCP_AUTHENC(port, 0x1C)
 -#define _TRANSA_HDCP_STATUS           0x6641C
 -#define _TRANSB_HDCP_STATUS           0x6651C
 -#define TRANS_HDCP_STATUS(trans)      _MMIO_TRANS(trans, \
 -                                                  _TRANSA_HDCP_STATUS, \
 -                                                  _TRANSB_HDCP_STATUS)
 -#define HDCP_STATUS(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP_STATUS(trans) : \
 -                                       PORT_HDCP_STATUS(port))
 -
 -#define  HDCP_STATUS_STREAM_A_ENC     BIT(31)
 -#define  HDCP_STATUS_STREAM_B_ENC     BIT(30)
 -#define  HDCP_STATUS_STREAM_C_ENC     BIT(29)
 -#define  HDCP_STATUS_STREAM_D_ENC     BIT(28)
 -#define  HDCP_STATUS_AUTH             BIT(21)
 -#define  HDCP_STATUS_ENC              BIT(20)
 -#define  HDCP_STATUS_RI_MATCH         BIT(19)
 -#define  HDCP_STATUS_R0_READY         BIT(18)
 -#define  HDCP_STATUS_AN_READY         BIT(17)
 -#define  HDCP_STATUS_CIPHER           BIT(16)
 -#define  HDCP_STATUS_FRAME_CNT(x)     (((x) >> 8) & 0xff)
 -
 -/* HDCP2.2 Registers */
 -#define _PORTA_HDCP2_BASE             0x66800
 -#define _PORTB_HDCP2_BASE             0x66500
 -#define _PORTC_HDCP2_BASE             0x66600
 -#define _PORTD_HDCP2_BASE             0x66700
 -#define _PORTE_HDCP2_BASE             0x66A00
 -#define _PORTF_HDCP2_BASE             0x66900
 -#define _PORT_HDCP2_BASE(port, x)     _MMIO(_PICK((port), \
 -                                        _PORTA_HDCP2_BASE, \
 -                                        _PORTB_HDCP2_BASE, \
 -                                        _PORTC_HDCP2_BASE, \
 -                                        _PORTD_HDCP2_BASE, \
 -                                        _PORTE_HDCP2_BASE, \
 -                                        _PORTF_HDCP2_BASE) + (x))
 -
 -#define PORT_HDCP2_AUTH(port)         _PORT_HDCP2_BASE(port, 0x98)
 -#define _TRANSA_HDCP2_AUTH            0x66498
 -#define _TRANSB_HDCP2_AUTH            0x66598
 -#define TRANS_HDCP2_AUTH(trans)               _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
 -                                                  _TRANSB_HDCP2_AUTH)
 -#define   AUTH_LINK_AUTHENTICATED     BIT(31)
 -#define   AUTH_LINK_TYPE              BIT(30)
 -#define   AUTH_FORCE_CLR_INPUTCTR     BIT(19)
 -#define   AUTH_CLR_KEYS                       BIT(18)
 -#define HDCP2_AUTH(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP2_AUTH(trans) : \
 -                                       PORT_HDCP2_AUTH(port))
 -
 -#define PORT_HDCP2_CTL(port)          _PORT_HDCP2_BASE(port, 0xB0)
 -#define _TRANSA_HDCP2_CTL             0x664B0
 -#define _TRANSB_HDCP2_CTL             0x665B0
 -#define TRANS_HDCP2_CTL(trans)                _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
 -                                                  _TRANSB_HDCP2_CTL)
 -#define   CTL_LINK_ENCRYPTION_REQ     BIT(31)
 -#define HDCP2_CTL(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP2_CTL(trans) : \
 -                                       PORT_HDCP2_CTL(port))
 -
 -#define PORT_HDCP2_STATUS(port)               _PORT_HDCP2_BASE(port, 0xB4)
 -#define _TRANSA_HDCP2_STATUS          0x664B4
 -#define _TRANSB_HDCP2_STATUS          0x665B4
 -#define TRANS_HDCP2_STATUS(trans)     _MMIO_TRANS(trans, \
 -                                                  _TRANSA_HDCP2_STATUS, \
 -                                                  _TRANSB_HDCP2_STATUS)
 -#define   LINK_TYPE_STATUS            BIT(22)
 -#define   LINK_AUTH_STATUS            BIT(21)
 -#define   LINK_ENCRYPTION_STATUS      BIT(20)
 -#define HDCP2_STATUS(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP2_STATUS(trans) : \
 -                                       PORT_HDCP2_STATUS(port))
 -
 -#define _PIPEA_HDCP2_STREAM_STATUS    0x668C0
 -#define _PIPEB_HDCP2_STREAM_STATUS    0x665C0
 -#define _PIPEC_HDCP2_STREAM_STATUS    0x666C0
 -#define _PIPED_HDCP2_STREAM_STATUS    0x667C0
 -#define PIPE_HDCP2_STREAM_STATUS(pipe)                _MMIO(_PICK((pipe), \
 -                                                    _PIPEA_HDCP2_STREAM_STATUS, \
 -                                                    _PIPEB_HDCP2_STREAM_STATUS, \
 -                                                    _PIPEC_HDCP2_STREAM_STATUS, \
 -                                                    _PIPED_HDCP2_STREAM_STATUS))
 -
 -#define _TRANSA_HDCP2_STREAM_STATUS           0x664C0
 -#define _TRANSB_HDCP2_STREAM_STATUS           0x665C0
 -#define TRANS_HDCP2_STREAM_STATUS(trans)      _MMIO_TRANS(trans, \
 -                                                  _TRANSA_HDCP2_STREAM_STATUS, \
 -                                                  _TRANSB_HDCP2_STREAM_STATUS)
 -#define   STREAM_ENCRYPTION_STATUS    BIT(31)
 -#define   STREAM_TYPE_STATUS          BIT(30)
 -#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP2_STREAM_STATUS(trans) : \
 -                                       PIPE_HDCP2_STREAM_STATUS(pipe))
 -
 -#define _PORTA_HDCP2_AUTH_STREAM              0x66F00
 -#define _PORTB_HDCP2_AUTH_STREAM              0x66F04
 -#define PORT_HDCP2_AUTH_STREAM(port)  _MMIO_PORT(port, \
 -                                                 _PORTA_HDCP2_AUTH_STREAM, \
 -                                                 _PORTB_HDCP2_AUTH_STREAM)
 -#define _TRANSA_HDCP2_AUTH_STREAM             0x66F00
 -#define _TRANSB_HDCP2_AUTH_STREAM             0x66F04
 -#define TRANS_HDCP2_AUTH_STREAM(trans)        _MMIO_TRANS(trans, \
 -                                                  _TRANSA_HDCP2_AUTH_STREAM, \
 -                                                  _TRANSB_HDCP2_AUTH_STREAM)
 -#define   AUTH_STREAM_TYPE            BIT(31)
 -#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
 -                                      (GRAPHICS_VER(dev_priv) >= 12 ? \
 -                                       TRANS_HDCP2_AUTH_STREAM(trans) : \
 -                                       PORT_HDCP2_AUTH_STREAM(port))
 -
  /* Per-pipe DDI Function Control */
  #define _TRANS_DDI_FUNC_CTL_A         0x60400
  #define _TRANS_DDI_FUNC_CTL_B         0x61400
  #define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
 +#define  DSC_ALT_ICH_SEL              (1 << 20)
  #define  DSC_VBR_ENABLE                       (1 << 19)
  #define  DSC_422_ENABLE                       (1 << 18)
  #define  DSC_COLOR_SPACE_CONVERSION   (1 << 17)
  #define GEN12_CULLBIT2                        _MMIO(0x7030)
  #define GEN12_STATE_ACK_DEBUG         _MMIO(0x20BC)
  
 +#define MTL_LATENCY_LP0_LP1           _MMIO(0x45780)
 +#define MTL_LATENCY_LP2_LP3           _MMIO(0x45784)
 +#define MTL_LATENCY_LP4_LP5           _MMIO(0x45788)
 +#define  MTL_LATENCY_LEVEL_EVEN_MASK  REG_GENMASK(12, 0)
 +#define  MTL_LATENCY_LEVEL_ODD_MASK   REG_GENMASK(28, 16)
 +
  #endif /* _I915_REG_H_ */
index 6904ad03ca19f7061562af52081a122a5350de7d,80ff83f96b8863b30fe8608105c621d457205a17..deaa07d8df2c14eeb6d062ea768325f86a5bdf61
@@@ -37,6 -37,7 +37,7 @@@
  
  struct drm_printer;
  struct drm_i915_private;
+ struct intel_gt_definition;
  
  /* Keep in gen based order, and chronological order within a gen */
  enum intel_platform {
@@@ -164,6 -165,7 +165,6 @@@ enum intel_ppgtt_type 
        func(has_media_ratio_mode); \
        func(has_mslice_steering); \
        func(has_one_eu_per_fuse_bit); \
 -      func(has_pooled_eu); \
        func(has_pxp); \
        func(has_rc6); \
        func(has_rc6p); \
        /* Keep in alphabetical order */ \
        func(cursor_needs_physical); \
        func(has_cdclk_crawl); \
 -      func(has_dmc); \
        func(has_ddi); \
        func(has_dp_mst); \
        func(has_dsb); \
 -      func(has_dsc); \
        func(has_fpga_dbg); \
        func(has_gmch); \
 -      func(has_hdcp); \
        func(has_hotplug); \
        func(has_hti); \
        func(has_ipc); \
@@@ -199,59 -204,27 +200,61 @@@ struct ip_version 
        u8 rel;
  };
  
 -struct intel_device_info {
 +struct intel_runtime_info {
        struct ip_version graphics;
 -      struct ip_version media;
 +
 +      /*
 +       * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
 +       * single runtime conditionals, and also to provide groundwork for
 +       * future per platform, or per SKU build optimizations.
 +       *
 +       * Array can be extended when necessary if the corresponding
 +       * BUILD_BUG_ON is hit.
 +       */
 +      u32 platform_mask[2];
 +
 +      u16 device_id;
  
        intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
  
 -      enum intel_platform platform;
 +      u32 rawclk_freq;
  
 -      unsigned int dma_mask_size; /* available DMA address bits */
 +      struct intel_step_info step;
 +
 +      unsigned int page_sizes; /* page sizes supported by the HW */
  
        enum intel_ppgtt_type ppgtt_type;
        unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
  
 -      unsigned int page_sizes; /* page sizes supported by the HW */
 -
        u32 memory_regions; /* regions supported by the HW */
  
 -      u32 display_mmio_offset;
 +      bool has_pooled_eu;
 +
 +      /* display */
 +      struct {
 +              u8 pipe_mask;
 +              u8 cpu_transcoder_mask;
 +
 +              u8 num_sprites[I915_MAX_PIPES];
 +              u8 num_scalers[I915_MAX_PIPES];
 +
 +              u8 fbc_mask;
 +
 +              bool has_hdcp;
 +              bool has_dmc;
 +              bool has_dsc;
 +      };
 +};
 +
 +struct intel_device_info {
 +      struct ip_version media;
 +
 +      enum intel_platform platform;
 +
 +      unsigned int dma_mask_size; /* available DMA address bits */
  
+       const struct intel_gt_definition *extra_gt_list;
        u8 gt; /* GT number, 0 if undefined */
  
  #define DEFINE_FLAG(name) u8 name:1
                u8 ver;
                u8 rel;
  
 -              u8 pipe_mask;
 -              u8 cpu_transcoder_mask;
 -              u8 fbc_mask;
                u8 abox_mask;
  
 +              struct {
 +                      u16 size; /* in blocks */
 +                      u8 slice_mask;
 +              } dbuf;
 +
  #define DEFINE_FLAG(name) u8 name:1
                DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
  #undef DEFINE_FLAG
 -      } display;
 -
 -      struct {
 -              u16 size; /* in blocks */
 -              u8 slice_mask;
 -      } dbuf;
 -
 -      /* Register offsets for the various display pipes and transcoders */
 -      int pipe_offsets[I915_MAX_TRANSCODERS];
 -      int trans_offsets[I915_MAX_TRANSCODERS];
 -      int cursor_offsets[I915_MAX_PIPES];
 -
 -      struct color_luts {
 -              u32 degamma_lut_size;
 -              u32 gamma_lut_size;
 -              u32 degamma_lut_tests;
 -              u32 gamma_lut_tests;
 -      } color;
 -};
 -
 -struct intel_runtime_info {
 -      /*
 -       * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
 -       * into single runtime conditionals, and also to provide groundwork
 -       * for future per platform, or per SKU build optimizations.
 -       *
 -       * Array can be extended when necessary if the corresponding
 -       * BUILD_BUG_ON is hit.
 -       */
 -      u32 platform_mask[2];
  
 -      u16 device_id;
 +              /* Global register offset for the display engine */
 +              u32 mmio_offset;
  
 -      u8 num_sprites[I915_MAX_PIPES];
 -      u8 num_scalers[I915_MAX_PIPES];
 +              /* Register offsets for the various display pipes and transcoders */
 +              u32 pipe_offsets[I915_MAX_TRANSCODERS];
 +              u32 trans_offsets[I915_MAX_TRANSCODERS];
 +              u32 cursor_offsets[I915_MAX_PIPES];
  
 -      u32 rawclk_freq;
 +              struct {
 +                      u32 degamma_lut_size;
 +                      u32 gamma_lut_size;
 +                      u32 degamma_lut_tests;
 +                      u32 gamma_lut_tests;
 +              } color;
 +      } display;
  
 -      struct intel_step_info step;
 +      /*
 +       * Initial runtime info. Do not access outside of i915_driver_create().
 +       */
 +      const struct intel_runtime_info __runtime;
  };
  
  struct intel_driver_caps {
@@@ -305,9 -294,10 +308,9 @@@ const char *intel_platform_name(enum in
  void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
  void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
  
 -void intel_device_info_print_static(const struct intel_device_info *info,
 -                                  struct drm_printer *p);
 -void intel_device_info_print_runtime(const struct intel_runtime_info *info,
 -                                   struct drm_printer *p);
 +void intel_device_info_print(const struct intel_device_info *info,
 +                           const struct intel_runtime_info *runtime,
 +                           struct drm_printer *p);
  
  void intel_driver_caps_print(const struct intel_driver_caps *caps,
                             struct drm_printer *p);
index a7bfa063447c14771997176ab1751028d5f1216a,d309cc031e939a29e1727916eb3f389329ee165f..efc040cccc2469c8e1fbade0bebd466cca755fb8
@@@ -30,8 -30,8 +30,8 @@@
  #include <linux/pm_runtime.h>
  
  #include <drm/drm_atomic_helper.h>
 +#include <drm/drm_blend.h>
  #include <drm/drm_fourcc.h>
 -#include <drm/drm_plane_helper.h>
  
  #include "display/intel_atomic.h"
  #include "display/intel_atomic_plane.h"
@@@ -468,13 -468,13 +468,13 @@@ bool intel_set_memory_cxsr(struct drm_i
  {
        bool ret;
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
        ret = _intel_set_memory_cxsr(dev_priv, enable);
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 -              dev_priv->wm.vlv.cxsr = enable;
 +              dev_priv->display.wm.vlv.cxsr = enable;
        else if (IS_G4X(dev_priv))
 -              dev_priv->wm.g4x.cxsr = enable;
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +              dev_priv->display.wm.g4x.cxsr = enable;
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  
        return ret;
  }
@@@ -834,7 -834,7 +834,7 @@@ static bool is_enabling(int old, int ne
  
  static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
  {
 -      return dev_priv->wm.max_level + 1;
 +      return dev_priv->display.wm.max_level + 1;
  }
  
  static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
@@@ -1093,11 -1093,11 +1093,11 @@@ static void vlv_write_wm_values(struct 
  static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
  {
        /* all latencies in usec */
 -      dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
 -      dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
 -      dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
 +      dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
 +      dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
 +      dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
  
 -      dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
 +      dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
  }
  
  static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@@ -1150,7 -1150,7 +1150,7 @@@ static u16 g4x_compute_wm(const struct 
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_display_mode *pipe_mode =
                &crtc_state->hw.pipe_mode;
 -      unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
 +      unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
        unsigned int pixel_rate, htotal, cpp, width, wm;
  
        if (latency == 0)
@@@ -1324,7 -1324,7 +1324,7 @@@ static bool g4x_raw_crtc_wm_is_valid(co
  {
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
  
 -      if (level > dev_priv->wm.max_level)
 +      if (level > dev_priv->display.wm.max_level)
                return false;
  
        return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@@ -1583,7 -1583,7 +1583,7 @@@ static void g4x_merge_wm(struct drm_i91
  
  static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
  {
 -      struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
 +      struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
        struct g4x_wm_values new_wm = {};
  
        g4x_merge_wm(dev_priv, &new_wm);
@@@ -1609,10 -1609,10 +1609,10 @@@ static void g4x_initial_watermarks(stru
        const struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
        crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
        g4x_program_watermarks(dev_priv);
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  }
  
  static void g4x_optimize_watermarks(struct intel_atomic_state *state,
        if (!crtc_state->wm.need_postvbl_update)
                return;
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
        crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
        g4x_program_watermarks(dev_priv);
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  }
  
  /* latency must be in 0.1us units. */
@@@ -1650,15 -1650,15 +1650,15 @@@ static unsigned int vlv_wm_method2(unsi
  static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
  {
        /* all latencies in usec */
 -      dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
 +      dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
  
 -      dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
 +      dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
  
        if (IS_CHERRYVIEW(dev_priv)) {
 -              dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
 -              dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
 +              dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
 +              dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
  
 -              dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
 +              dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
        }
  }
  
@@@ -1672,7 -1672,7 +1672,7 @@@ static u16 vlv_compute_wm_level(const s
                &crtc_state->hw.pipe_mode;
        unsigned int pixel_rate, htotal, cpp, width, wm;
  
 -      if (dev_priv->wm.pri_latency[level] == 0)
 +      if (dev_priv->display.wm.pri_latency[level] == 0)
                return USHRT_MAX;
  
        if (!intel_wm_plane_visible(crtc_state, plane_state))
                wm = 63;
        } else {
                wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
 -                                  dev_priv->wm.pri_latency[level] * 10);
 +                                  dev_priv->display.wm.pri_latency[level] * 10);
        }
  
        return min_t(unsigned int, wm, USHRT_MAX);
@@@ -2158,7 -2158,7 +2158,7 @@@ static void vlv_merge_wm(struct drm_i91
        struct intel_crtc *crtc;
        int num_active_pipes = 0;
  
 -      wm->level = dev_priv->wm.max_level;
 +      wm->level = dev_priv->display.wm.max_level;
        wm->cxsr = true;
  
        for_each_intel_crtc(&dev_priv->drm, crtc) {
  
  static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
  {
 -      struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
 +      struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
        struct vlv_wm_values new_wm = {};
  
        vlv_merge_wm(dev_priv, &new_wm);
@@@ -2235,10 -2235,10 +2235,10 @@@ static void vlv_initial_watermarks(stru
        const struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
        crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
        vlv_program_watermarks(dev_priv);
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  }
  
  static void vlv_optimize_watermarks(struct intel_atomic_state *state,
        if (!crtc_state->wm.need_postvbl_update)
                return;
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
        crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
        vlv_program_watermarks(dev_priv);
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  }
  
  static void i965_update_wm(struct drm_i915_private *dev_priv)
@@@ -2835,9 -2835,9 +2835,9 @@@ static void ilk_compute_wm_level(const 
                                 const struct intel_plane_state *curstate,
                                 struct intel_wm_level *result)
  {
 -      u16 pri_latency = dev_priv->wm.pri_latency[level];
 -      u16 spr_latency = dev_priv->wm.spr_latency[level];
 -      u16 cur_latency = dev_priv->wm.cur_latency[level];
 +      u16 pri_latency = dev_priv->display.wm.pri_latency[level];
 +      u16 spr_latency = dev_priv->display.wm.spr_latency[level];
 +      u16 cur_latency = dev_priv->display.wm.cur_latency[level];
  
        /* WM1+ latency values stored in 0.5us units */
        if (level > 0) {
        result->enable = true;
  }
  
 +static void
 +adjust_wm_latency(struct drm_i915_private *i915,
 +                u16 wm[], int max_level, int read_latency)
 +{
 +      bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
 +      int i, level;
 +
 +      /*
 +       * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
 +       * need to be disabled. We make sure to sanitize the values out
 +       * of the punit to satisfy this requirement.
 +       */
 +      for (level = 1; level <= max_level; level++) {
 +              if (wm[level] == 0) {
 +                      for (i = level + 1; i <= max_level; i++)
 +                              wm[i] = 0;
 +
 +                      max_level = level - 1;
 +                      break;
 +              }
 +      }
 +
 +      /*
 +       * WaWmMemoryReadLatency
 +       *
 +       * punit doesn't take into account the read latency so we need
 +       * to add proper adjustement to each valid level we retrieve
 +       * from the punit when level 0 response data is 0us.
 +       */
 +      if (wm[0] == 0) {
 +              for (level = 0; level <= max_level; level++)
 +                      wm[level] += read_latency;
 +      }
 +
 +      /*
 +       * WA Level-0 adjustment for 16GB DIMMs: SKL+
 +       * If we could not get dimm info enable this WA to prevent from
 +       * any underrun. If not able to get Dimm info assume 16GB dimm
 +       * to avoid any underrun.
 +       */
 +      if (wm_lv_0_adjust_needed)
 +              wm[0] += 1;
 +}
 +
  static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 -                                u16 wm[8])
 +                                u16 wm[])
  {
        struct intel_uncore *uncore = &dev_priv->uncore;
 +      int max_level = ilk_wm_max_level(dev_priv);
  
 -      if (DISPLAY_VER(dev_priv) >= 9) {
 +      if (DISPLAY_VER(dev_priv) >= 14) {
                u32 val;
 -              int ret, i;
 -              int level, max_level = ilk_wm_max_level(dev_priv);
 +
 +              val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
 +              wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
 +              wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
 +              val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
 +              wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
 +              wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
 +              val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
 +              wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
 +              wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
 +
 +              adjust_wm_latency(dev_priv, wm, max_level, 6);
 +      } else if (DISPLAY_VER(dev_priv) >= 9) {
 +              int read_latency = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
                int mult = IS_DG2(dev_priv) ? 2 : 1;
 +              u32 val;
 +              int ret;
  
                /* read the first set of memory latencies[0:3] */
                val = 0; /* data0 to be programmed to 0 for first set */
                wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
                                GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
  
 -              /*
 -               * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
 -               * need to be disabled. We make sure to sanitize the values out
 -               * of the punit to satisfy this requirement.
 -               */
 -              for (level = 1; level <= max_level; level++) {
 -                      if (wm[level] == 0) {
 -                              for (i = level + 1; i <= max_level; i++)
 -                                      wm[i] = 0;
 -
 -                              max_level = level - 1;
 -
 -                              break;
 -                      }
 -              }
 -
 -              /*
 -               * WaWmMemoryReadLatency
 -               *
 -               * punit doesn't take into account the read latency so we need
 -               * to add proper adjustement to each valid level we retrieve
 -               * from the punit when level 0 response data is 0us.
 -               */
 -              if (wm[0] == 0) {
 -                      u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
 -
 -                      for (level = 0; level <= max_level; level++)
 -                              wm[level] += adjust;
 -              }
 -
 -              /*
 -               * WA Level-0 adjustment for 16GB DIMMs: SKL+
 -               * If we could not get dimm info enable this WA to prevent from
 -               * any underrun. If not able to get Dimm info assume 16GB dimm
 -               * to avoid any underrun.
 -               */
 -              if (dev_priv->dram_info.wm_lv_0_adjust_needed)
 -                      wm[0] += 1;
 +              adjust_wm_latency(dev_priv, wm, max_level, read_latency);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
  
@@@ -3083,18 -3061,18 +3083,18 @@@ static void snb_wm_latency_quirk(struc
         * The BIOS provided WM memory latency values are often
         * inadequate for high resolution displays. Adjust them.
         */
 -      changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
 -      changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
 -      changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
 +      changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
 +      changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
 +      changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
  
        if (!changed)
                return;
  
        drm_dbg_kms(&dev_priv->drm,
                    "WM latency values increased to avoid potential underruns\n");
 -      intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 -      intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 -      intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 +      intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
 +      intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
 +      intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
  }
  
  static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
         * interrupts only. To play it safe we disable LP3
         * watermarks entirely.
         */
 -      if (dev_priv->wm.pri_latency[3] == 0 &&
 -          dev_priv->wm.spr_latency[3] == 0 &&
 -          dev_priv->wm.cur_latency[3] == 0)
 +      if (dev_priv->display.wm.pri_latency[3] == 0 &&
 +          dev_priv->display.wm.spr_latency[3] == 0 &&
 +          dev_priv->display.wm.cur_latency[3] == 0)
                return;
  
 -      dev_priv->wm.pri_latency[3] = 0;
 -      dev_priv->wm.spr_latency[3] = 0;
 -      dev_priv->wm.cur_latency[3] = 0;
 +      dev_priv->display.wm.pri_latency[3] = 0;
 +      dev_priv->display.wm.spr_latency[3] = 0;
 +      dev_priv->display.wm.cur_latency[3] = 0;
  
        drm_dbg_kms(&dev_priv->drm,
                    "LP3 watermarks disabled due to potential for lost interrupts\n");
 -      intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 -      intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 -      intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 +      intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
 +      intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
 +      intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
  }
  
  static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
  {
 -      intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 +      intel_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
  
 -      memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 -             sizeof(dev_priv->wm.pri_latency));
 -      memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
 -             sizeof(dev_priv->wm.pri_latency));
 +      memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
 +             sizeof(dev_priv->display.wm.pri_latency));
 +      memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
 +             sizeof(dev_priv->display.wm.pri_latency));
  
 -      intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
 -      intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
 +      intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
 +      intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
  
 -      intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 -      intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 -      intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 +      intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
 +      intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
 +      intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
  
        if (DISPLAY_VER(dev_priv) == 6) {
                snb_wm_latency_quirk(dev_priv);
  
  static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
  {
 -      intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
 -      intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
 +      intel_read_wm_latency(dev_priv, dev_priv->display.wm.skl_latency);
 +      intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->display.wm.skl_latency);
  }
  
  static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
@@@ -3408,7 -3386,7 +3408,7 @@@ static unsigned int ilk_wm_lp_latency(s
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                return 2 * level;
        else
 -              return dev_priv->wm.pri_latency[level];
 +              return dev_priv->display.wm.pri_latency[level];
  }
  
  static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
@@@ -3560,7 -3538,7 +3560,7 @@@ static unsigned int ilk_compute_wm_dirt
  static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
                               unsigned int dirty)
  {
 -      struct ilk_wm_values *previous = &dev_priv->wm.hw;
 +      struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
        bool changed = false;
  
        if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
  static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
                                struct ilk_wm_values *results)
  {
 -      struct ilk_wm_values *previous = &dev_priv->wm.hw;
 +      struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
        unsigned int dirty;
        u32 val;
  
        if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
                intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
  
 -      dev_priv->wm.hw = *results;
 +      dev_priv->display.wm.hw = *results;
  }
  
  bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
@@@ -3691,7 -3669,7 +3691,7 @@@ static boo
  intel_has_sagv(struct drm_i915_private *dev_priv)
  {
        return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) &&
 -              dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
 +              dev_priv->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
  }
  
  static u32
@@@ -3722,7 -3700,7 +3722,7 @@@ intel_sagv_block_time(struct drm_i915_p
  static void intel_sagv_init(struct drm_i915_private *i915)
  {
        if (!intel_has_sagv(i915))
 -              i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
 +              i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
  
        /*
         * Probe to see if we have working SAGV control.
        if (DISPLAY_VER(i915) < 11)
                skl_sagv_disable(i915);
  
 -      drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN);
 +      drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
  
 -      i915->sagv_block_time_us = intel_sagv_block_time(i915);
 +      i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
  
        drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
 -                  str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us);
 +                  str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
  
        /* avoid overflow when adding with wm0 latency/etc. */
 -      if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX,
 +      if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
                     "Excessive SAGV block time %u, ignoring\n",
 -                   i915->sagv_block_time_us))
 -              i915->sagv_block_time_us = 0;
 +                   i915->display.sagv.block_time_us))
 +              i915->display.sagv.block_time_us = 0;
  
        if (!intel_has_sagv(i915))
 -              i915->sagv_block_time_us = 0;
 +              i915->display.sagv.block_time_us = 0;
  }
  
  /*
@@@ -3766,7 -3744,7 +3766,7 @@@ static void skl_sagv_enable(struct drm_
        if (!intel_has_sagv(dev_priv))
                return;
  
 -      if (dev_priv->sagv_status == I915_SAGV_ENABLED)
 +      if (dev_priv->display.sagv.status == I915_SAGV_ENABLED)
                return;
  
        drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
         */
        if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
                drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
 -              dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 +              dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
                return;
        } else if (ret < 0) {
                drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
                return;
        }
  
 -      dev_priv->sagv_status = I915_SAGV_ENABLED;
 +      dev_priv->display.sagv.status = I915_SAGV_ENABLED;
  }
  
  static void skl_sagv_disable(struct drm_i915_private *dev_priv)
        if (!intel_has_sagv(dev_priv))
                return;
  
 -      if (dev_priv->sagv_status == I915_SAGV_DISABLED)
 +      if (dev_priv->display.sagv.status == I915_SAGV_DISABLED)
                return;
  
        drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
         */
        if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
                drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
 -              dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 +              dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
                return;
        } else if (ret < 0) {
                drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
                return;
        }
  
 -      dev_priv->sagv_status = I915_SAGV_DISABLED;
 +      dev_priv->display.sagv.status = I915_SAGV_DISABLED;
  }
  
  static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
@@@ -4122,8 -4100,8 +4122,8 @@@ static u16 skl_ddb_entry_init(struct sk
  
  static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
  {
 -      return INTEL_INFO(dev_priv)->dbuf.size /
 -              hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
 +      return INTEL_INFO(dev_priv)->display.dbuf.size /
 +              hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask);
  }
  
  static void
@@@ -4142,7 -4120,7 +4142,7 @@@ skl_ddb_entry_for_slices(struct drm_i91
        ddb->end = fls(slice_mask) * slice_size;
  
        WARN_ON(ddb->start >= ddb->end);
 -      WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
 +      WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size);
  }
  
  static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
@@@ -4343,7 -4321,7 +4343,7 @@@ skl_cursor_allocation(const struct inte
        drm_WARN_ON(&dev_priv->drm, ret);
  
        for (level = 0; level <= max_level; level++) {
 -              unsigned int latency = dev_priv->wm.skl_latency[level];
 +              unsigned int latency = dev_priv->display.wm.skl_latency[level];
  
                skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
                if (wm.min_ddb_alloc == U16_MAX)
@@@ -4390,9 -4368,9 +4390,9 @@@ skl_ddb_get_hw_plane_state(struct drm_i
        skl_ddb_entry_init_from_hw(ddb_y, val);
  }
  
 -void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 -                             struct skl_ddb_entry *ddb,
 -                             struct skl_ddb_entry *ddb_y)
 +static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 +                                    struct skl_ddb_entry *ddb,
 +                                    struct skl_ddb_entry *ddb_y)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
@@@ -4927,7 -4905,7 +4927,7 @@@ static u8 skl_compute_dbuf_slices(struc
  
        if (IS_DG2(dev_priv))
                return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 -      else if (IS_ALDERLAKE_P(dev_priv))
 +      else if (DISPLAY_VER(dev_priv) >= 13)
                return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
        else if (DISPLAY_VER(dev_priv) == 12)
                return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
@@@ -4972,7 -4950,7 +4972,7 @@@ skl_total_relative_data_rate(const stru
        return data_rate;
  }
  
 -const struct skl_wm_level *
 +static const struct skl_wm_level *
  skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
                   enum plane_id plane_id,
                   int level)
        return &wm->wm[level];
  }
  
 -const struct skl_wm_level *
 +static const struct skl_wm_level *
  skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
                   enum plane_id plane_id)
  {
@@@ -5582,8 -5560,8 +5582,8 @@@ static void skl_compute_plane_wm(const 
        result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
        result->enable = true;
  
 -      if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us)
 -              result->can_sagv = latency >= dev_priv->sagv_block_time_us;
 +      if (DISPLAY_VER(dev_priv) < 12 && dev_priv->display.sagv.block_time_us)
 +              result->can_sagv = latency >= dev_priv->display.sagv.block_time_us;
  }
  
  static void
@@@ -5598,7 -5576,7 +5598,7 @@@ skl_compute_wm_levels(const struct inte
  
        for (level = 0; level <= max_level; level++) {
                struct skl_wm_level *result = &levels[level];
 -              unsigned int latency = dev_priv->wm.skl_latency[level];
 +              unsigned int latency = dev_priv->display.wm.skl_latency[level];
  
                skl_compute_plane_wm(crtc_state, plane, level, latency,
                                     wm_params, result_prev, result);
@@@ -5617,8 -5595,8 +5617,8 @@@ static void tgl_compute_sagv_wm(const s
        struct skl_wm_level *levels = plane_wm->wm;
        unsigned int latency = 0;
  
 -      if (dev_priv->sagv_block_time_us)
 -              latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0];
 +      if (dev_priv->display.sagv.block_time_us)
 +              latency = dev_priv->display.sagv.block_time_us + dev_priv->display.wm.skl_latency[0];
  
        skl_compute_plane_wm(crtc_state, plane, 0, latency,
                             wm_params, &levels[0],
@@@ -5937,8 -5915,8 +5937,8 @@@ void skl_write_cursor_wm(struct intel_p
        skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
  }
  
 -bool skl_wm_level_equals(const struct skl_wm_level *l1,
 -                       const struct skl_wm_level *l2)
 +static bool skl_wm_level_equals(const struct skl_wm_level *l1,
 +                              const struct skl_wm_level *l2)
  {
        return l1->enable == l2->enable &&
                l1->ignore_lines == l2->ignore_lines &&
@@@ -6117,7 -6095,7 +6117,7 @@@ skl_compute_ddb(struct intel_atomic_sta
                            "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
                            old_dbuf_state->enabled_slices,
                            new_dbuf_state->enabled_slices,
 -                          INTEL_INFO(dev_priv)->dbuf.slice_mask,
 +                          INTEL_INFO(dev_priv)->display.dbuf.slice_mask,
                            str_yes_no(old_dbuf_state->joined_mbus),
                            str_yes_no(new_dbuf_state->joined_mbus));
        }
@@@ -6480,10 -6458,10 +6480,10 @@@ static void ilk_initial_watermarks(stru
        const struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
        crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
        ilk_program_watermarks(dev_priv);
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  }
  
  static void ilk_optimize_watermarks(struct intel_atomic_state *state,
        if (!crtc_state->wm.need_postvbl_update)
                return;
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
        crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
        ilk_program_watermarks(dev_priv);
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  }
  
  static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
        level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
  }
  
 -void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 -                            struct skl_pipe_wm *out)
 +static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 +                                   struct skl_pipe_wm *out)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
@@@ -6582,10 -6560,7 +6582,10 @@@ void skl_wm_get_hw_state(struct drm_i91
                enum plane_id plane_id;
                u8 slices;
  
 -              skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
 +              memset(&crtc_state->wm.skl.optimal, 0,
 +                     sizeof(crtc_state->wm.skl.optimal));
 +              if (crtc_state->hw.active)
 +                      skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
                crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
  
                memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
                        struct skl_ddb_entry *ddb_y =
                                &crtc_state->wm.skl.plane_ddb_y[plane_id];
  
 +                      if (!crtc_state->hw.active)
 +                              continue;
 +
                        skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
                                                   plane_id, ddb, ddb_y);
  
@@@ -6705,7 -6677,7 +6705,7 @@@ static void ilk_pipe_wm_get_hw_state(st
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 -      struct ilk_wm_values *hw = &dev_priv->wm.hw;
 +      struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
        struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
        enum pipe pipe = crtc->pipe;
@@@ -6853,7 -6825,7 +6853,7 @@@ static void vlv_read_wm_values(struct d
  
  void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
  {
 -      struct g4x_wm_values *wm = &dev_priv->wm.g4x;
 +      struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
        struct intel_crtc *crtc;
  
        g4x_read_wm_values(dev_priv, wm);
@@@ -6947,7 -6919,7 +6947,7 @@@ void g4x_wm_sanitize(struct drm_i915_pr
        struct intel_plane *plane;
        struct intel_crtc *crtc;
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
  
        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_crtc *crtc =
  
        g4x_program_watermarks(dev_priv);
  
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  }
  
  void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
  {
 -      struct vlv_wm_values *wm = &dev_priv->wm.vlv;
 +      struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
        struct intel_crtc *crtc;
        u32 val;
  
                        drm_dbg_kms(&dev_priv->drm,
                                    "Punit not acking DDR DVFS request, "
                                    "assuming DDR DVFS is disabled\n");
 -                      dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
 +                      dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
                } else {
                        val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
                        if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@@ -7103,7 -7075,7 +7103,7 @@@ void vlv_wm_sanitize(struct drm_i915_pr
        struct intel_plane *plane;
        struct intel_crtc *crtc;
  
 -      mutex_lock(&dev_priv->wm.wm_mutex);
 +      mutex_lock(&dev_priv->display.wm.wm_mutex);
  
        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_crtc *crtc =
  
        vlv_program_watermarks(dev_priv);
  
 -      mutex_unlock(&dev_priv->wm.wm_mutex);
 +      mutex_unlock(&dev_priv->display.wm.wm_mutex);
  }
  
  /*
@@@ -7165,7 -7137,7 +7165,7 @@@ static void ilk_init_lp_watermarks(stru
  
  void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
  {
 -      struct ilk_wm_values *hw = &dev_priv->wm.hw;
 +      struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
        struct intel_crtc *crtc;
  
        ilk_init_lp_watermarks(dev_priv);
                !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
  }
  
 +void intel_wm_state_verify(struct intel_crtc *crtc,
 +                         struct intel_crtc_state *new_crtc_state)
 +{
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      struct skl_hw_state {
 +              struct skl_ddb_entry ddb[I915_MAX_PLANES];
 +              struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
 +              struct skl_pipe_wm wm;
 +      } *hw;
 +      const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
 +      int level, max_level = ilk_wm_max_level(dev_priv);
 +      struct intel_plane *plane;
 +      u8 hw_enabled_slices;
 +
 +      if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
 +              return;
 +
 +      hw = kzalloc(sizeof(*hw), GFP_KERNEL);
 +      if (!hw)
 +              return;
 +
 +      skl_pipe_wm_get_hw_state(crtc, &hw->wm);
 +
 +      skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
 +
 +      hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
 +
 +      if (DISPLAY_VER(dev_priv) >= 11 &&
 +          hw_enabled_slices != dev_priv->dbuf.enabled_slices)
 +              drm_err(&dev_priv->drm,
 +                      "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
 +                      dev_priv->dbuf.enabled_slices,
 +                      hw_enabled_slices);
 +
 +      for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
 +              const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
 +              const struct skl_wm_level *hw_wm_level, *sw_wm_level;
 +
 +              /* Watermarks */
 +              for (level = 0; level <= max_level; level++) {
 +                      hw_wm_level = &hw->wm.planes[plane->id].wm[level];
 +                      sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
 +
 +                      if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
 +                              continue;
 +
 +                      drm_err(&dev_priv->drm,
 +                              "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
 +                              plane->base.base.id, plane->base.name, level,
 +                              sw_wm_level->enable,
 +                              sw_wm_level->blocks,
 +                              sw_wm_level->lines,
 +                              hw_wm_level->enable,
 +                              hw_wm_level->blocks,
 +                              hw_wm_level->lines);
 +              }
 +
 +              hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
 +              sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
 +
 +              if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
 +                      drm_err(&dev_priv->drm,
 +                              "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
 +                              plane->base.base.id, plane->base.name,
 +                              sw_wm_level->enable,
 +                              sw_wm_level->blocks,
 +                              sw_wm_level->lines,
 +                              hw_wm_level->enable,
 +                              hw_wm_level->blocks,
 +                              hw_wm_level->lines);
 +              }
 +
 +              hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
 +              sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
 +
 +              if (HAS_HW_SAGV_WM(dev_priv) &&
 +                  !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
 +                      drm_err(&dev_priv->drm,
 +                              "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
 +                              plane->base.base.id, plane->base.name,
 +                              sw_wm_level->enable,
 +                              sw_wm_level->blocks,
 +                              sw_wm_level->lines,
 +                              hw_wm_level->enable,
 +                              hw_wm_level->blocks,
 +                              hw_wm_level->lines);
 +              }
 +
 +              hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
 +              sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
 +
 +              if (HAS_HW_SAGV_WM(dev_priv) &&
 +                  !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
 +                      drm_err(&dev_priv->drm,
 +                              "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
 +                              plane->base.base.id, plane->base.name,
 +                              sw_wm_level->enable,
 +                              sw_wm_level->blocks,
 +                              sw_wm_level->lines,
 +                              hw_wm_level->enable,
 +                              hw_wm_level->blocks,
 +                              hw_wm_level->lines);
 +              }
 +
 +              /* DDB */
 +              hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
 +              sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
 +
 +              if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
 +                      drm_err(&dev_priv->drm,
 +                              "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
 +                              plane->base.base.id, plane->base.name,
 +                              sw_ddb_entry->start, sw_ddb_entry->end,
 +                              hw_ddb_entry->start, hw_ddb_entry->end);
 +              }
 +      }
 +
 +      kfree(hw);
 +}
 +
  void intel_enable_ipc(struct drm_i915_private *dev_priv)
  {
        u32 val;
@@@ -7614,9 -7466,8 +7614,8 @@@ static void icl_init_clock_gating(struc
  
  static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
  {
-       /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */
-       if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
-           IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv))
+       /* Wa_1409120013 */
+       if (DISPLAY_VER(dev_priv) == 12)
                intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
                                   DPFC_CHICKEN_COMP_DUMMY_PIXEL);
  
@@@ -8196,18 -8047,18 +8195,18 @@@ void intel_init_clock_gating_hooks(stru
        }
  }
  
 -static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
 +static const struct intel_wm_funcs skl_wm_funcs = {
        .compute_global_watermarks = skl_compute_wm,
  };
  
 -static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
 +static const struct intel_wm_funcs ilk_wm_funcs = {
        .compute_pipe_wm = ilk_compute_pipe_wm,
        .compute_intermediate_wm = ilk_compute_intermediate_wm,
        .initial_watermarks = ilk_initial_watermarks,
        .optimize_watermarks = ilk_optimize_watermarks,
  };
  
 -static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
 +static const struct intel_wm_funcs vlv_wm_funcs = {
        .compute_pipe_wm = vlv_compute_pipe_wm,
        .compute_intermediate_wm = vlv_compute_intermediate_wm,
        .initial_watermarks = vlv_initial_watermarks,
        .atomic_update_watermarks = vlv_atomic_update_fifo,
  };
  
 -static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
 +static const struct intel_wm_funcs g4x_wm_funcs = {
        .compute_pipe_wm = g4x_compute_pipe_wm,
        .compute_intermediate_wm = g4x_compute_intermediate_wm,
        .initial_watermarks = g4x_initial_watermarks,
        .optimize_watermarks = g4x_optimize_watermarks,
  };
  
 -static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
 +static const struct intel_wm_funcs pnv_wm_funcs = {
        .update_wm = pnv_update_wm,
  };
  
 -static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
 +static const struct intel_wm_funcs i965_wm_funcs = {
        .update_wm = i965_update_wm,
  };
  
 -static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
 +static const struct intel_wm_funcs i9xx_wm_funcs = {
        .update_wm = i9xx_update_wm,
  };
  
 -static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
 +static const struct intel_wm_funcs i845_wm_funcs = {
        .update_wm = i845_update_wm,
  };
  
 -static const struct drm_i915_wm_disp_funcs nop_funcs = {
 +static const struct intel_wm_funcs nop_funcs = {
  };
  
  /* Set up chip specific power management-related functions */
@@@ -8255,27 -8106,27 +8254,27 @@@ void intel_init_pm(struct drm_i915_priv
        /* For FIFO watermark updates */
        if (DISPLAY_VER(dev_priv) >= 9) {
                skl_setup_wm_latency(dev_priv);
 -              dev_priv->wm_disp = &skl_wm_funcs;
 +              dev_priv->display.funcs.wm = &skl_wm_funcs;
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_setup_wm_latency(dev_priv);
  
 -              if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] &&
 -                   dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
 -                  (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
 -                   dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
 -                      dev_priv->wm_disp = &ilk_wm_funcs;
 +              if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
 +                   dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
 +                  (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
 +                   dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
 +                      dev_priv->display.funcs.wm = &ilk_wm_funcs;
                } else {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Failed to read display plane latency. "
                                    "Disable CxSR\n");
 -                      dev_priv->wm_disp = &nop_funcs;
 +                      dev_priv->display.funcs.wm = &nop_funcs;
                }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_setup_wm_latency(dev_priv);
 -              dev_priv->wm_disp = &vlv_wm_funcs;
 +              dev_priv->display.funcs.wm = &vlv_wm_funcs;
        } else if (IS_G4X(dev_priv)) {
                g4x_setup_wm_latency(dev_priv);
 -              dev_priv->wm_disp = &g4x_wm_funcs;
 +              dev_priv->display.funcs.wm = &g4x_wm_funcs;
        } else if (IS_PINEVIEW(dev_priv)) {
                if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
                                            dev_priv->is_ddr3,
                                 dev_priv->fsb_freq, dev_priv->mem_freq);
                        /* Disable CxSR and never update its watermark again */
                        intel_set_memory_cxsr(dev_priv, false);
 -                      dev_priv->wm_disp = &nop_funcs;
 +                      dev_priv->display.funcs.wm = &nop_funcs;
                } else
 -                      dev_priv->wm_disp = &pnv_wm_funcs;
 +                      dev_priv->display.funcs.wm = &pnv_wm_funcs;
        } else if (DISPLAY_VER(dev_priv) == 4) {
 -              dev_priv->wm_disp = &i965_wm_funcs;
 +              dev_priv->display.funcs.wm = &i965_wm_funcs;
        } else if (DISPLAY_VER(dev_priv) == 3) {
 -              dev_priv->wm_disp = &i9xx_wm_funcs;
 +              dev_priv->display.funcs.wm = &i9xx_wm_funcs;
        } else if (DISPLAY_VER(dev_priv) == 2) {
                if (INTEL_NUM_PIPES(dev_priv) == 1)
 -                      dev_priv->wm_disp = &i845_wm_funcs;
 +                      dev_priv->display.funcs.wm = &i845_wm_funcs;
                else
 -                      dev_priv->wm_disp = &i9xx_wm_funcs;
 +                      dev_priv->display.funcs.wm = &i9xx_wm_funcs;
        } else {
                drm_err(&dev_priv->drm,
                        "unexpected fall-through in %s\n", __func__);
 -              dev_priv->wm_disp = &nop_funcs;
 +              dev_priv->display.funcs.wm = &nop_funcs;
        }
  }
  
index 9b81b2543ce22bc0ea52be668a4d3c5aed3ddbe3,5d3405cb593002412843cdae08ed82e8ab67f6e2..5cd423c7b64649f4e19f26d9e78780e70ce85a12
@@@ -21,6 -21,7 +21,7 @@@
   * IN THE SOFTWARE.
   */
  
+ #include <drm/drm_managed.h>
  #include <linux/pm_runtime.h>
  
  #include "gt/intel_engine_regs.h"
@@@ -44,29 -45,47 +45,47 @@@ fw_domains_get(struct intel_uncore *unc
  }
  
  void
- intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+ intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
  {
-       spin_lock_init(&mmio_debug->lock);
-       mmio_debug->unclaimed_mmio_check = 1;
+       spin_lock_init(&i915->mmio_debug.lock);
+       i915->mmio_debug.unclaimed_mmio_check = 1;
+       i915->uncore.debug = &i915->mmio_debug;
  }
  
- static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+ static void mmio_debug_suspend(struct intel_uncore *uncore)
  {
-       lockdep_assert_held(&mmio_debug->lock);
+       if (!uncore->debug)
+               return;
+       spin_lock(&uncore->debug->lock);
  
        /* Save and disable mmio debugging for the user bypass */
-       if (!mmio_debug->suspend_count++) {
-               mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
-               mmio_debug->unclaimed_mmio_check = 0;
+       if (!uncore->debug->suspend_count++) {
+               uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
+               uncore->debug->unclaimed_mmio_check = 0;
        }
+       spin_unlock(&uncore->debug->lock);
  }
  
- static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+ static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
+ static void mmio_debug_resume(struct intel_uncore *uncore)
  {
-       lockdep_assert_held(&mmio_debug->lock);
+       if (!uncore->debug)
+               return;
+       spin_lock(&uncore->debug->lock);
+       if (!--uncore->debug->suspend_count)
+               uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
  
-       if (!--mmio_debug->suspend_count)
-               mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+       if (check_for_unclaimed_mmio(uncore))
+               drm_info(&uncore->i915->drm,
+                        "Invalid mmio detected during user access\n");
+       spin_unlock(&uncore->debug->lock);
  }
  
  static const char * const forcewake_domain_names[] = {
@@@ -677,9 -696,7 +696,7 @@@ void intel_uncore_forcewake_user_get(st
        spin_lock_irq(&uncore->lock);
        if (!uncore->user_forcewake_count++) {
                intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
-               spin_lock(&uncore->debug->lock);
-               mmio_debug_suspend(uncore->debug);
-               spin_unlock(&uncore->debug->lock);
+               mmio_debug_suspend(uncore);
        }
        spin_unlock_irq(&uncore->lock);
  }
@@@ -695,14 -712,7 +712,7 @@@ void intel_uncore_forcewake_user_put(st
  {
        spin_lock_irq(&uncore->lock);
        if (!--uncore->user_forcewake_count) {
-               spin_lock(&uncore->debug->lock);
-               mmio_debug_resume(uncore->debug);
-               if (check_for_unclaimed_mmio(uncore))
-                       drm_info(&uncore->i915->drm,
-                                "Invalid mmio detected during user access\n");
-               spin_unlock(&uncore->debug->lock);
+               mmio_debug_resume(uncore);
                intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
        }
        spin_unlock_irq(&uncore->lock);
@@@ -918,6 -928,9 +928,9 @@@ find_fw_domain(struct intel_uncore *unc
  {
        const struct intel_forcewake_range *entry;
  
+       if (IS_GSI_REG(offset))
+               offset += uncore->gsi_offset;
        entry = BSEARCH(offset,
                        uncore->fw_domains_table,
                        uncore->fw_domains_table_entries,
@@@ -1133,6 -1146,9 +1146,9 @@@ static bool is_shadowed(struct intel_un
        if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
                return false;
  
+       if (IS_GSI_REG(offset))
+               offset += uncore->gsi_offset;
        return BSEARCH(offset,
                       uncore->shadowed_reg_table,
                       uncore->shadowed_reg_table_entries,
@@@ -1704,7 -1720,7 +1720,7 @@@ unclaimed_reg_debug(struct intel_uncor
                    const bool read,
                    const bool before)
  {
-       if (likely(!uncore->i915->params.mmio_debug))
+       if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
                return;
  
        /* interrupts are disabled and re-enabled around uncore->lock usage */
@@@ -1985,8 -2001,8 +2001,8 @@@ static int __fw_domain_init(struct inte
  
        d->uncore = uncore;
        d->wake_count = 0;
-       d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
-       d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
+       d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
+       d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
  
        d->id = domain_id;
  
@@@ -2070,7 -2086,7 +2086,7 @@@ static int intel_uncore_fw_domains_init
  
        if (GRAPHICS_VER(i915) >= 11) {
                /* we'll prune the domains of missing engines later */
 -              intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
 +              intel_engine_mask_t emask = RUNTIME_INFO(i915)->platform_engine_mask;
                int i;
  
                uncore->fw_get_funcs = &uncore_get_fallback;
@@@ -2223,6 -2239,11 +2239,11 @@@ static int i915_pmic_bus_access_notifie
        return NOTIFY_OK;
  }
  
+ static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
+ {
+       iounmap(regs);
+ }
  int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
  {
        struct drm_i915_private *i915 = uncore->i915;
                return -EIO;
        }
  
-       return 0;
- }
- void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
- {
-       iounmap(uncore->regs);
+       return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
  }
  
  void intel_uncore_init_early(struct intel_uncore *uncore,
        uncore->i915 = gt->i915;
        uncore->gt = gt;
        uncore->rpm = &gt->i915->runtime_pm;
-       uncore->debug = &gt->i915->mmio_debug;
  }
  
  static void uncore_raw_init(struct intel_uncore *uncore)
@@@ -2446,8 -2461,11 +2461,11 @@@ void intel_uncore_prune_engine_fw_domai
        }
  }
  
- void intel_uncore_fini_mmio(struct intel_uncore *uncore)
+ /* Called via drm-managed action */
+ void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
  {
+       struct intel_uncore *uncore = data;
        if (intel_uncore_has_forcewake(uncore)) {
                iosf_mbi_punit_acquire();
                iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
@@@ -2577,6 -2595,9 +2595,9 @@@ bool intel_uncore_unclaimed_mmio(struc
  {
        bool ret;
  
+       if (!uncore->debug)
+               return false;
        spin_lock_irq(&uncore->debug->lock);
        ret = check_for_unclaimed_mmio(uncore);
        spin_unlock_irq(&uncore->debug->lock);
@@@ -2589,6 -2610,9 +2610,9 @@@ intel_uncore_arm_unclaimed_mmio_detecti
  {
        bool ret = false;
  
+       if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
+               return false;
        spin_lock_irq(&uncore->debug->lock);
  
        if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
index e888b5124a07a0d49755fbb35209f87bf1d392e1,b2d1a0f9e7af5bb6a3c29b7ef8a57da2c792831d..4359e8be4101831522a378204bbf20e6763703c5
@@@ -9,10 -9,9 +9,10 @@@
  #include <drm/drm_print.h>
  
  #include "gt/intel_gt_debugfs.h"
 -#include "pxp/intel_pxp.h"
 -#include "pxp/intel_pxp_irq.h"
  #include "i915_drv.h"
 +#include "intel_pxp.h"
 +#include "intel_pxp_debugfs.h"
 +#include "intel_pxp_irq.h"
  
  static int pxp_info_show(struct seq_file *m, void *data)
  {
@@@ -47,9 -46,9 +47,9 @@@ static int pxp_terminate_set(void *data
                return -ENODEV;
  
        /* simulate a termination interrupt */
-       spin_lock_irq(&gt->irq_lock);
+       spin_lock_irq(gt->irq_lock);
        intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
-       spin_unlock_irq(&gt->irq_lock);
+       spin_unlock_irq(gt->irq_lock);
  
        if (!wait_for_completion_timeout(&pxp->termination,
                                         msecs_to_jiffies(100)))
index f5904e659ef265ca8e616feca421f90dd0dc0161,8cd51cea77f0d347f74c518befcca49599db34ea..915d58ba383e85c555490c813df10ca3ad14ede6
@@@ -115,6 -115,7 +115,7 @@@ static struct dev_pm_domain pm_domain 
  static void mock_gt_probe(struct drm_i915_private *i915)
  {
        i915->gt[0] = &i915->gt0;
+       i915->gt[0]->name = "Mock GT";
  }
  
  struct drm_i915_private *mock_gem_device(void)
        /* Using the global GTT may ask questions about KMS users, so prepare */
        drm_mode_config_init(&i915->drm);
  
 -      mkwrite_device_info(i915)->graphics.ver = -1;
 +      RUNTIME_INFO(i915)->graphics.ver = -1;
  
 -      mkwrite_device_info(i915)->page_sizes =
 +      RUNTIME_INFO(i915)->page_sizes =
                I915_GTT_PAGE_SIZE_4K |
                I915_GTT_PAGE_SIZE_64K |
                I915_GTT_PAGE_SIZE_2M;
  
 -      mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
 +      RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;
        intel_memory_regions_hw_probe(i915);
  
        spin_lock_init(&i915->gpu_error.lock);
        mock_init_ggtt(to_gt(i915));
        to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
  
 -      mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
 +      RUNTIME_INFO(i915)->platform_engine_mask = BIT(0);
        to_gt(i915)->info.engine_mask = BIT(0);
  
        to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
diff --combined drivers/misc/mei/hbm.c
index cf2b8261da1444470a310f80c4cdc9e9cd4afeb5,291bc0997efeba5b47a567e26fced5b0c3b3df83..de712cbf5d07253ad0fd3ee1177f077e229fe096
@@@ -1,6 -1,6 +1,6 @@@
  // SPDX-License-Identifier: GPL-2.0
  /*
-  * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+  * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
   * Intel Management Engine Interface (Intel MEI) Linux driver
   */
  #include <linux/export.h>
@@@ -232,7 -232,7 +232,7 @@@ int mei_hbm_start_wait(struct mei_devic
        mutex_unlock(&dev->device_lock);
        ret = wait_event_timeout(dev->wait_hbm_start,
                        dev->hbm_state != MEI_HBM_STARTING,
-                       mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
+                       dev->timeouts.hbm);
        mutex_lock(&dev->device_lock);
  
        if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) {
@@@ -275,7 -275,7 +275,7 @@@ int mei_hbm_start_req(struct mei_devic
        }
  
        dev->hbm_state = MEI_HBM_STARTING;
-       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+       dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
        return 0;
  }
@@@ -316,7 -316,7 +316,7 @@@ static int mei_hbm_dma_setup_req(struc
        }
  
        dev->hbm_state = MEI_HBM_DR_SETUP;
-       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+       dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
        return 0;
  }
@@@ -351,7 -351,7 +351,7 @@@ static int mei_hbm_capabilities_req(str
        }
  
        dev->hbm_state = MEI_HBM_CAP_SETUP;
-       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+       dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
        return 0;
  }
@@@ -385,7 -385,7 +385,7 @@@ static int mei_hbm_enum_clients_req(str
                return ret;
        }
        dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
-       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+       dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
        return 0;
  }
@@@ -751,7 -751,7 +751,7 @@@ static int mei_hbm_prop_req(struct mei_
                return ret;
        }
  
-       dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+       dev->init_clients_timer = dev->timeouts.client_init;
        mei_schedule_stall_timer(dev);
  
        return 0;
@@@ -1351,8 -1351,7 +1351,8 @@@ int mei_hbm_dispatch(struct mei_device 
  
                if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
                    dev->hbm_state != MEI_HBM_CAP_SETUP) {
 -                      if (dev->dev_state == MEI_DEV_POWER_DOWN) {
 +                      if (dev->dev_state == MEI_DEV_POWER_DOWN ||
 +                          dev->dev_state == MEI_DEV_POWERING_DOWN) {
                                dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
                                return 0;
                        }
index 15e8e2b322b1a398678555f0a07091c7a1fe84b3,4d5e8af5745b75669d37e37cd7a935b23bafcaa8..99966cd3e7d892ca0f86666614bc7f664e978251
@@@ -1,6 -1,6 +1,6 @@@
  /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
  /*
-  * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+  * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
   * Intel Management Engine Interface (Intel MEI) Linux driver
   */
  #ifndef _MEI_HW_MEI_REGS_H_
  #define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
  #define MEI_DEV_ID_ADP_N      0x54E0  /* Alder Lake Point N */
  
 +#define MEI_DEV_ID_RPL_S      0x7A68  /* Raptor Lake Point S */
 +
  /*
   * MEI HW Section
   */
  #  define PCI_CFG_HFS_3_FW_SKU_SPS   0x00000060
  #define PCI_CFG_HFS_4         0x64
  #define PCI_CFG_HFS_5         0x68
+ #  define GSC_CFG_HFS_5_BOOT_TYPE_MSK      0x00000003
+ #  define GSC_CFG_HFS_5_BOOT_TYPE_PXP               3
  #define PCI_CFG_HFS_6         0x6C
  
  /* MEI registers */
  /* H_D0I3C - D0I3 Control  */
  #define H_D0I3C    0x800
  
+ #define H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG 0x100
+ #define H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG 0x104
+ #define H_GSC_EXT_OP_MEM_LIMIT_REG        0x108
+ #define GSC_EXT_OP_MEM_VALID              BIT(31)
  /* register bits of H_CSR (Host Control Status register) */
  /* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
  #define H_CBD             0xFF000000
diff --combined drivers/misc/mei/hw-me.c
index 3a95fe7d4e3306e7c1d8ad079596156ab18b1fe1,bb317cd4f0f3ce7e32eb379bc13766712fdb7f2c..9e2f781c6ed527b779ac5945d1c3c36a188caf65
@@@ -1,6 -1,6 +1,6 @@@
  // SPDX-License-Identifier: GPL-2.0
  /*
-  * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+  * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
   * Intel Management Engine Interface (Intel MEI) Linux driver
   */
  
@@@ -10,6 -10,7 +10,7 @@@
  #include <linux/interrupt.h>
  #include <linux/pm_runtime.h>
  #include <linux/sizes.h>
+ #include <linux/delay.h>
  
  #include "mei_dev.h"
  #include "hbm.h"
@@@ -327,9 -328,12 +328,12 @@@ static void mei_me_intr_clear(struct me
   */
  static void mei_me_intr_enable(struct mei_device *dev)
  {
-       u32 hcsr = mei_hcsr_read(dev);
+       u32 hcsr;
+       if (mei_me_hw_use_polling(to_me_hw(dev)))
+               return;
  
-       hcsr |= H_CSR_IE_MASK;
+       hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
        mei_hcsr_set(dev, hcsr);
  }
  
@@@ -354,6 -358,9 +358,9 @@@ static void mei_me_synchronize_irq(stru
  {
        struct mei_me_hw *hw = to_me_hw(dev);
  
+       if (mei_me_hw_use_polling(hw))
+               return;
        synchronize_irq(hw->irq);
  }
  
@@@ -380,7 -387,10 +387,10 @@@ static void mei_me_host_set_ready(struc
  {
        u32 hcsr = mei_hcsr_read(dev);
  
-       hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
+       if (!mei_me_hw_use_polling(to_me_hw(dev)))
+               hcsr |= H_CSR_IE_MASK;
+       hcsr |=  H_IG | H_RDY;
        mei_hcsr_set(dev, hcsr);
  }
  
@@@ -423,6 -433,29 +433,29 @@@ static bool mei_me_hw_is_resetting(stru
        return (mecsr & ME_RST_HRA) == ME_RST_HRA;
  }
  
+ /**
+  * mei_gsc_pxp_check - check for gsc firmware entering pxp mode
+  *
+  * @dev: the device structure
+  */
+ static void mei_gsc_pxp_check(struct mei_device *dev)
+ {
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 fwsts5 = 0;
+       if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
+               return;
+       hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
+       trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
+       if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
+               dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
+               dev->pxp_mode = MEI_DEV_PXP_READY;
+       } else {
+               dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
+       }
+ }
  /**
   * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
   *  or timeout is reached
@@@ -435,13 -468,15 +468,15 @@@ static int mei_me_hw_ready_wait(struct 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_hw_ready,
                        dev->recvd_hw_ready,
-                       mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
+                       dev->timeouts.hw_ready);
        mutex_lock(&dev->device_lock);
        if (!dev->recvd_hw_ready) {
                dev_err(dev->dev, "wait hw ready failed\n");
                return -ETIME;
        }
  
+       mei_gsc_pxp_check(dev);
        mei_me_hw_reset_release(dev);
        dev->recvd_hw_ready = false;
        return 0;
@@@ -561,7 -596,7 +596,7 @@@ static int mei_me_hbuf_write(struct mei
        dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
  
        empty_slots = mei_hbuf_empty_slots(dev);
 -      dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
 +      dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);
  
        if (empty_slots < 0)
                return -EOVERFLOW;
@@@ -697,7 -732,6 +732,6 @@@ static void mei_me_pg_unset(struct mei_
  static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
  {
        struct mei_me_hw *hw = to_me_hw(dev);
-       unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
  
        dev->pg_event = MEI_PG_EVENT_WAIT;
  
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+               dev->pg_event == MEI_PG_EVENT_RECEIVED,
+               dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);
  
        if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
  static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
  {
        struct mei_me_hw *hw = to_me_hw(dev);
-       unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
  
        if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
  
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+               dev->pg_event == MEI_PG_EVENT_RECEIVED,
+               dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);
  
  reply:
  
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+               dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);
  
        if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
@@@ -877,8 -913,6 +913,6 @@@ static u32 mei_me_d0i3_unset(struct mei
  static int mei_me_d0i3_enter_sync(struct mei_device *dev)
  {
        struct mei_me_hw *hw = to_me_hw(dev);
-       unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
-       unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;
        u32 reg;
  
  
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
+               dev->pg_event == MEI_PG_EVENT_RECEIVED,
+               dev->timeouts.pgi);
        mutex_lock(&dev->device_lock);
  
        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
  
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
+               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+               dev->timeouts.d0i3);
        mutex_lock(&dev->device_lock);
  
        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
@@@ -980,7 -1016,6 +1016,6 @@@ on
  static int mei_me_d0i3_exit_sync(struct mei_device *dev)
  {
        struct mei_me_hw *hw = to_me_hw(dev);
-       unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
        int ret;
        u32 reg;
  
  
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
-               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+               dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+               dev->timeouts.d0i3);
        mutex_lock(&dev->device_lock);
  
        if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
@@@ -1154,8 -1190,6 +1190,8 @@@ static int mei_me_hw_reset(struct mei_d
                        ret = mei_me_d0i3_exit_sync(dev);
                        if (ret)
                                return ret;
 +              } else {
 +                      hw->pg_state = MEI_PG_OFF;
                }
        }
  
  
        hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
  
-       if (!intr_enable)
+       if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
                hcsr &= ~H_CSR_IE_MASK;
  
        dev->recvd_hw_ready = false;
@@@ -1259,7 -1293,8 +1295,8 @@@ irqreturn_t mei_me_irq_thread_handler(i
  
        /* check if ME wants a reset */
        if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
-               dev_warn(dev->dev, "FW not ready: resetting.\n");
+               dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n",
+                        dev->dev_state, dev->pxp_mode);
                if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
                    dev->dev_state == MEI_DEV_POWER_DOWN)
                        mei_cl_all_disconnect(dev);
  }
  EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
  
+ #define MEI_POLLING_TIMEOUT_ACTIVE 100
+ #define MEI_POLLING_TIMEOUT_IDLE   500
+ /**
+  * mei_me_polling_thread - interrupt register polling thread
+  *
+  * The thread monitors the interrupt source register and calls
+  * mei_me_irq_thread_handler() to handle the firmware
+  * input.
+  *
+  * The function polls with a MEI_POLLING_TIMEOUT_ACTIVE interval
+  * while events are being detected; on each idle iteration the
+  * polling interval is increased by MEI_POLLING_TIMEOUT_ACTIVE,
+  * up to a maximum of MEI_POLLING_TIMEOUT_IDLE.
+  *
+  * @_dev: mei device
+  *
+  * Return: always 0
+  */
+ int mei_me_polling_thread(void *_dev)
+ {
+       struct mei_device *dev = _dev;
+       irqreturn_t irq_ret;
+       long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+       dev_dbg(dev->dev, "kernel thread is running\n");
+       while (!kthread_should_stop()) {
+               struct mei_me_hw *hw = to_me_hw(dev);
+               u32 hcsr;
+               wait_event_timeout(hw->wait_active,
+                                  hw->is_active || kthread_should_stop(),
+                                  msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));
+               if (kthread_should_stop())
+                       break;
+               hcsr = mei_hcsr_read(dev);
+               if (me_intr_src(hcsr)) {
+                       polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+                       irq_ret = mei_me_irq_thread_handler(1, dev);
+                       if (irq_ret != IRQ_HANDLED)
+                               dev_err(dev->dev, "irq_ret %d\n", irq_ret);
+               } else {
+                       /*
+                        * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
+                        * up to MEI_POLLING_TIMEOUT_IDLE
+                        */
+                       polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
+                                                   MEI_POLLING_TIMEOUT_ACTIVE,
+                                                   MEI_POLLING_TIMEOUT_IDLE);
+               }
+               schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
+       }
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(mei_me_polling_thread);
  static const struct mei_hw_ops mei_me_hw_ops = {
  
        .trc_status = mei_me_trc_status,
@@@ -1636,11 -1731,12 +1733,12 @@@ EXPORT_SYMBOL_GPL(mei_me_get_cfg)
   *
   * @parent: device associated with physical device (pci/platform)
   * @cfg: per device generation config
+  * @slow_fw: configure longer timeouts as FW is slow
   *
   * Return: The mei_device pointer on success, NULL on failure.
   */
  struct mei_device *mei_me_dev_init(struct device *parent,
-                                  const struct mei_cfg *cfg)
+                                  const struct mei_cfg *cfg, bool slow_fw)
  {
        struct mei_device *dev;
        struct mei_me_hw *hw;
        for (i = 0; i < DMA_DSCR_NUM; i++)
                dev->dr_dscr[i].size = cfg->dma_size[i];
  
-       mei_device_init(dev, parent, &mei_me_hw_ops);
+       mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
        hw->cfg = cfg;
  
        dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
index 5435604327a71e9077d242c30386af505905f9e3,47e1e1ec3d1b1552a7d3f4c75247f68eb424c02b..704cd0caa172caec9bbd459dbe15ae7db879364c
@@@ -1,6 -1,6 +1,6 @@@
  // SPDX-License-Identifier: GPL-2.0
  /*
-  * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+  * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
   * Intel Management Engine Interface (Intel MEI) Linux driver
   */
  
@@@ -116,8 -116,6 +116,8 @@@ static const struct pci_device_id mei_m
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
  
 +      {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
 +
        /* required last entry */
        {0, }
  };
@@@ -203,7 -201,7 +203,7 @@@ static int mei_me_probe(struct pci_dev 
        }
  
        /* allocates and initializes the mei dev structure */
-       dev = mei_me_dev_init(&pdev->dev, cfg);
+       dev = mei_me_dev_init(&pdev->dev, cfg, false);
        if (!dev) {
                err = -ENOMEM;
                goto end;