Merge tag 'iommu-updates-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git...
author	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 1 Jan 2019 23:55:29 +0000 (15:55 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Tue, 1 Jan 2019 23:55:29 +0000 (15:55 -0800)
Pull IOMMU updates from Joerg Roedel:

 - Page table code for AMD IOMMU now supports large pages where smaller
   page sizes were mapped before. VFIO had to work around that in the
   past and I included a patch to remove that workaround (acked by Alex
   Williamson)

 - Patches to unmodularize a couple of IOMMU drivers that would never
   work as modules anyway.

 - Work to unify the iommu-related pointers in 'struct device' into
   one pointer. This work is not finished yet, but will probably be
   completed in the next cycle (a sketch of the new helpers follows
   this list).

 - NUMA-aware allocation in the iommu-dma code (also sketched below)

 - Support for r8a774a1 and r8a774c0 in the Renesas IOMMU driver

 - Scalable mode support for the Intel VT-d driver

 - PM runtime improvements for the ARM-SMMU driver

 - Support for the QCOM-SMMUv2 IOMMU hardware from Qualcomm

 - Various smaller fixes and improvements
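
A minimal sketch of the two helpers behind the 'struct device'
unification work mentioned above. The bodies are inferred from the
call sites in the diff below (device_iommu_mapped() replaces
open-coded dev->iommu_group checks; dev_iommu_fwspec_get() wraps
direct dev->iommu_fwspec accesses), so treat this as an
approximation rather than the exact patches:

    #include <linux/device.h>   /* struct device */
    #include <linux/iommu.h>    /* struct iommu_fwspec */

    /* Sketch only -- the real definitions live in the patches. */
    static inline bool device_iommu_mapped(struct device *dev)
    {
            return dev->iommu_group != NULL;
    }

    static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
    {
            return dev->iommu_fwspec;
    }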
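
Likewise, the NUMA-aware iommu-dma allocation amounts to steering
buffer allocations to the device's home node. A hedged sketch: the
helper name is made up for illustration, while dev_to_node() and
alloc_pages_node() are the standard kernel APIs involved:

    #include <linux/device.h>   /* dev_to_node() */
    #include <linux/gfp.h>      /* alloc_pages_node() */

    /* Illustrative only: allocate backing pages on the device's NUMA
     * node rather than whichever node the CPU happens to run on. */
    static struct page *iommu_dma_alloc_on_node(struct device *dev,
                                                gfp_t gfp, unsigned int order)
    {
            return alloc_pages_node(dev_to_node(dev), gfp, order);
    }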

* tag 'iommu-updates-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (78 commits)
  iommu: Check for iommu_ops == NULL in iommu_probe_device()
  ACPI/IORT: Don't call iommu_ops->add_device directly
  iommu/of: Don't call iommu_ops->add_device directly
  iommu: Consolidate ->add/remove_device() calls
  iommu/sysfs: Rename iommu_release_device()
  dmaengine: sh: rcar-dmac: Use device_iommu_mapped()
  xhci: Use device_iommu_mapped()
  powerpc/iommu: Use device_iommu_mapped()
  ACPI/IORT: Use device_iommu_mapped()
  iommu/of: Use device_iommu_mapped()
  driver core: Introduce device_iommu_mapped() function
  iommu/tegra: Use helper functions to access dev->iommu_fwspec
  iommu/qcom: Use helper functions to access dev->iommu_fwspec
  iommu/of: Use helper functions to access dev->iommu_fwspec
  iommu/mediatek: Use helper functions to access dev->iommu_fwspec
  iommu/ipmmu-vmsa: Use helper functions to access dev->iommu_fwspec
  iommu/dma: Use helper functions to access dev->iommu_fwspec
  iommu/arm-smmu: Use helper functions to access dev->iommu_fwspec
  ACPI/IORT: Use helper functions to access dev->iommu_fwspec
  iommu: Introduce wrappers around dev->iommu_fwspec
  ...

13 files changed:
Documentation/admin-guide/kernel-parameters.txt
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/iommu.c
drivers/acpi/arm64/iort.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/iommu/amd_iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/misc/mic/scif/scif_rma.h
drivers/usb/host/xhci.c

index 37e235be1d3516392063d8a896d2eeb2b103f902,70384d8682eaac66fab163178d320ffe144a0e1a..408781ee142c9f0242520e9e240400fc35ca8e58
                        APC and your system crashes randomly.
  
        apic=           [APIC,X86] Advanced Programmable Interrupt Controller
 -                      Change the output verbosity whilst booting
 +                      Change the output verbosity while booting
                        Format: { quiet (default) | verbose | debug }
                        Change the amount of debugging information output
                        when initialising the APIC and IO-APIC components.
                        cut the overhead, others just disable the usage. So
                        only cgroup_disable=memory is actually worthy}
  
 -      cgroup_no_v1=   [KNL] Disable one, multiple, all cgroup controllers in v1
 -                      Format: { controller[,controller...] | "all" }
 +      cgroup_no_v1=   [KNL] Disable cgroup controllers and named hierarchies in v1
 +                      Format: { { controller | "all" | "named" }
 +                                [,{ controller | "all" | "named" }...] }
                        Like cgroup_disable, but only applies to cgroup v1;
                        the blacklisted controllers remain available in cgroup2.
 +                      "all" blacklists all controllers and "named" disables
 +                      named mounts. Specifying both "all" and "named" disables
 +                      all v1 hierarchies.
  
        cgroup.memory=  [KNL] Pass options to the cgroup memory controller.
                        Format: <string>
        cpuidle.off=1   [CPU_IDLE]
                        disable the cpuidle sub-system
  
 +      cpuidle.governor=
 +                      [CPU_IDLE] Name of the cpuidle governor to use.
 +
        cpufreq.off=1   [CPU_FREQ]
                        disable the cpufreq sub-system
  
                        By default, super page will be supported if Intel IOMMU
                        has the capability. With this option, super page will
                        not be supported.
-               ecs_off [Default Off]
-                       By default, extended context tables will be supported if
-                       the hardware advertises that it has support both for the
-                       extended tables themselves, and also PASID support. With
-                       this option set, extended tables will not be used even
-                       on hardware which claims to support them.
+               sm_off [Default Off]
+                       By default, scalable mode will be supported if the
+                       hardware advertises that it has support for the scalable
+                       mode translation. With this option set, scalable mode
+                       will not be used even on hardware which claims to support
+                       it.
                tboot_noforce [Default Off]
                        Do not force the Intel IOMMU enabled under tboot.
                        By default, tboot will force Intel IOMMU on, which
                        off
                                Disables hypervisor mitigations and doesn't
                                emit any warnings.
 +                              It also drops the swap size and available
 +                              RAM limit restriction on both hypervisor and
 +                              bare metal.
  
                        Default is 'flush'.
  
                        check bypass). With this option data leaks are possible
                        in the system.
  
 -      nospectre_v2    [X86] Disable all mitigations for the Spectre variant 2
 +      nospectre_v2    [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
                        (indirect branch prediction) vulnerability. System may
                        allow data leaks with this option, which is equivalent
                        to spectre_v2=off.
                        in microseconds.  The default of zero says
                        no holdoff.
  
 -      rcutorture.cbflood_inter_holdoff= [KNL]
 -                      Set holdoff time (jiffies) between successive
 -                      callback-flood tests.
 -
 -      rcutorture.cbflood_intra_holdoff= [KNL]
 -                      Set holdoff time (jiffies) between successive
 -                      bursts of callbacks within a given callback-flood
 -                      test.
 -
 -      rcutorture.cbflood_n_burst= [KNL]
 -                      Set the number of bursts making up a given
 -                      callback-flood test.  Set this to zero to
 -                      disable callback-flood testing.
 -
 -      rcutorture.cbflood_n_per_burst= [KNL]
 -                      Set the number of callbacks to be registered
 -                      in a given burst of a callback-flood test.
 -
        rcutorture.fqs_duration= [KNL]
                        Set duration of force_quiescent_state bursts
                        in microseconds.
                        Set wait time between force_quiescent_state bursts
                        in seconds.
  
 +      rcutorture.fwd_progress= [KNL]
 +                      Enable RCU grace-period forward-progress testing
 +                      for the types of RCU supporting this notion.
 +
 +      rcutorture.fwd_progress_div= [KNL]
 +                      Specify the fraction of a CPU-stall-warning
 +                      period to do tight-loop forward-progress testing.
 +
 +      rcutorture.fwd_progress_holdoff= [KNL]
 +                      Number of seconds to wait between successive
 +                      forward-progress tests.
 +
 +      rcutorture.fwd_progress_need_resched= [KNL]
 +                      Enclose cond_resched() calls within checks for
 +                      need_resched() during tight-loop forward-progress
 +                      testing.
 +
        rcutorture.gp_cond= [KNL]
                        Use conditional/asynchronous update-side
                        primitives, if available.
index 3230137469abda80e759f6a7824fbe584b21e2c6,23fe62f11486e0109c35c243d31fb0b62aeb5a75..ae05203eb4de67ef69e5673d4111ac646049c348
@@@ -1472,7 -1472,7 +1472,7 @@@ static int dev_has_iommu_table(struct d
        if (!dev)
                return 0;
  
-       if (dev->iommu_group) {
+       if (device_iommu_mapped(dev)) {
                *ppdev = pdev;
                return 1;
        }
@@@ -1808,10 -1808,10 +1808,10 @@@ static int eeh_freeze_dbgfs_get(void *d
        return 0;
  }
  
 -DEFINE_SIMPLE_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
 -                      eeh_enable_dbgfs_set, "0x%llx\n");
 -DEFINE_SIMPLE_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get,
 -                      eeh_freeze_dbgfs_set, "0x%llx\n");
 +DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
 +                       eeh_enable_dbgfs_set, "0x%llx\n");
 +DEFINE_DEBUGFS_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get,
 +                       eeh_freeze_dbgfs_set, "0x%llx\n");
  #endif
  
  static int __init eeh_init_proc(void)
        if (machine_is(pseries) || machine_is(powernv)) {
                proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show);
  #ifdef CONFIG_DEBUG_FS
 -              debugfs_create_file("eeh_enable", 0600,
 -                                    powerpc_debugfs_root, NULL,
 -                                    &eeh_enable_dbgfs_ops);
 -              debugfs_create_file("eeh_max_freezes", 0600,
 -                                  powerpc_debugfs_root, NULL,
 -                                  &eeh_freeze_dbgfs_ops);
 +              debugfs_create_file_unsafe("eeh_enable", 0600,
 +                                         powerpc_debugfs_root, NULL,
 +                                         &eeh_enable_dbgfs_ops);
 +              debugfs_create_file_unsafe("eeh_max_freezes", 0600,
 +                                         powerpc_debugfs_root, NULL,
 +                                         &eeh_freeze_dbgfs_ops);
  #endif
        }
  
index d0625480b59e5cd00b9381ba191988ed07b65c26,48d58d1dcac2d30e38d29884b51a368b45e52294..33bbd59cff792a8a981ef3775095e4a081e39aeb
@@@ -47,7 -47,6 +47,7 @@@
  #include <asm/fadump.h>
  #include <asm/vio.h>
  #include <asm/tce.h>
 +#include <asm/mmu_context.h>
  
  #define DBG(...)
  
@@@ -198,11 -197,11 +198,11 @@@ static unsigned long iommu_range_alloc(
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
 -              return IOMMU_MAPPING_ERROR;
 +              return DMA_MAPPING_ERROR;
        }
  
        if (should_fail_iommu(dev))
 -              return IOMMU_MAPPING_ERROR;
 +              return DMA_MAPPING_ERROR;
  
        /*
         * We don't need to disable preemption here because any CPU can
@@@ -278,7 -277,7 +278,7 @@@ again
                } else {
                        /* Give up */
                        spin_unlock_irqrestore(&(pool->lock), flags);
 -                      return IOMMU_MAPPING_ERROR;
 +                      return DMA_MAPPING_ERROR;
                }
        }
  
@@@ -310,13 -309,13 +310,13 @@@ static dma_addr_t iommu_alloc(struct de
                              unsigned long attrs)
  {
        unsigned long entry;
 -      dma_addr_t ret = IOMMU_MAPPING_ERROR;
 +      dma_addr_t ret = DMA_MAPPING_ERROR;
        int build_fail;
  
        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
  
 -      if (unlikely(entry == IOMMU_MAPPING_ERROR))
 -              return IOMMU_MAPPING_ERROR;
 +      if (unlikely(entry == DMA_MAPPING_ERROR))
 +              return DMA_MAPPING_ERROR;
  
        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << tbl->it_page_shift;      /* Set the return dma address */
  
        /* tbl->it_ops->set() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
 -       * IOMMU_MAPPING_ERROR. For all other errors the functionality is
 +       * DMA_MAPPING_ERROR. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);
 -              return IOMMU_MAPPING_ERROR;
 +              return DMA_MAPPING_ERROR;
        }
  
        /* Flush/invalidate TLB caches if necessary */
@@@ -478,7 -477,7 +478,7 @@@ int ppc_iommu_map_sg(struct device *dev
                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
  
                /* Handle failure */
 -              if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
 +              if (unlikely(entry == DMA_MAPPING_ERROR)) {
                        if (!(attrs & DMA_ATTR_NO_WARN) &&
                            printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p "
         */
        if (outcount < incount) {
                outs = sg_next(outs);
 -              outs->dma_address = IOMMU_MAPPING_ERROR;
 +              outs->dma_address = DMA_MAPPING_ERROR;
                outs->dma_length = 0;
        }
  
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE(tbl));
                        __iommu_free(tbl, vaddr, npages);
 -                      s->dma_address = IOMMU_MAPPING_ERROR;
 +                      s->dma_address = DMA_MAPPING_ERROR;
                        s->dma_length = 0;
                }
                if (s == outs)
@@@ -777,7 -776,7 +777,7 @@@ dma_addr_t iommu_map_page(struct devic
                          unsigned long mask, enum dma_data_direction direction,
                          unsigned long attrs)
  {
 -      dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
 +      dma_addr_t dma_handle = DMA_MAPPING_ERROR;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;
                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> tbl->it_page_shift, align,
                                         attrs);
 -              if (dma_handle == IOMMU_MAPPING_ERROR) {
 +              if (dma_handle == DMA_MAPPING_ERROR) {
                        if (!(attrs & DMA_ATTR_NO_WARN) &&
                            printk_ratelimit())  {
                                dev_info(dev, "iommu_alloc failed, tbl %p "
@@@ -869,7 -868,7 +869,7 @@@ void *iommu_alloc_coherent(struct devic
        io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
 -      if (mapping == IOMMU_MAPPING_ERROR) {
 +      if (mapping == DMA_MAPPING_ERROR) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
@@@ -994,19 -993,15 +994,19 @@@ int iommu_tce_check_gpa(unsigned long p
  }
  EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
  
 -long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 -              unsigned long *hpa, enum dma_data_direction *direction)
 +long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
 +              unsigned long entry, unsigned long *hpa,
 +              enum dma_data_direction *direction)
  {
        long ret;
 +      unsigned long size = 0;
  
        ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);
  
        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
 -                      (*direction == DMA_BIDIRECTIONAL)))
 +                      (*direction == DMA_BIDIRECTIONAL)) &&
 +                      !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
 +                                      &size))
                SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
  
        /* if (unlikely(ret))
@@@ -1078,8 -1073,11 +1078,8 @@@ void iommu_release_ownership(struct iom
  }
  EXPORT_SYMBOL_GPL(iommu_release_ownership);
  
 -int iommu_add_device(struct device *dev)
 +int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
  {
 -      struct iommu_table *tbl;
 -      struct iommu_table_group_link *tgl;
 -
        /*
         * The sysfs entries should be populated before
         * binding IOMMU group. If sysfs entries isn't
        if (!device_is_registered(dev))
                return -ENOENT;
  
-       if (dev->iommu_group) {
+       if (device_iommu_mapped(dev)) {
                pr_debug("%s: Skipping device %s with iommu group %d\n",
                         __func__, dev_name(dev),
                         iommu_group_id(dev->iommu_group));
                return -EBUSY;
        }
  
 -      tbl = get_iommu_table_base(dev);
 -      if (!tbl) {
 -              pr_debug("%s: Skipping device %s with no tbl\n",
 -                       __func__, dev_name(dev));
 -              return 0;
 -      }
 -
 -      tgl = list_first_entry_or_null(&tbl->it_group_list,
 -                      struct iommu_table_group_link, next);
 -      if (!tgl) {
 -              pr_debug("%s: Skipping device %s with no group\n",
 -                       __func__, dev_name(dev));
 -              return 0;
 -      }
        pr_debug("%s: Adding %s to iommu group %d\n",
 -               __func__, dev_name(dev),
 -               iommu_group_id(tgl->table_group->group));
 -
 -      if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
 -              pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
 -                     __func__, IOMMU_PAGE_SIZE(tbl),
 -                     PAGE_SIZE, dev_name(dev));
 -              return -EINVAL;
 -      }
 +               __func__, dev_name(dev),  iommu_group_id(table_group->group));
  
 -      return iommu_group_add_device(tgl->table_group->group, dev);
 +      return iommu_group_add_device(table_group->group, dev);
  }
  EXPORT_SYMBOL_GPL(iommu_add_device);
  
@@@ -1109,7 -1129,7 +1109,7 @@@ void iommu_del_device(struct device *de
         * and we needn't detach them from the associated
         * IOMMU groups
         */
-       if (!dev->iommu_group) {
+       if (!device_iommu_mapped(dev)) {
                pr_debug("iommu_tce: skipping device %s with no tbl\n",
                         dev_name(dev));
                return;
        iommu_group_remove_device(dev);
  }
  EXPORT_SYMBOL_GPL(iommu_del_device);
 -
 -static int tce_iommu_bus_notifier(struct notifier_block *nb,
 -                unsigned long action, void *data)
 -{
 -        struct device *dev = data;
 -
 -        switch (action) {
 -        case BUS_NOTIFY_ADD_DEVICE:
 -                return iommu_add_device(dev);
 -        case BUS_NOTIFY_DEL_DEVICE:
 -                if (device_iommu_mapped(dev))
 -                        iommu_del_device(dev);
 -                return 0;
 -        default:
 -                return 0;
 -        }
 -}
 -
 -static struct notifier_block tce_iommu_bus_nb = {
 -        .notifier_call = tce_iommu_bus_notifier,
 -};
 -
 -int __init tce_iommu_bus_notifier_init(void)
 -{
 -        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
 -        return 0;
 -}
  #endif /* CONFIG_IOMMU_API */
index 2159ad9bf9ed8721733fdfd8f5ee29098b42850a,6d539d0673738378428bae5bba34580cd29e0736..fdd90ffceb85cedd8c8a2049d48be97d66ee9a6e
@@@ -779,7 -779,7 +779,7 @@@ static inline bool iort_iommu_driver_en
  static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
  {
        struct acpi_iort_node *iommu;
-       struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
  
        iommu = iort_get_iort_node(fwspec->iommu_fwnode);
  
        return NULL;
  }
  
- static inline const struct iommu_ops *iort_fwspec_iommu_ops(
-                               struct iommu_fwspec *fwspec)
+ static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
  {
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
  }
  
@@@ -805,8 -806,8 +806,8 @@@ static inline int iort_add_device_repla
  {
        int err = 0;
  
-       if (ops->add_device && dev->bus && !dev->iommu_group)
-               err = ops->add_device(dev);
+       if (dev->bus && !device_iommu_mapped(dev))
+               err = iommu_probe_device(dev);
  
        return err;
  }
   */
  int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
  {
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *iommu_node, *its_node = NULL;
        int i, resv = 0;
         * a given PCI or named component may map IDs to.
         */
  
-       for (i = 0; i < dev->iommu_fwspec->num_ids; i++) {
+       for (i = 0; i < fwspec->num_ids; i++) {
                its_node = iort_node_map_id(iommu_node,
-                                       dev->iommu_fwspec->ids[i],
+                                       fwspec->ids[i],
                                        NULL, IORT_MSI_TYPE);
                if (its_node)
                        break;
        return (resv == its->its_count) ? resv : -ENODEV;
  }
  #else
- static inline const struct iommu_ops *iort_fwspec_iommu_ops(
-                               struct iommu_fwspec *fwspec)
+ static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
  { return NULL; }
  static inline int iort_add_device_replay(const struct iommu_ops *ops,
                                         struct device *dev)
@@@ -1045,7 -1046,7 +1046,7 @@@ const struct iommu_ops *iort_iommu_conf
         * If we already translated the fwspec there
         * is nothing left to do, return the iommu_ops.
         */
-       ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+       ops = iort_fwspec_iommu_ops(dev);
        if (ops)
                return ops;
  
         * add_device callback for dev, replay it to get things in order.
         */
        if (!err) {
-               ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+               ops = iort_fwspec_iommu_ops(dev);
                err = iort_add_device_replay(ops, dev);
        }
  
@@@ -1435,14 -1436,8 +1436,14 @@@ dev_put
        return ret;
  }
  
 -static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
 +#ifdef CONFIG_PCI
 +static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
  {
 +      static bool acs_enabled __initdata;
 +
 +      if (acs_enabled)
 +              return;
 +
        if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                struct acpi_iort_node *parent;
                struct acpi_iort_id_mapping *map;
                        if ((parent->type == ACPI_IORT_NODE_SMMU) ||
                                (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
                                pci_request_acs();
 -                              return true;
 +                              acs_enabled = true;
 +                              return;
                        }
                }
        }
 -
 -      return false;
  }
 +#else
 +static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
 +#endif
  
  static void __init iort_init_platform_devices(void)
  {
        struct acpi_table_iort *iort;
        struct fwnode_handle *fwnode;
        int i, ret;
 -      bool acs_enabled = false;
        const struct iort_dev_config *ops;
  
        /*
                        return;
                }
  
 -              if (!acs_enabled)
 -                      acs_enabled = iort_enable_acs(iort_node);
 +              iort_enable_acs(iort_node);
  
                ops = iort_get_dev_cfg(iort_node);
                if (ops) {
index 786d719e652de95e82e540847b03698b0e2907b9,1c5d04f002bca4f39b637af57538b63a10e6591d..8ff6b581cf1c73f318be40bbf1d0718129c765ae
@@@ -26,7 -26,7 +26,7 @@@
   *
   */
  
- #include <linux/dma_remapping.h>
+ #include <linux/intel-iommu.h>
  #include <linux/reservation.h>
  #include <linux/sync_file.h>
  #include <linux/uaccess.h>
@@@ -2186,7 -2186,7 +2186,7 @@@ signal_fence_array(struct i915_execbuff
                if (!(flags & I915_EXEC_FENCE_SIGNAL))
                        continue;
  
 -              drm_syncobj_replace_fence(syncobj, 0, fence);
 +              drm_syncobj_replace_fence(syncobj, fence);
        }
  }
  
index 07c861884c7068124f3c7835a9f46ff23f0da10f,a17f0737b5f834b62f53a0a21282b083a9b5fbec..3da9c0f9e9485c7c4b9ccf8fefe5c71f72f1ea02
@@@ -24,6 -24,7 +24,6 @@@
   *    Eric Anholt <eric@anholt.net>
   */
  
 -#include <linux/dmi.h>
  #include <linux/module.h>
  #include <linux/input.h>
  #include <linux/i2c.h>
@@@ -46,7 -47,7 +46,7 @@@
  #include <drm/drm_plane_helper.h>
  #include <drm/drm_rect.h>
  #include <drm/drm_atomic_uapi.h>
- #include <linux/dma_remapping.h>
+ #include <linux/intel-iommu.h>
  #include <linux/reservation.h>
  
  /* Primary plane formats for gen <= 3 */
@@@ -73,6 -74,55 +73,6 @@@ static const uint64_t i9xx_format_modif
        DRM_FORMAT_MOD_INVALID
  };
  
 -static const uint32_t skl_primary_formats[] = {
 -      DRM_FORMAT_C8,
 -      DRM_FORMAT_RGB565,
 -      DRM_FORMAT_XRGB8888,
 -      DRM_FORMAT_XBGR8888,
 -      DRM_FORMAT_ARGB8888,
 -      DRM_FORMAT_ABGR8888,
 -      DRM_FORMAT_XRGB2101010,
 -      DRM_FORMAT_XBGR2101010,
 -      DRM_FORMAT_YUYV,
 -      DRM_FORMAT_YVYU,
 -      DRM_FORMAT_UYVY,
 -      DRM_FORMAT_VYUY,
 -};
 -
 -static const uint32_t skl_pri_planar_formats[] = {
 -      DRM_FORMAT_C8,
 -      DRM_FORMAT_RGB565,
 -      DRM_FORMAT_XRGB8888,
 -      DRM_FORMAT_XBGR8888,
 -      DRM_FORMAT_ARGB8888,
 -      DRM_FORMAT_ABGR8888,
 -      DRM_FORMAT_XRGB2101010,
 -      DRM_FORMAT_XBGR2101010,
 -      DRM_FORMAT_YUYV,
 -      DRM_FORMAT_YVYU,
 -      DRM_FORMAT_UYVY,
 -      DRM_FORMAT_VYUY,
 -      DRM_FORMAT_NV12,
 -};
 -
 -static const uint64_t skl_format_modifiers_noccs[] = {
 -      I915_FORMAT_MOD_Yf_TILED,
 -      I915_FORMAT_MOD_Y_TILED,
 -      I915_FORMAT_MOD_X_TILED,
 -      DRM_FORMAT_MOD_LINEAR,
 -      DRM_FORMAT_MOD_INVALID
 -};
 -
 -static const uint64_t skl_format_modifiers_ccs[] = {
 -      I915_FORMAT_MOD_Yf_TILED_CCS,
 -      I915_FORMAT_MOD_Y_TILED_CCS,
 -      I915_FORMAT_MOD_Yf_TILED,
 -      I915_FORMAT_MOD_Y_TILED,
 -      I915_FORMAT_MOD_X_TILED,
 -      DRM_FORMAT_MOD_LINEAR,
 -      DRM_FORMAT_MOD_INVALID
 -};
 -
  /* Cursor formats */
  static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
@@@ -91,15 -141,15 +91,15 @@@ static void ironlake_pch_clock_get(stru
  static int intel_framebuffer_init(struct intel_framebuffer *ifb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd);
 -static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
 -static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
 -static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
 -static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 -                                       struct intel_link_m_n *m_n,
 -                                       struct intel_link_m_n *m2_n2);
 -static void ironlake_set_pipeconf(struct drm_crtc *crtc);
 -static void haswell_set_pipeconf(struct drm_crtc *crtc);
 -static void haswell_set_pipemisc(struct drm_crtc *crtc);
 +static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
 +static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
 +static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
 +                                       const struct intel_link_m_n *m_n,
 +                                       const struct intel_link_m_n *m2_n2);
 +static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
 +static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
 +static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
 +static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
  static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
  static void chv_prepare_pll(struct intel_crtc *crtc,
@@@ -108,9 -158,9 +108,9 @@@ static void intel_begin_crtc_commit(str
  static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
  static void intel_crtc_init_scalers(struct intel_crtc *crtc,
                                    struct intel_crtc_state *crtc_state);
 -static void skylake_pfit_enable(struct intel_crtc *crtc);
 -static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 -static void ironlake_pfit_enable(struct intel_crtc *crtc);
 +static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
 +static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
 +static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
  static void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         struct drm_modeset_acquire_ctx *ctx);
  static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
@@@ -455,9 -505,24 +455,9 @@@ static const struct intel_limit intel_l
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
  };
  
 -static void
 -skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
 -{
 -      if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
 -              return;
 -
 -      if (enable)
 -              I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
 -      else
 -              I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
 -}
 -
  static void
  skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
  {
 -      if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
 -              return;
 -
        if (enable)
                I915_WRITE(CLKGATE_DIS_PSL(pipe),
                           DUPS1_GATING_DIS | DUPS2_GATING_DIS);
@@@ -1316,7 -1381,6 +1316,7 @@@ static void assert_pch_ports_disabled(s
                        "PCH LVDS enabled on transcoder %c, should be disabled\n",
                        pipe_name(pipe));
  
 +      /* PCH SDVOB multiplex with HDMIB */
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
@@@ -1501,15 -1565,14 +1501,15 @@@ static void i9xx_enable_pll(struct inte
        }
  }
  
 -static void i9xx_disable_pll(struct intel_crtc *crtc)
 +static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
  {
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
  
        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev_priv) &&
 -          intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
 +          intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev_priv)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@@ -1603,16 -1666,16 +1603,16 @@@ void vlv_wait_port_ready(struct drm_i91
                     I915_READ(dpll_reg) & port_mask, expected_mask);
  }
  
 -static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 -                                         enum pipe pipe)
 +static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
  {
 -      struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
 -                                                              pipe);
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
        uint32_t val, pipeconf_val;
  
        /* Make sure PCH DPLL is enabled */
 -      assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
 +      assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
  
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, pipe);
                 * here for both 8bpc and 12bpc.
                 */
                val &= ~PIPECONF_BPC_MASK;
 -              if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
 +              if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                        val |= PIPECONF_8BPC;
                else
                        val |= pipeconf_val & PIPECONF_BPC_MASK;
        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
                if (HAS_PCH_IBX(dev_priv) &&
 -                  intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
 +                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
                        val |= TRANS_INTERLACED;
@@@ -2191,11 -2254,6 +2191,11 @@@ static u32 intel_adjust_tile_offset(in
        return new_offset;
  }
  
 +static bool is_surface_linear(u64 modifier, int color_plane)
 +{
 +      return modifier == DRM_FORMAT_MOD_LINEAR;
 +}
 +
  static u32 intel_adjust_aligned_offset(int *x, int *y,
                                       const struct drm_framebuffer *fb,
                                       int color_plane,
  
        WARN_ON(new_offset > old_offset);
  
 -      if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
 +      if (!is_surface_linear(fb->modifier, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int pitch_tiles;
  
@@@ -2272,13 -2330,14 +2272,13 @@@ static u32 intel_compute_aligned_offset
                                        unsigned int rotation,
                                        u32 alignment)
  {
 -      uint64_t fb_modifier = fb->modifier;
        unsigned int cpp = fb->format->cpp[color_plane];
        u32 offset, offset_aligned;
  
        if (alignment)
                alignment--;
  
 -      if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
 +      if (!is_surface_linear(fb->modifier, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int tile_rows, tiles, pitch_tiles;
  
@@@ -2341,26 -2400,10 +2341,26 @@@ static int intel_fb_offset_to_xy(int *x
                                 int color_plane)
  {
        struct drm_i915_private *dev_priv = to_i915(fb->dev);
 +      unsigned int height;
  
        if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
 -          fb->offsets[color_plane] % intel_tile_size(dev_priv))
 +          fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
 +              DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
 +                            fb->offsets[color_plane], color_plane);
                return -EINVAL;
 +      }
 +
 +      height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
 +      height = ALIGN(height, intel_tile_height(fb, color_plane));
 +
 +      /* Catch potential overflows early */
 +      if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
 +                          fb->offsets[color_plane])) {
 +              DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
 +                            fb->offsets[color_plane], fb->pitches[color_plane],
 +                            color_plane);
 +              return -ERANGE;
 +      }
  
        *x = 0;
        *y = 0;
@@@ -2531,7 -2574,7 +2531,7 @@@ intel_fill_fb_info(struct drm_i915_priv
                                                      tile_size);
                offset /= tile_size;
  
 -              if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
 +              if (!is_surface_linear(fb->modifier, i)) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;
@@@ -2745,6 -2788,10 +2745,6 @@@ intel_set_plane_visible(struct intel_cr
                crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
        else
                crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
 -
 -      DRM_DEBUG_KMS("%s active planes 0x%x\n",
 -                    crtc_state->base.crtc->name,
 -                    crtc_state->active_planes);
  }
  
  static void fixup_active_planes(struct intel_crtc_state *crtc_state)
@@@ -2772,10 -2819,6 +2772,10 @@@ static void intel_plane_disable_noatomi
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);
  
 +      DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
 +                    plane->base.base.id, plane->base.name,
 +                    crtc->base.base.id, crtc->base.name);
 +
        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);
  
                intel_pre_disable_primary_noatomic(&crtc->base);
  
        trace_intel_disable_plane(&plane->base, crtc);
 -      plane->disable_plane(plane, crtc);
 +      plane->disable_plane(plane, crtc_state);
  }
  
  static void
@@@ -3056,6 -3099,28 +3056,6 @@@ static int skl_check_main_surface(struc
        return 0;
  }
  
 -static int
 -skl_check_nv12_surface(struct intel_plane_state *plane_state)
 -{
 -      /* Display WA #1106 */
 -      if (plane_state->base.rotation !=
 -          (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
 -          plane_state->base.rotation != DRM_MODE_ROTATE_270)
 -              return 0;
 -
 -      /*
 -       * src coordinates are rotated here.
 -       * We check height but report it as width
 -       */
 -      if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
 -              DRM_DEBUG_KMS("src width must be multiple "
 -                            "of 4 for rotated NV12\n");
 -              return -EINVAL;
 -      }
 -
 -      return 0;
 -}
 -
  static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
  {
        const struct drm_framebuffer *fb = plane_state->base.fb;
@@@ -3134,6 -3199,9 +3134,6 @@@ int skl_check_plane_surface(struct inte
         * the main surface setup depends on it.
         */
        if (fb->format->format == DRM_FORMAT_NV12) {
 -              ret = skl_check_nv12_surface(plane_state);
 -              if (ret)
 -                      return ret;
                ret = skl_check_nv12_aux_surface(plane_state);
                if (ret)
                        return ret;
@@@ -3331,6 -3399,7 +3331,6 @@@ static void i9xx_update_plane(struct in
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        u32 linear_offset;
        u32 dspcntr = plane_state->ctl;
 -      i915_reg_t reg = DSPCNTR(i9xx_plane);
        int x = plane_state->color_plane[0].x;
        int y = plane_state->color_plane[0].y;
        unsigned long irqflags;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  
 +      I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
 +
        if (INTEL_GEN(dev_priv) < 4) {
                /* pipesrc and dspsize control the size that is scaled from,
                 * which should always be the user's requested size.
                 */
 +              I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
                I915_WRITE_FW(DSPSIZE(i9xx_plane),
                              ((crtc_state->pipe_src_h - 1) << 16) |
                              (crtc_state->pipe_src_w - 1));
 -              I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
        } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
 +              I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
                I915_WRITE_FW(PRIMSIZE(i9xx_plane),
                              ((crtc_state->pipe_src_h - 1) << 16) |
                              (crtc_state->pipe_src_w - 1));
 -              I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
                I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
        }
  
 -      I915_WRITE_FW(reg, dspcntr);
 -
 -      I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 -              I915_WRITE_FW(DSPSURF(i9xx_plane),
 -                            intel_plane_ggtt_offset(plane_state) +
 -                            dspaddr_offset);
                I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
        } else if (INTEL_GEN(dev_priv) >= 4) {
 +              I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
 +              I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
 +      }
 +
 +      /*
 +       * The control register self-arms if the plane was previously
 +       * disabled. Try to make the plane enable atomic by writing
 +       * the control register just before the surface register.
 +       */
 +      I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
 +      if (INTEL_GEN(dev_priv) >= 4)
                I915_WRITE_FW(DSPSURF(i9xx_plane),
                              intel_plane_ggtt_offset(plane_state) +
                              dspaddr_offset);
 -              I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
 -              I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
 -      } else {
 +      else
                I915_WRITE_FW(DSPADDR(i9xx_plane),
                              intel_plane_ggtt_offset(plane_state) +
                              dspaddr_offset);
 -      }
 -      POSTING_READ_FW(reg);
  
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
  static void i9xx_disable_plane(struct intel_plane *plane,
 -                             struct intel_crtc *crtc)
 +                             const struct intel_crtc_state *crtc_state)
  {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
                I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
        else
                I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
 -      POSTING_READ_FW(DSPCNTR(i9xx_plane));
  
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
@@@ -3461,13 -3528,13 +3461,13 @@@ static void skl_detach_scaler(struct in
  /*
   * This function detaches (aka. unbinds) unused scalers in hardware
   */
 -static void skl_detach_scalers(struct intel_crtc *intel_crtc)
 +static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
  {
 -      struct intel_crtc_scaler_state *scaler_state;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 +      const struct intel_crtc_scaler_state *scaler_state =
 +              &crtc_state->scaler_state;
        int i;
  
 -      scaler_state = &intel_crtc->config->scaler_state;
 -
        /* loop through and disable scalers that aren't in use */
        for (i = 0; i < intel_crtc->num_scalers; i++) {
                if (!scaler_state->scalers[i].in_use)
        }
  }
  
 +static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
 +                                        int color_plane, unsigned int rotation)
 +{
 +      /*
 +       * The stride is either expressed as a multiple of 64 bytes chunks for
 +       * linear buffers or in number of tiles for tiled buffers.
 +       */
 +      if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
 +              return 64;
 +      else if (drm_rotation_90_or_270(rotation))
 +              return intel_tile_height(fb, color_plane);
 +      else
 +              return intel_tile_width_bytes(fb, color_plane);
 +}
 +
  u32 skl_plane_stride(const struct intel_plane_state *plane_state,
                     int color_plane)
  {
        if (color_plane >= fb->format->num_planes)
                return 0;
  
 -      /*
 -       * The stride is either expressed as a multiple of 64 bytes chunks for
 -       * linear buffers or in number of tiles for tiled buffers.
 -       */
 -      if (drm_rotation_90_or_270(rotation))
 -              stride /= intel_tile_height(fb, color_plane);
 -      else
 -              stride /= intel_fb_stride_alignment(fb, color_plane);
 -
 -      return stride;
 +      return stride / skl_plane_stride_mult(fb, color_plane, rotation);
  }
  
  static u32 skl_plane_ctl_format(uint32_t pixel_format)
        return 0;
  }
  
 -/*
 - * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
 - * to be already pre-multiplied. We need to add a knob (or a different
 - * DRM_FORMAT) for user-space to configure that.
 - */
 -static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
 +static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
  {
 -      switch (pixel_format) {
 -      case DRM_FORMAT_ABGR8888:
 -      case DRM_FORMAT_ARGB8888:
 +      if (!plane_state->base.fb->format->has_alpha)
 +              return PLANE_CTL_ALPHA_DISABLE;
 +
 +      switch (plane_state->base.pixel_blend_mode) {
 +      case DRM_MODE_BLEND_PIXEL_NONE:
 +              return PLANE_CTL_ALPHA_DISABLE;
 +      case DRM_MODE_BLEND_PREMULTI:
                return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
 +      case DRM_MODE_BLEND_COVERAGE:
 +              return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
        default:
 +              MISSING_CASE(plane_state->base.pixel_blend_mode);
                return PLANE_CTL_ALPHA_DISABLE;
        }
  }
  
 -static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
 +static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
  {
 -      switch (pixel_format) {
 -      case DRM_FORMAT_ABGR8888:
 -      case DRM_FORMAT_ARGB8888:
 +      if (!plane_state->base.fb->format->has_alpha)
 +              return PLANE_COLOR_ALPHA_DISABLE;
 +
 +      switch (plane_state->base.pixel_blend_mode) {
 +      case DRM_MODE_BLEND_PIXEL_NONE:
 +              return PLANE_COLOR_ALPHA_DISABLE;
 +      case DRM_MODE_BLEND_PREMULTI:
                return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
 +      case DRM_MODE_BLEND_COVERAGE:
 +              return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
        default:
 +              MISSING_CASE(plane_state->base.pixel_blend_mode);
                return PLANE_COLOR_ALPHA_DISABLE;
        }
  }
@@@ -3645,7 -3697,7 +3645,7 @@@ u32 skl_plane_ctl(const struct intel_cr
        plane_ctl = PLANE_CTL_ENABLE;
  
        if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
 -              plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
 +              plane_ctl |= skl_plane_ctl_alpha(plane_state);
                plane_ctl |=
                        PLANE_CTL_PIPE_GAMMA_ENABLE |
                        PLANE_CTL_PIPE_CSC_ENABLE |
@@@ -3680,7 -3732,6 +3680,7 @@@ u32 glk_plane_color_ctl(const struct in
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
 +      struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        u32 plane_color_ctl = 0;
  
        if (INTEL_GEN(dev_priv) < 11) {
                plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
        }
        plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
 -      plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
 +      plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
  
 -      if (fb->format->is_yuv) {
 +      if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
                else
  
                if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
                        plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
 +      } else if (fb->format->is_yuv) {
 +              plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
        }
  
        return plane_color_ctl;
@@@ -3884,15 -3933,15 +3884,15 @@@ static void intel_update_pipe_config(co
  
        /* on skylake this is done by detaching scalers */
        if (INTEL_GEN(dev_priv) >= 9) {
 -              skl_detach_scalers(crtc);
 +              skl_detach_scalers(new_crtc_state);
  
                if (new_crtc_state->pch_pfit.enabled)
 -                      skylake_pfit_enable(crtc);
 +                      skylake_pfit_enable(new_crtc_state);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                if (new_crtc_state->pch_pfit.enabled)
 -                      ironlake_pfit_enable(crtc);
 +                      ironlake_pfit_enable(new_crtc_state);
                else if (old_crtc_state->pch_pfit.enabled)
 -                      ironlake_pfit_disable(crtc, true);
 +                      ironlake_pfit_disable(old_crtc_state);
        }
  }
  
@@@ -4291,10 -4340,10 +4291,10 @@@ train_done
        DRM_DEBUG_KMS("FDI train done.\n");
  }
  
 -static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 +static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_device *dev = intel_crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
 -      temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
 +      temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
        temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
        I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
  
@@@ -4452,11 -4501,10 +4452,11 @@@ void lpt_disable_iclkip(struct drm_i915
  }
  
  /* Program iCLKIP clock to the desired frequency */
 -static void lpt_program_iclkip(struct intel_crtc *crtc)
 +static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
  {
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 -      int clock = crtc->config->base.adjusted_mode.crtc_clock;
 +      int clock = crtc_state->base.adjusted_mode.crtc_clock;
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;
  
@@@ -4567,12 -4615,12 +4567,12 @@@ int lpt_get_iclkip(struct drm_i915_priv
                                 desired_divisor << auxdiv);
  }
  
 -static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
 +static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
                                                enum pipe pch_transcoder)
  {
 -      struct drm_device *dev = crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  
        I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
                   I915_READ(HTOTAL(cpu_transcoder)));
                   I915_READ(VSYNCSHIFT(cpu_transcoder)));
  }
  
 -static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
 +static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t temp;
  
        temp = I915_READ(SOUTH_CHICKEN1);
        POSTING_READ(SOUTH_CHICKEN1);
  }
  
 -static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
 +static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_device *dev = intel_crtc->base.dev;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  
 -      switch (intel_crtc->pipe) {
 +      switch (crtc->pipe) {
        case PIPE_A:
                break;
        case PIPE_B:
 -              if (intel_crtc->config->fdi_lanes > 2)
 -                      cpt_set_fdi_bc_bifurcation(dev, false);
 +              if (crtc_state->fdi_lanes > 2)
 +                      cpt_set_fdi_bc_bifurcation(dev_priv, false);
                else
 -                      cpt_set_fdi_bc_bifurcation(dev, true);
 +                      cpt_set_fdi_bc_bifurcation(dev_priv, true);
  
                break;
        case PIPE_C:
 -              cpt_set_fdi_bc_bifurcation(dev, true);
 +              cpt_set_fdi_bc_bifurcation(dev_priv, true);
  
                break;
        default:
@@@ -4684,7 -4732,7 +4684,7 @@@ static void ironlake_pch_enable(const s
        assert_pch_transcoder_disabled(dev_priv, pipe);
  
        if (IS_IVYBRIDGE(dev_priv))
 -              ivybridge_update_fdi_bc_bifurcation(crtc);
 +              ivybridge_update_fdi_bc_bifurcation(crtc_state);
  
        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
 -      intel_enable_shared_dpll(crtc);
 +      intel_enable_shared_dpll(crtc_state);
  
        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
 -      ironlake_pch_transcoder_set_timings(crtc, pipe);
 +      ironlake_pch_transcoder_set_timings(crtc_state, pipe);
  
        intel_fdi_normal_train(crtc);
  
                I915_WRITE(reg, temp);
        }
  
 -      ironlake_enable_pch_transcoder(dev_priv, pipe);
 +      ironlake_enable_pch_transcoder(crtc_state);
  }
  
  static void lpt_pch_enable(const struct intel_atomic_state *state,
  
        assert_pch_transcoder_disabled(dev_priv, PIPE_A);
  
 -      lpt_program_iclkip(crtc);
 +      lpt_program_iclkip(crtc_state);
  
        /* Set transcoder timing. */
 -      ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
 +      ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
  
        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
  }
@@@ -4856,7 -4904,8 +4856,7 @@@ static in
  skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
 -                bool plane_scaler_check,
 -                uint32_t pixel_format)
 +                const struct drm_format_info *format, bool need_scaler)
  {
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
 -      int need_scaling;
  
        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
 -      need_scaling = src_w != dst_w || src_h != dst_h;
 -
 -      if (plane_scaler_check)
 -              if (pixel_format == DRM_FORMAT_NV12)
 -                      need_scaling = true;
 -
 -      if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
 -              need_scaling = true;
 +      if (src_w != dst_w || src_h != dst_h)
 +              need_scaler = true;
  
        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
 -          need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 +          need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
 -      if (force_detach || !need_scaling) {
 +      if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;
                return 0;
        }
  
 -      if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
 +      if (format && format->format == DRM_FORMAT_NV12 &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("NV12: src dimensions not met\n");
                return -EINVAL;
  int skl_update_scaler_crtc(struct intel_crtc_state *state)
  {
        const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 +      bool need_scaler = false;
 +
 +      if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 +              need_scaler = true;
  
        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
                                 &state->scaler_state.scaler_id,
                                 state->pipe_src_w, state->pipe_src_h,
                                 adjusted_mode->crtc_hdisplay,
 -                               adjusted_mode->crtc_vdisplay, false, 0);
 +                               adjusted_mode->crtc_vdisplay, NULL, need_scaler);
  }
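
The scaler rework replaces the plane_scaler_check/pixel_format pair with the format itself plus a caller-computed need_scaler flag: callers pre-set the flag for format reasons (YCbCr 4:2:0 output here, NV12 on a non-HDR plane below), and a source/destination size mismatch forces it on as well. A hedged sketch of just that decision:

#include <stdbool.h>

/* Sketch only; the real helper also rejects IF-ID mode and checks
 * the NV12 minimum source dimensions. */
static bool scaler_needed(int src_w, int src_h, int dst_w, int dst_h,
                          bool need_scaler /* caller's format-based hint */)
{
        if (src_w != dst_w || src_h != dst_h)
                need_scaler = true;
        return need_scaler;
}
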
  
  /**
  static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                                   struct intel_plane_state *plane_state)
  {
 -
        struct intel_plane *intel_plane =
                to_intel_plane(plane_state->base.plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        int ret;
 -
        bool force_detach = !fb || !plane_state->base.visible;
 +      bool need_scaler = false;
 +
 +      /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
 +      if (!icl_is_hdr_plane(intel_plane) &&
 +          fb && fb->format->format == DRM_FORMAT_NV12)
 +              need_scaler = true;
  
        ret = skl_update_scaler(crtc_state, force_detach,
                                drm_plane_index(&intel_plane->base),
                                drm_rect_height(&plane_state->base.src) >> 16,
                                drm_rect_width(&plane_state->base.dst),
                                drm_rect_height(&plane_state->base.dst),
 -                              fb ? true : false, fb ? fb->format->format : 0);
 +                              fb ? fb->format : NULL, need_scaler);
  
        if (ret || plane_state->scaler_id < 0)
                return ret;
@@@ -5042,27 -5090,27 +5042,27 @@@ static void skylake_scaler_disable(stru
                skl_detach_scaler(crtc, i);
  }
  
 -static void skylake_pfit_enable(struct intel_crtc *crtc)
 +static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_device *dev = crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      int pipe = crtc->pipe;
 -      struct intel_crtc_scaler_state *scaler_state =
 -              &crtc->config->scaler_state;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
 +      const struct intel_crtc_scaler_state *scaler_state =
 +              &crtc_state->scaler_state;
  
 -      if (crtc->config->pch_pfit.enabled) {
 +      if (crtc_state->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int pfit_w, pfit_h, hscale, vscale;
                int id;
  
 -              if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
 +              if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
                        return;
  
 -              pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
 -              pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
 +              pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
 +              pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
  
 -              hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
 -              vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
 +              hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
 +              vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
  
                uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
 -              I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
 -              I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
 +              I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
 +              I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
        }
  }
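
The pfit window size is packed into one register word, width in the high 16 bits and height in the low 16, and the scale factors are 16.16 fixed point (0x10000 is 1.0). A small worked example with assumed values, mirroring the unpacking above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical values: a 1920x1080 source scaled into a pfit
         * window packed as (w << 16) | h, like SKL_PS_WIN_SZ above. */
        uint32_t win_sz = (1280u << 16) | 720u;
        int pfit_w = (win_sz >> 16) & 0xffff;
        int pfit_h = win_sz & 0xffff;

        /* 16.16 fixed point: 0x10000 == 1.0 */
        int hscale = (1920 << 16) / pfit_w;
        int vscale = (1080 << 16) / pfit_h;

        /* 1.5x in 16.16 is 0x18000 for both axes here */
        printf("hscale=0x%x vscale=0x%x\n", (unsigned)hscale, (unsigned)vscale);
        return 0;
}
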
  
 -static void ironlake_pfit_enable(struct intel_crtc *crtc)
 +static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_device *dev = crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int pipe = crtc->pipe;
  
 -      if (crtc->config->pch_pfit.enabled) {
 +      if (crtc_state->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                                                 PF_PIPE_SEL_IVB(pipe));
                else
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
 -              I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
 -              I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
 +              I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
 +              I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
        }
  }
  
@@@ -5291,8 -5339,11 +5291,8 @@@ static bool needs_nv12_wa(struct drm_i9
        if (!crtc_state->nv12_planes)
                return false;
  
 -      if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
 -              return false;
 -
 -      if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
 -          IS_CANNONLAKE(dev_priv))
 +      /* WA Display #0827: Gen9:all */
 +      if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
                return true;
  
        return false;
@@@ -5335,6 -5386,7 +5335,6 @@@ static void intel_post_plane_update(str
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, false);
 -              skl_wa_528(dev_priv, crtc->pipe, false);
        }
  }
  
@@@ -5374,6 -5426,7 +5374,6 @@@ static void intel_pre_plane_update(stru
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, true);
 -              skl_wa_528(dev_priv, crtc->pipe, true);
        }
  
        /*
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
 -      if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
 +      if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
 +          old_crtc_state->base.active)
                intel_wait_for_vblank(dev_priv, crtc->pipe);
  
        /*
                intel_update_watermarks(crtc);
  }
  
 -static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
 +static void intel_crtc_disable_planes(struct intel_atomic_state *state,
 +                                    struct intel_crtc *crtc)
  {
 -      struct drm_device *dev = crtc->dev;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      struct drm_plane *p;
 -      int pipe = intel_crtc->pipe;
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      const struct intel_crtc_state *new_crtc_state =
 +              intel_atomic_get_new_crtc_state(state, crtc);
 +      unsigned int update_mask = new_crtc_state->update_planes;
 +      const struct intel_plane_state *old_plane_state;
 +      struct intel_plane *plane;
 +      unsigned fb_bits = 0;
 +      int i;
  
 -      intel_crtc_dpms_overlay_disable(intel_crtc);
 +      intel_crtc_dpms_overlay_disable(crtc);
  
 -      drm_for_each_plane_mask(p, dev, plane_mask)
 -              to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
 +      for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
 +              if (crtc->pipe != plane->pipe ||
 +                  !(update_mask & BIT(plane->id)))
 +                      continue;
  
 -      /*
 -       * FIXME: Once we grow proper nuclear flip support out of this we need
 -       * to compute the mask of flip planes precisely. For the time being
 -       * consider this a flip to a NULL plane.
 -       */
 -      intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
 +              plane->disable_plane(plane, new_crtc_state);
 +
 +              if (old_plane_state->base.visible)
 +                      fb_bits |= plane->frontbuffer_bit;
 +      }
 +
 +      intel_frontbuffer_flip(dev_priv, fb_bits);
  }
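
Plane disabling now walks the old plane states and filters on new_crtc_state->update_planes, so only planes belonging to this update are touched, and frontbuffer bits are collected only for planes that were actually visible. A generic sketch of the mask-filtered walk, with hypothetical types:

#include <stdbool.h>

#define BIT(n) (1u << (n))

struct plane { int id; bool was_visible; unsigned frontbuffer_bit; };

static unsigned disable_planes(struct plane *planes, int n, unsigned update_mask)
{
        unsigned fb_bits = 0;

        for (int i = 0; i < n; i++) {
                if (!(update_mask & BIT(planes[i].id)))
                        continue;       /* not part of this update */
                /* ... hardware disable for planes[i] would go here ... */
                if (planes[i].was_visible)
                        fb_bits |= planes[i].frontbuffer_bit;
        }
        return fb_bits; /* flushed in one frontbuffer_flip call afterwards */
}
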
  
  static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@@ -5511,8 -5555,7 +5511,8 @@@ static void intel_encoders_enable(struc
                if (conn_state->crtc != crtc)
                        continue;
  
 -              encoder->enable(encoder, crtc_state, conn_state);
 +              if (encoder->enable)
 +                      encoder->enable(encoder, crtc_state, conn_state);
                intel_opregion_notify_encoder(encoder, true);
        }
  }
@@@ -5533,8 -5576,7 +5533,8 @@@ static void intel_encoders_disable(stru
                        continue;
  
                intel_opregion_notify_encoder(encoder, false);
 -              encoder->disable(encoder, old_crtc_state, old_conn_state);
 +              if (encoder->disable)
 +                      encoder->disable(encoder, old_crtc_state, old_conn_state);
        }
  }
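
Both encoder loops now treat the ->enable and ->disable hooks as optional, guarding each call with a NULL check so an encoder may leave them unset. The pattern in isolation, with a hypothetical vtable:

struct encoder_hooks {
        void (*enable)(void *ctx);      /* optional, may be NULL */
        void (*disable)(void *ctx);     /* optional, may be NULL */
};

static void call_enable(const struct encoder_hooks *h, void *ctx)
{
        if (h->enable)          /* hook is optional, so guard the call */
                h->enable(ctx);
}
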
  
@@@ -5605,37 -5647,37 +5605,37 @@@ static void ironlake_crtc_enable(struc
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
  
 -      if (intel_crtc->config->has_pch_encoder)
 -              intel_prepare_shared_dpll(intel_crtc);
 +      if (pipe_config->has_pch_encoder)
 +              intel_prepare_shared_dpll(pipe_config);
  
 -      if (intel_crtc_has_dp_encoder(intel_crtc->config))
 -              intel_dp_set_m_n(intel_crtc, M1_N1);
 +      if (intel_crtc_has_dp_encoder(pipe_config))
 +              intel_dp_set_m_n(pipe_config, M1_N1);
  
 -      intel_set_pipe_timings(intel_crtc);
 -      intel_set_pipe_src_size(intel_crtc);
 +      intel_set_pipe_timings(pipe_config);
 +      intel_set_pipe_src_size(pipe_config);
  
 -      if (intel_crtc->config->has_pch_encoder) {
 -              intel_cpu_transcoder_set_m_n(intel_crtc,
 -                                   &intel_crtc->config->fdi_m_n, NULL);
 +      if (pipe_config->has_pch_encoder) {
 +              intel_cpu_transcoder_set_m_n(pipe_config,
 +                                           &pipe_config->fdi_m_n, NULL);
        }
  
 -      ironlake_set_pipeconf(crtc);
 +      ironlake_set_pipeconf(pipe_config);
  
        intel_crtc->active = true;
  
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
  
 -      if (intel_crtc->config->has_pch_encoder) {
 +      if (pipe_config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
 -              ironlake_fdi_pll_enable(intel_crtc);
 +              ironlake_fdi_pll_enable(pipe_config);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }
  
 -      ironlake_pfit_enable(intel_crtc);
 +      ironlake_pfit_enable(pipe_config);
  
        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
        intel_color_load_luts(&pipe_config->base);
  
        if (dev_priv->display.initial_watermarks != NULL)
 -              dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
 +              dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
        intel_enable_pipe(pipe_config);
  
 -      if (intel_crtc->config->has_pch_encoder)
 +      if (pipe_config->has_pch_encoder)
                ironlake_pch_enable(old_intel_state, pipe_config);
  
        assert_vblank_disabled(crtc);
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
 -      if (intel_crtc->config->has_pch_encoder) {
 +      if (pipe_config->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
@@@ -5698,9 -5740,10 +5698,9 @@@ static void icl_pipe_mbus_enable(struc
        enum pipe pipe = crtc->pipe;
        uint32_t val;
  
 -      val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
 -
 -      /* Program B credit equally to all pipes */
 -      val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
 +      val = MBUS_DBOX_A_CREDIT(2);
 +      val |= MBUS_DBOX_BW_CREDIT(1);
 +      val |= MBUS_DBOX_B_CREDIT(8);
  
        I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
  }
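
The MBUS hunk drops the per-pipe split of B credits (24 / num_pipes) in favor of a fixed 8; note that on a three-pipe part the two expressions produce the same value. A tiny before/after check, with made-up field macros standing in for the real MBUS_DBOX_*_CREDIT() encodings:

#include <stdio.h>

/* Hypothetical field positions; not the real register layout. */
#define A_CREDIT(x)  ((unsigned)(x) << 0)
#define BW_CREDIT(x) ((unsigned)(x) << 8)
#define B_CREDIT(x)  ((unsigned)(x) << 16)

int main(void)
{
        int num_pipes = 3;
        unsigned before = A_CREDIT(2) | BW_CREDIT(1) | B_CREDIT(24 / num_pipes);
        unsigned after  = A_CREDIT(2) | BW_CREDIT(1) | B_CREDIT(8);

        printf("before=%#x after=%#x\n", before, after); /* equal for 3 pipes */
        return 0;
}
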
@@@ -5712,7 -5755,7 +5712,7 @@@ static void haswell_crtc_enable(struct 
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
 -      enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 +      enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        bool psl_clkgate_wa;
  
        intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
  
 -      if (intel_crtc->config->shared_dpll)
 -              intel_enable_shared_dpll(intel_crtc);
 -
 -      if (INTEL_GEN(dev_priv) >= 11)
 -              icl_map_plls_to_ports(crtc, pipe_config, old_state);
 +      if (pipe_config->shared_dpll)
 +              intel_enable_shared_dpll(pipe_config);
  
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
  
 -      if (intel_crtc_has_dp_encoder(intel_crtc->config))
 -              intel_dp_set_m_n(intel_crtc, M1_N1);
 +      if (intel_crtc_has_dp_encoder(pipe_config))
 +              intel_dp_set_m_n(pipe_config, M1_N1);
  
        if (!transcoder_is_dsi(cpu_transcoder))
 -              intel_set_pipe_timings(intel_crtc);
 +              intel_set_pipe_timings(pipe_config);
  
 -      intel_set_pipe_src_size(intel_crtc);
 +      intel_set_pipe_src_size(pipe_config);
  
        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder)) {
                I915_WRITE(PIPE_MULT(cpu_transcoder),
 -                         intel_crtc->config->pixel_multiplier - 1);
 +                         pipe_config->pixel_multiplier - 1);
        }
  
 -      if (intel_crtc->config->has_pch_encoder) {
 -              intel_cpu_transcoder_set_m_n(intel_crtc,
 -                                   &intel_crtc->config->fdi_m_n, NULL);
 +      if (pipe_config->has_pch_encoder) {
 +              intel_cpu_transcoder_set_m_n(pipe_config,
 +                                           &pipe_config->fdi_m_n, NULL);
        }
  
        if (!transcoder_is_dsi(cpu_transcoder))
 -              haswell_set_pipeconf(crtc);
 +              haswell_set_pipeconf(pipe_config);
  
 -      haswell_set_pipemisc(crtc);
 +      haswell_set_pipemisc(pipe_config);
  
        intel_color_set_csc(&pipe_config->base);
  
  
        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
 -                       intel_crtc->config->pch_pfit.enabled;
 +                       pipe_config->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
  
        if (INTEL_GEN(dev_priv) >= 9)
 -              skylake_pfit_enable(intel_crtc);
 +              skylake_pfit_enable(pipe_config);
        else
 -              ironlake_pfit_enable(intel_crtc);
 +              ironlake_pfit_enable(pipe_config);
  
        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(pipe_config);
  
 -      if (intel_crtc->config->has_pch_encoder)
 +      if (pipe_config->has_pch_encoder)
                lpt_pch_enable(old_intel_state, pipe_config);
  
 -      if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
 +      if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(pipe_config, true);
  
        assert_vblank_disabled(crtc);
        }
  }
  
 -static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 +static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
  {
 -      struct drm_device *dev = crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      int pipe = crtc->pipe;
 +      struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
  
        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
 -      if (force || crtc->config->pch_pfit.enabled) {
 +      if (old_crtc_state->pch_pfit.enabled) {
                I915_WRITE(PF_CTL(pipe), 0);
                I915_WRITE(PF_WIN_POS(pipe), 0);
                I915_WRITE(PF_WIN_SZ(pipe), 0);
@@@ -5862,14 -5908,14 +5862,14 @@@ static void ironlake_crtc_disable(struc
  
        intel_disable_pipe(old_crtc_state);
  
 -      ironlake_pfit_disable(intel_crtc, false);
 +      ironlake_pfit_disable(old_crtc_state);
  
 -      if (intel_crtc->config->has_pch_encoder)
 +      if (old_crtc_state->has_pch_encoder)
                ironlake_fdi_disable(crtc);
  
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  
 -      if (intel_crtc->config->has_pch_encoder) {
 +      if (old_crtc_state->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);
  
                if (HAS_PCH_CPT(dev_priv)) {
@@@ -5920,24 -5966,24 +5920,24 @@@ static void haswell_crtc_disable(struc
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(old_crtc_state);
  
 +      intel_dsc_disable(old_crtc_state);
 +
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
 -              ironlake_pfit_disable(intel_crtc, false);
 +              ironlake_pfit_disable(old_crtc_state);
  
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  
 -      if (INTEL_GEN(dev_priv) >= 11)
 -              icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
 +      intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
  }
  
 -static void i9xx_pfit_enable(struct intel_crtc *crtc)
 +static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_device *dev = crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      struct intel_crtc_state *pipe_config = crtc->config;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  
 -      if (!pipe_config->gmch_pfit.control)
 +      if (!crtc_state->gmch_pfit.control)
                return;
  
        /*
        WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
        assert_pipe_disabled(dev_priv, crtc->pipe);
  
 -      I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
 -      I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
 +      I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
 +      I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
  
        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
@@@ -6003,28 -6049,6 +6003,28 @@@ enum intel_display_power_domain intel_p
        }
  }
  
 +enum intel_display_power_domain
 +intel_aux_power_domain(struct intel_digital_port *dig_port)
 +{
 +      switch (dig_port->aux_ch) {
 +      case AUX_CH_A:
 +              return POWER_DOMAIN_AUX_A;
 +      case AUX_CH_B:
 +              return POWER_DOMAIN_AUX_B;
 +      case AUX_CH_C:
 +              return POWER_DOMAIN_AUX_C;
 +      case AUX_CH_D:
 +              return POWER_DOMAIN_AUX_D;
 +      case AUX_CH_E:
 +              return POWER_DOMAIN_AUX_E;
 +      case AUX_CH_F:
 +              return POWER_DOMAIN_AUX_F;
 +      default:
 +              MISSING_CASE(dig_port->aux_ch);
 +              return POWER_DOMAIN_AUX_A;
 +      }
 +}
 +
  static u64 get_crtc_power_domains(struct drm_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
  {
@@@ -6104,18 -6128,20 +6104,18 @@@ static void valleyview_crtc_enable(stru
        if (WARN_ON(intel_crtc->active))
                return;
  
 -      if (intel_crtc_has_dp_encoder(intel_crtc->config))
 -              intel_dp_set_m_n(intel_crtc, M1_N1);
 +      if (intel_crtc_has_dp_encoder(pipe_config))
 +              intel_dp_set_m_n(pipe_config, M1_N1);
  
 -      intel_set_pipe_timings(intel_crtc);
 -      intel_set_pipe_src_size(intel_crtc);
 +      intel_set_pipe_timings(pipe_config);
 +      intel_set_pipe_src_size(pipe_config);
  
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
 -              struct drm_i915_private *dev_priv = to_i915(dev);
 -
                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                I915_WRITE(CHV_CANVAS(pipe), 0);
        }
  
 -      i9xx_set_pipeconf(intel_crtc);
 +      i9xx_set_pipeconf(pipe_config);
  
        intel_color_set_csc(&pipe_config->base);
  
        intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
  
        if (IS_CHERRYVIEW(dev_priv)) {
 -              chv_prepare_pll(intel_crtc, intel_crtc->config);
 -              chv_enable_pll(intel_crtc, intel_crtc->config);
 +              chv_prepare_pll(intel_crtc, pipe_config);
 +              chv_enable_pll(intel_crtc, pipe_config);
        } else {
 -              vlv_prepare_pll(intel_crtc, intel_crtc->config);
 -              vlv_enable_pll(intel_crtc, intel_crtc->config);
 +              vlv_prepare_pll(intel_crtc, pipe_config);
 +              vlv_enable_pll(intel_crtc, pipe_config);
        }
  
        intel_encoders_pre_enable(crtc, pipe_config, old_state);
  
 -      i9xx_pfit_enable(intel_crtc);
 +      i9xx_pfit_enable(pipe_config);
  
        intel_color_load_luts(&pipe_config->base);
  
        intel_encoders_enable(crtc, pipe_config, old_state);
  }
  
 -static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 +static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_device *dev = crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  
 -      I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
 -      I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
 +      I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
 +      I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
  }
  
  static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
        if (WARN_ON(intel_crtc->active))
                return;
  
 -      i9xx_set_pll_dividers(intel_crtc);
 +      i9xx_set_pll_dividers(pipe_config);
  
 -      if (intel_crtc_has_dp_encoder(intel_crtc->config))
 -              intel_dp_set_m_n(intel_crtc, M1_N1);
 +      if (intel_crtc_has_dp_encoder(pipe_config))
 +              intel_dp_set_m_n(pipe_config, M1_N1);
  
 -      intel_set_pipe_timings(intel_crtc);
 -      intel_set_pipe_src_size(intel_crtc);
 +      intel_set_pipe_timings(pipe_config);
 +      intel_set_pipe_src_size(pipe_config);
  
 -      i9xx_set_pipeconf(intel_crtc);
 +      i9xx_set_pipeconf(pipe_config);
  
        intel_crtc->active = true;
  
  
        i9xx_enable_pll(intel_crtc, pipe_config);
  
 -      i9xx_pfit_enable(intel_crtc);
 +      i9xx_pfit_enable(pipe_config);
  
        intel_color_load_luts(&pipe_config->base);
  
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state,
 -                                                   intel_crtc->config);
 +                                                   pipe_config);
        else
                intel_update_watermarks(intel_crtc);
        intel_enable_pipe(pipe_config);
        intel_encoders_enable(crtc, pipe_config, old_state);
  }
  
 -static void i9xx_pfit_disable(struct intel_crtc *crtc)
 +static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
  {
 -      struct drm_device *dev = crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  
 -      if (!crtc->config->gmch_pfit.control)
 +      if (!old_crtc_state->gmch_pfit.control)
                return;
  
        assert_pipe_disabled(dev_priv, crtc->pipe);
@@@ -6246,17 -6272,17 +6246,17 @@@ static void i9xx_crtc_disable(struct in
  
        intel_disable_pipe(old_crtc_state);
  
 -      i9xx_pfit_disable(intel_crtc);
 +      i9xx_pfit_disable(old_crtc_state);
  
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
  
 -      if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
 +      if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
 -                      i9xx_disable_pll(intel_crtc);
 +                      i9xx_disable_pll(old_crtc_state);
        }
  
        intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
@@@ -6330,7 -6356,7 +6330,7 @@@ static void intel_crtc_disable_noatomic
  
        intel_fbc_disable(intel_crtc);
        intel_update_watermarks(intel_crtc);
 -      intel_disable_shared_dpll(intel_crtc);
 +      intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
  
        domains = intel_crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
@@@ -6408,6 -6434,66 +6408,6 @@@ static void intel_connector_verify_stat
        }
  }
  
 -int intel_connector_init(struct intel_connector *connector)
 -{
 -      struct intel_digital_connector_state *conn_state;
 -
 -      /*
 -       * Allocate enough memory to hold intel_digital_connector_state.
 -       * This might be a few bytes too many, but for connectors that don't
 -       * need it we'll free the state and allocate a smaller one on the first
 -       * successful commit anyway.
 -       */
 -      conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
 -      if (!conn_state)
 -              return -ENOMEM;
 -
 -      __drm_atomic_helper_connector_reset(&connector->base,
 -                                          &conn_state->base);
 -
 -      return 0;
 -}
 -
 -struct intel_connector *intel_connector_alloc(void)
 -{
 -      struct intel_connector *connector;
 -
 -      connector = kzalloc(sizeof *connector, GFP_KERNEL);
 -      if (!connector)
 -              return NULL;
 -
 -      if (intel_connector_init(connector) < 0) {
 -              kfree(connector);
 -              return NULL;
 -      }
 -
 -      return connector;
 -}
 -
 -/*
 - * Free the bits allocated by intel_connector_alloc.
 - * This should only be used after intel_connector_alloc has returned
 - * successfully, and before drm_connector_init returns successfully.
 - * Otherwise the destroy callbacks for the connector and the state should
 - * take care of proper cleanup/free
 - */
 -void intel_connector_free(struct intel_connector *connector)
 -{
 -      kfree(to_intel_digital_connector_state(connector->base.state));
 -      kfree(connector);
 -}
 -
 -/* Simple connector->get_hw_state implementation for encoders that support only
 - * one connector and no cloning and hence the encoder state determines the state
 - * of the connector. */
 -bool intel_connector_get_hw_state(struct intel_connector *connector)
 -{
 -      enum pipe pipe = 0;
 -      struct intel_encoder *encoder = connector->encoder;
 -
 -      return encoder->get_hw_state(encoder, &pipe);
 -}
 -
  static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
  {
        if (crtc_state->base.enable && crtc_state->has_pch_encoder)
@@@ -6518,9 -6604,6 +6518,9 @@@ retry
                               link_bw, &pipe_config->fdi_m_n, false);
  
        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
 +      if (ret == -EDEADLK)
 +              return ret;
 +
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
                pipe_config->pipe_bpp -= 2*3;
                DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@@ -6677,9 -6760,7 +6677,9 @@@ static int intel_crtc_compute_config(st
                return -EINVAL;
        }
  
 -      if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
 +      if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
 +           pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
 +           pipe_config->base.ctm) {
                /*
                 * There is only one pipe CSC unit per pipe, and we need that
                 * for output conversion from RGB->YCBCR. So if CTM is already
@@@ -6754,7 -6835,7 +6754,7 @@@ static void compute_m_n(unsigned int m
  }
  
  void
 -intel_link_compute_m_n(int bits_per_pixel, int nlanes,
 +intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
                       int pixel_clock, int link_clock,
                       struct intel_link_m_n *m_n,
                       bool constant_n)
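
intel_link_compute_m_n now takes bits_per_pixel as u16. The M/N pair itself encodes the ratio of data rate to link rate, data M/N = (bits_per_pixel × pixel_clock) / (8 × nlanes × link_clock), and the constant_n flag in the signature requests a fixed N instead of a fully reduced ratio. A hedged sketch of that computation; the fixed-N value below is an assumption, not the driver's constant:

#include <stdint.h>

static void compute_m_n_sketch(uint64_t m, uint64_t n,
                               uint32_t *ret_m, uint32_t *ret_n)
{
        const uint32_t fixed_n = 0x800000;      /* assumed constant-N value */

        *ret_n = fixed_n;
        *ret_m = (uint32_t)((m * fixed_n + n / 2) / n); /* round to nearest */
}

static void link_compute_m_n(uint16_t bits_per_pixel, int nlanes,
                             int pixel_clock, int link_clock,
                             uint32_t *data_m, uint32_t *data_n)
{
        compute_m_n_sketch((uint64_t)bits_per_pixel * pixel_clock,
                           (uint64_t)link_clock * nlanes * 8,
                           data_m, data_n);
}
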
@@@ -6845,12 -6926,12 +6845,12 @@@ static void vlv_pllb_recal_opamp(struc
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  }
  
 -static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
 -                                       struct intel_link_m_n *m_n)
 +static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
 +                                       const struct intel_link_m_n *m_n)
  {
 -      struct drm_device *dev = crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      int pipe = crtc->pipe;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
  
        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
        I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
        I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
  }
  
 -static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 -                                       struct intel_link_m_n *m_n,
 -                                       struct intel_link_m_n *m2_n2)
 +static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
 +                               enum transcoder transcoder)
  {
 -      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 -      int pipe = crtc->pipe;
 -      enum transcoder transcoder = crtc->config->cpu_transcoder;
 +      if (IS_HASWELL(dev_priv))
 +              return transcoder == TRANSCODER_EDP;
  
 -      if (INTEL_GEN(dev_priv) >= 5) {
 -              I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
 +      /*
 +       * Strictly speaking, some registers are available before
 +       * gen7, but we only support DRRS on gen7+
 +       */
 +      return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
 +}
 +
 +static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
 +                                       const struct intel_link_m_n *m_n,
 +                                       const struct intel_link_m_n *m2_n2)
 +{
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
 +      enum transcoder transcoder = crtc_state->cpu_transcoder;
 +
 +      if (INTEL_GEN(dev_priv) >= 5) {
 +              I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
                I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
                I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
                I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
 -              /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
 -               * for gen < 8) and if DRRS is supported (to make sure the
 -               * registers are not unnecessarily accessed).
 +              /*
 +               * M2_N2 registers are set only if DRRS is supported
 +               * (to make sure the registers are not unnecessarily accessed).
                 */
 -              if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
 -                  INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
 +              if (m2_n2 && crtc_state->has_drrs &&
 +                  transcoder_has_m2_n2(dev_priv, transcoder)) {
                        I915_WRITE(PIPE_DATA_M2(transcoder),
                                        TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
        }
  }
  
 -void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
 +void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
  {
 -      struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
 +      const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
  
        if (m_n == M1_N1) {
 -              dp_m_n = &crtc->config->dp_m_n;
 -              dp_m2_n2 = &crtc->config->dp_m2_n2;
 +              dp_m_n = &crtc_state->dp_m_n;
 +              dp_m2_n2 = &crtc_state->dp_m2_n2;
        } else if (m_n == M2_N2) {
  
                /*
                 * M2_N2 registers are not supported. Hence m2_n2 divider value
                 * needs to be programmed into M1_N1.
                 */
 -              dp_m_n = &crtc->config->dp_m2_n2;
 +              dp_m_n = &crtc_state->dp_m2_n2;
        } else {
                DRM_ERROR("Unsupported divider value\n");
                return;
        }
  
 -      if (crtc->config->has_pch_encoder)
 -              intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
 +      if (crtc_state->has_pch_encoder)
 +              intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
        else
 -              intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
 +              intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
  }
  
  static void vlv_compute_dpll(struct intel_crtc *crtc,
@@@ -7026,8 -7093,8 +7026,8 @@@ static void vlv_prepare_pll(struct inte
  
        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
 -          intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
 -          intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
 +          intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
 +          intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
  
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
 -      if (intel_crtc_has_dp_encoder(crtc->config))
 +      if (intel_crtc_has_dp_encoder(pipe_config))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
  
@@@ -7333,13 -7400,12 +7333,13 @@@ static void i8xx_compute_dpll(struct in
        crtc_state->dpll_hw_state.dpll = dpll;
  }
  
 -static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 +static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 -      enum pipe pipe = intel_crtc->pipe;
 -      enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 -      const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
 +      enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 +      const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
        uint32_t crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;
  
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;
  
 -              if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
 +              if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
  
  }
  
 -static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
 +static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_device *dev = intel_crtc->base.dev;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      enum pipe pipe = intel_crtc->pipe;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
  
        /* pipesrc controls the size that is scaled from, which should
         * always be the user's requested size.
         */
        I915_WRITE(PIPESRC(pipe),
 -                 ((intel_crtc->config->pipe_src_w - 1) << 16) |
 -                 (intel_crtc->config->pipe_src_h - 1));
 +                 ((crtc_state->pipe_src_w - 1) << 16) |
 +                 (crtc_state->pipe_src_h - 1));
  }
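
PIPESRC stores the source size minus one in each field, width in the high word and height in the low word. For example (values assumed):

#include <stdint.h>

/* 1920x1080 encodes as 0x077f0437: ((1920 - 1) << 16) | (1080 - 1). */
static uint32_t pipesrc(int w, int h)
{
        return ((uint32_t)(w - 1) << 16) | (uint32_t)(h - 1);
}
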
  
  static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@@ -7482,30 -7548,29 +7482,30 @@@ void intel_mode_from_pipe_config(struc
        drm_mode_set_name(mode);
  }
  
 -static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 +static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        uint32_t pipeconf;
  
        pipeconf = 0;
  
        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
 -              pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
 +              pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
  
 -      if (intel_crtc->config->double_wide)
 +      if (crtc_state->double_wide)
                pipeconf |= PIPECONF_DOUBLE_WIDE;
  
        /* only g4x and later have fancy bpc/dither controls */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                /* Bspec claims that we can't use dithering for 30bpp pipes. */
 -              if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
 +              if (crtc_state->dither && crtc_state->pipe_bpp != 30)
                        pipeconf |= PIPECONF_DITHER_EN |
                                    PIPECONF_DITHER_TYPE_SP;
  
 -              switch (intel_crtc->config->pipe_bpp) {
 +              switch (crtc_state->pipe_bpp) {
                case 18:
                        pipeconf |= PIPECONF_6BPC;
                        break;
                }
        }
  
 -      if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 +      if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                if (INTEL_GEN(dev_priv) < 4 ||
 -                  intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
 +                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                else
                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
                pipeconf |= PIPECONF_PROGRESSIVE;
  
        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 -           intel_crtc->config->limited_color_range)
 +           crtc_state->limited_color_range)
                pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
  
 -      I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
 -      POSTING_READ(PIPECONF(intel_crtc->pipe));
 +      I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
 +      POSTING_READ(PIPECONF(crtc->pipe));
  }
  
  static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@@ -7898,49 -7963,6 +7898,49 @@@ static void chv_crtc_clock_get(struct i
        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
  }
  
 +static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
 +                                      struct intel_crtc_state *pipe_config)
 +{
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
 +
 +      pipe_config->lspcon_downsampling = false;
 +
 +      if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
 +              u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
 +
 +              if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
 +                      bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
 +                      bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
 +
 +                      if (ycbcr420_enabled) {
 +                              /* We support 4:2:0 in full blend mode only */
 +                              if (!blend)
 +                                      output = INTEL_OUTPUT_FORMAT_INVALID;
 +                              else if (!(IS_GEMINILAKE(dev_priv) ||
 +                                         INTEL_GEN(dev_priv) >= 10))
 +                                      output = INTEL_OUTPUT_FORMAT_INVALID;
 +                              else
 +                                      output = INTEL_OUTPUT_FORMAT_YCBCR420;
 +                      } else {
 +                              /*
 +                               * Currently there is no interface defined to
 +                               * check user preference between RGB/YCBCR444
 +                               * or YCBCR420. So the only possible case for
 +                               * YCBCR444 usage is driving YCBCR420 output
 +                               * with LSPCON, when pipe is configured for
 +                               * YCBCR444 output and LSPCON takes care of
 +                               * downsampling it.
 +                               */
 +                              pipe_config->lspcon_downsampling = true;
 +                              output = INTEL_OUTPUT_FORMAT_YCBCR444;
 +                      }
 +              }
 +      }
 +
 +      pipe_config->output_format = output;
 +}
 +
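
The readout above maps two PIPEMISC bits, plus a platform capability, onto the new output_format values: the YUV colorspace bit alone means a YCbCr444 pipe (with LSPCON doing the 4:2:0 downsampling), while the 4:2:0 bit is valid only in full blend mode on GLK/GEN10+. The same decision as a pure function, with hypothetical parameter names:

#include <stdbool.h>

enum out_fmt { FMT_RGB, FMT_YCBCR444, FMT_YCBCR420, FMT_INVALID };

static enum out_fmt decode_output_format(bool colorspace_yuv, bool yuv420_enable,
                                         bool full_blend, bool has_420_support)
{
        if (!colorspace_yuv)
                return FMT_RGB;
        if (!yuv420_enable)
                return FMT_YCBCR444;    /* pipe YCbCr444, LSPCON downsamples */
        if (!full_blend || !has_420_support)
                return FMT_INVALID;     /* 4:2:0 supported only in full blend */
        return FMT_YCBCR420;
}
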
  static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
  {
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
  
 +      pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
  
@@@ -8485,16 -8506,16 +8485,16 @@@ void intel_init_pch_refclk(struct drm_i
                lpt_init_pch_refclk(dev_priv);
  }
  
 -static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 +static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int pipe = intel_crtc->pipe;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
        uint32_t val;
  
        val = 0;
  
 -      switch (intel_crtc->config->pipe_bpp) {
 +      switch (crtc_state->pipe_bpp) {
        case 18:
                val |= PIPECONF_6BPC;
                break;
                BUG();
        }
  
 -      if (intel_crtc->config->dither)
 +      if (crtc_state->dither)
                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
  
 -      if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 +      if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= PIPECONF_INTERLACED_ILK;
        else
                val |= PIPECONF_PROGRESSIVE;
  
 -      if (intel_crtc->config->limited_color_range)
 +      if (crtc_state->limited_color_range)
                val |= PIPECONF_COLOR_RANGE_SELECT;
  
        I915_WRITE(PIPECONF(pipe), val);
        POSTING_READ(PIPECONF(pipe));
  }
  
 -static void haswell_set_pipeconf(struct drm_crtc *crtc)
 +static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        u32 val = 0;
  
 -      if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
 +      if (IS_HASWELL(dev_priv) && crtc_state->dither)
                val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
  
 -      if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 +      if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
                val |= PIPECONF_INTERLACED_ILK;
        else
                val |= PIPECONF_PROGRESSIVE;
        POSTING_READ(PIPECONF(cpu_transcoder));
  }
  
 -static void haswell_set_pipemisc(struct drm_crtc *crtc)
 +static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
  {
 -      struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      struct intel_crtc_state *config = intel_crtc->config;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
  
        if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
                u32 val = 0;
  
 -              switch (intel_crtc->config->pipe_bpp) {
 +              switch (crtc_state->pipe_bpp) {
                case 18:
                        val |= PIPEMISC_DITHER_6_BPC;
                        break;
                        BUG();
                }
  
 -              if (intel_crtc->config->dither)
 +              if (crtc_state->dither)
                        val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
  
 -              if (config->ycbcr420) {
 -                      val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
 -                              PIPEMISC_YUV420_ENABLE |
 +              if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
 +                  crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
 +                      val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
 +
 +              if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 +                      val |= PIPEMISC_YUV420_ENABLE |
                                PIPEMISC_YUV420_MODE_FULL_BLEND;
 -              }
  
                I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
        }
@@@ -8792,8 -8812,12 +8792,8 @@@ static void intel_cpu_transcoder_get_m_
                m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
                m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
                            & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
 -              /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
 -               * gen < 8) and if DRRS is supported (to make sure the
 -               * registers are not unnecessarily read).
 -               */
 -              if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
 -                      crtc->config->has_drrs) {
 +
 +              if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
                        m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
                        m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
                        m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@@ -8969,7 -8993,7 +8969,7 @@@ skylake_get_initial_plane_config(struc
        fb->width = ((val >> 0) & 0x1fff) + 1;
  
        val = I915_READ(PLANE_STRIDE(pipe, plane_id));
 -      stride_mult = intel_fb_stride_alignment(fb, 0);
 +      stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
        fb->pitches[0] = (val & 0x3ff) * stride_mult;
  
        aligned_height = intel_fb_align_height(fb, 0, fb->height);
@@@ -9025,7 -9049,6 +9025,7 @@@ static bool ironlake_get_pipe_config(st
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
  
 +      pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
  
@@@ -9333,12 -9356,10 +9333,12 @@@ void hsw_disable_pc8(struct drm_i915_pr
  static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
                                      struct intel_crtc_state *crtc_state)
  {
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state =
                to_intel_atomic_state(crtc_state->base.state);
  
 -      if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
 +      if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
 +          IS_ICELAKE(dev_priv)) {
                struct intel_encoder *encoder =
                        intel_get_crtc_new_encoder(state, crtc_state);
  
@@@ -9376,17 -9397,30 +9376,17 @@@ static void icelake_get_ddi_pll(struct 
        u32 temp;
  
        /* TODO: TBT pll not implemented. */
 -      switch (port) {
 -      case PORT_A:
 -      case PORT_B:
 +      if (intel_port_is_combophy(dev_priv, port)) {
                temp = I915_READ(DPCLKA_CFGCR0_ICL) &
                       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
                id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
  
 -              if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
 +              if (WARN_ON(!intel_dpll_is_combophy(id)))
                        return;
 -              break;
 -      case PORT_C:
 -              id = DPLL_ID_ICL_MGPLL1;
 -              break;
 -      case PORT_D:
 -              id = DPLL_ID_ICL_MGPLL2;
 -              break;
 -      case PORT_E:
 -              id = DPLL_ID_ICL_MGPLL3;
 -              break;
 -      case PORT_F:
 -              id = DPLL_ID_ICL_MGPLL4;
 -              break;
 -      default:
 -              MISSING_CASE(port);
 +      } else if (intel_port_is_tc(dev_priv, port)) {
 +              id = icl_port_to_mg_pll_id(port);
 +      } else {
 +              WARN(1, "Invalid port %x\n", port);
                return;
        }
  
@@@ -9476,18 -9510,11 +9476,18 @@@ static bool hsw_get_transcoder_state(st
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
 +      unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
 +      unsigned long enabled_panel_transcoders = 0;
 +      enum transcoder panel_transcoder;
        u32 tmp;
  
 +      if (IS_ICELAKE(dev_priv))
 +              panel_transcoder_mask |=
 +                      BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
 +
        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
 -       * transcoder handled below.
 +       * and DSI transcoders handled below.
         */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
  
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
 -      tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 -      if (tmp & TRANS_DDI_FUNC_ENABLE) {
 -              enum pipe trans_edp_pipe;
 +      for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
 +              enum pipe trans_pipe;
 +
 +              tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
 +              if (!(tmp & TRANS_DDI_FUNC_ENABLE))
 +                      continue;
 +
 +              /*
 +               * Log all enabled ones, but only use the first one.
 +               *
 +               * FIXME: This won't work for two separate DSI displays.
 +               */
 +              enabled_panel_transcoders |= BIT(panel_transcoder);
 +              if (enabled_panel_transcoders != BIT(panel_transcoder))
 +                      continue;
 +
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
 -                      WARN(1, "unknown pipe linked to edp transcoder\n");
 +                      WARN(1, "unknown pipe linked to transcoder %s\n",
 +                           transcoder_name(panel_transcoder));
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                case TRANS_DDI_EDP_INPUT_A_ON:
 -                      trans_edp_pipe = PIPE_A;
 +                      trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
 -                      trans_edp_pipe = PIPE_B;
 +                      trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
 -                      trans_edp_pipe = PIPE_C;
 +                      trans_pipe = PIPE_C;
                        break;
                }
  
 -              if (trans_edp_pipe == crtc->pipe)
 -                      pipe_config->cpu_transcoder = TRANSCODER_EDP;
 +              if (trans_pipe == crtc->pipe)
 +                      pipe_config->cpu_transcoder = panel_transcoder;
        }
  
 +      /*
 +       * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
 +       */
 +      WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
 +              enabled_panel_transcoders != BIT(TRANSCODER_EDP));
 +
        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
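
The eDP special case becomes a scan over a mask of panel transcoders (eDP, plus DSI0/DSI1 on Icelake): every enabled one is recorded, but only the first drives the pipe mapping, and the WARN_ON above enforces that eDP never coexists with another panel transcoder. A generic sketch of the scan:

/* Generic "log all set-and-enabled bits, use the first" scan. */
static int first_enabled(unsigned long mask, int (*enabled)(int bit),
                         unsigned long *seen)
{
        int first = -1;

        *seen = 0;
        for (int bit = 0; bit < (int)(8 * sizeof(mask)); bit++) {
                if (!(mask & (1ul << bit)) || !enabled(bit))
                        continue;
                *seen |= 1ul << bit;    /* record every enabled one... */
                if (first < 0)
                        first = bit;    /* ...but only use the first */
        }
        return first;
}
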
@@@ -9670,18 -9677,33 +9670,18 @@@ static bool haswell_get_pipe_config(str
        if (!active)
                goto out;
  
 -      if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
 +      if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
 +          IS_ICELAKE(dev_priv)) {
                haswell_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }
  
        intel_get_pipe_src_size(crtc, pipe_config);
 +      intel_get_crtc_ycbcr_config(crtc, pipe_config);
  
        pipe_config->gamma_mode =
                I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
  
 -      if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
 -              u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
 -              bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
 -
 -              if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
 -                      bool blend_mode_420 = tmp &
 -                                            PIPEMISC_YUV420_MODE_FULL_BLEND;
 -
 -                      pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
 -                      if (pipe_config->ycbcr420 != clrspace_yuv ||
 -                          pipe_config->ycbcr420 != blend_mode_420)
 -                              DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
 -              } else if (clrspace_yuv) {
 -                      DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
 -              }
 -      }
 -
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                power_domain_mask |= BIT_ULL(power_domain);
@@@ -9727,7 -9749,7 +9727,7 @@@ static u32 intel_cursor_base(const stru
        const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 base;
  
 -      if (INTEL_INFO(dev_priv)->cursor_needs_physical)
 +      if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
                base = obj->phys_handle->busaddr;
        else
                base = intel_plane_ggtt_offset(plane_state);
@@@ -9950,13 -9972,15 +9950,13 @@@ static void i845_update_cursor(struct i
                I915_WRITE_FW(CURPOS(PIPE_A), pos);
        }
  
 -      POSTING_READ_FW(CURCNTR(PIPE_A));
 -
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
  static void i845_disable_cursor(struct intel_plane *plane,
 -                              struct intel_crtc *crtc)
 +                              const struct intel_crtc_state *crtc_state)
  {
 -      i845_update_cursor(plane, NULL, NULL);
 +      i845_update_cursor(plane, crtc_state, NULL);
  }
  
  static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@@ -10147,8 -10171,8 +10147,8 @@@ static void i9xx_update_cursor(struct i
         * On some platforms writing CURCNTR first will also
         * cause CURPOS to be armed by the CURBASE write.
         * Without the CURCNTR write the CURPOS write would
 -       * arm itself. Thus we always start the full update
 -       * with a CURCNTR write.
 +       * arm itself. Thus we always update CURCNTR before
 +       * CURPOS.
         *
         * On other platforms CURPOS always requires the
         * CURBASE write to arm the update. Additionally
         * cursor that doesn't appear to move, or even change
         * shape. Thus we always write CURBASE.
         *
 -       * CURCNTR and CUR_FBC_CTL are always
 -       * armed by the CURBASE write only.
  +       * The other registers are armed by the CURBASE write
 +       * except when the plane is getting enabled at which time
 +       * the CURCNTR write arms the update.
         */
 +
 +      if (INTEL_GEN(dev_priv) >= 9)
 +              skl_write_cursor_wm(plane, crtc_state);
 +
        if (plane->cursor.base != base ||
            plane->cursor.size != fbc_ctl ||
            plane->cursor.cntl != cntl) {
 -              I915_WRITE_FW(CURCNTR(pipe), cntl);
                if (HAS_CUR_FBC(dev_priv))
                        I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
 +              I915_WRITE_FW(CURCNTR(pipe), cntl);
                I915_WRITE_FW(CURPOS(pipe), pos);
                I915_WRITE_FW(CURBASE(pipe), base);
  
                I915_WRITE_FW(CURBASE(pipe), base);
        }
  
 -      POSTING_READ_FW(CURBASE(pipe));
 -
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
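
The arming rules described in the comment above reduce to a fixed write order: buffered registers first, the arming CURBASE write last. A hedged stand-alone sketch, where write_reg() and the register names merely mimic the MMIO writes rather than the real I915_WRITE_FW() path:

#include <stdint.h>
#include <stdio.h>

enum cur_reg { CUR_FBC_CTL, CURCNTR, CURPOS, CURBASE };

static void write_reg(enum cur_reg reg, uint32_t val)
{
        static const char * const name[] = {
                "CUR_FBC_CTL", "CURCNTR", "CURPOS", "CURBASE"
        };
        printf("%-11s <= 0x%08x\n", name[reg], val);
}

/*
 * Full update: non-arming registers first, CURBASE last so a single
 * write arms (latches) the whole set for the next vblank.
 */
static void cursor_full_update(uint32_t fbc_ctl, uint32_t cntl,
                               uint32_t pos, uint32_t base)
{
        write_reg(CUR_FBC_CTL, fbc_ctl);
        write_reg(CURCNTR, cntl);
        write_reg(CURPOS, pos);
        write_reg(CURBASE, base);
}

int main(void)
{
        cursor_full_update(0x0, 0x27000000, 0x00100010, 0x10000000);
        return 0;
}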
  
  static void i9xx_disable_cursor(struct intel_plane *plane,
 -                              struct intel_crtc *crtc)
 +                              const struct intel_crtc_state *crtc_state)
  {
 -      i9xx_update_cursor(plane, NULL, NULL);
 +      i9xx_update_cursor(plane, crtc_state, NULL);
  }
  
  static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@@ -10787,40 -10808,14 +10787,40 @@@ int intel_plane_atomic_calc_changes(con
                pipe_config->fb_bits |= plane->frontbuffer_bit;
  
        /*
 +       * ILK/SNB DVSACNTR/Sprite Enable
 +       * IVB SPR_CTL/Sprite Enable
 +       * "When in Self Refresh Big FIFO mode, a write to enable the
 +       *  plane will be internally buffered and delayed while Big FIFO
 +       *  mode is exiting."
 +       *
 +       * Which means that enabling the sprite can take an extra frame
 +       * when we start in big FIFO mode (LP1+). Thus we need to drop
 +       * down to LP0 and wait for vblank in order to make sure the
 +       * sprite gets enabled on the next vblank after the register write.
 +       * Doing otherwise would risk enabling the sprite one frame after
 +       * we've already signalled flip completion. We can resume LP1+
 +       * once the sprite has been enabled.
  +       *
         * WaCxSRDisabledForSpriteScaling:ivb
 +       * IVB SPR_SCALE/Scaling Enable
 +       * "Low Power watermarks must be disabled for at least one
 +       *  frame before enabling sprite scaling, and kept disabled
 +       *  until sprite scaling is disabled."
         *
 -       * cstate->update_wm was already set above, so this flag will
 -       * take effect when we commit and program watermarks.
 +       * ILK/SNB DVSASCALE/Scaling Enable
 +       * "When in Self Refresh Big FIFO mode, scaling enable will be
 +       *  masked off while Big FIFO mode is exiting."
 +       *
 +       * Despite the w/a only being listed for IVB we assume that
 +       * the ILK/SNB note has similar ramifications, hence we apply
 +       * the w/a on all three platforms.
         */
 -      if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
 -          needs_scaling(to_intel_plane_state(plane_state)) &&
 -          !needs_scaling(old_plane_state))
 +      if (plane->id == PLANE_SPRITE0 &&
 +          (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
 +           IS_IVYBRIDGE(dev_priv)) &&
 +          (turn_on || (!needs_scaling(old_plane_state) &&
 +                       needs_scaling(to_intel_plane_state(plane_state)))))
                pipe_config->disable_lp_wm = true;
  
        return 0;
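
As a rough restatement, the predicate below mirrors the condition added above, with the three platform checks (GEN5, GEN6, IVB on sprite 0) collapsed into a single flag for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool needs_lp_wm_disable(bool ilk_snb_ivb_sprite0, bool turn_on,
                                bool was_scaling, bool will_scale)
{
        return ilk_snb_ivb_sprite0 &&
               (turn_on || (!was_scaling && will_scale));
}

int main(void)
{
        /* enabling the sprite: 1 */
        printf("%d\n", needs_lp_wm_disable(true, true, false, false));
        /* enabling scaling on an already-enabled sprite: 1 */
        printf("%d\n", needs_lp_wm_disable(true, false, false, true));
        /* scaling already on, nothing being enabled: 0 */
        printf("%d\n", needs_lp_wm_disable(true, false, true, true));
        return 0;
}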
@@@ -10856,101 -10851,6 +10856,101 @@@ static bool check_single_encoder_clonin
        return true;
  }
  
 +static int icl_add_linked_planes(struct intel_atomic_state *state)
 +{
 +      struct intel_plane *plane, *linked;
 +      struct intel_plane_state *plane_state, *linked_plane_state;
 +      int i;
 +
 +      for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
 +              linked = plane_state->linked_plane;
 +
 +              if (!linked)
 +                      continue;
 +
 +              linked_plane_state = intel_atomic_get_plane_state(state, linked);
 +              if (IS_ERR(linked_plane_state))
 +                      return PTR_ERR(linked_plane_state);
 +
 +              WARN_ON(linked_plane_state->linked_plane != plane);
 +              WARN_ON(linked_plane_state->slave == plane_state->slave);
 +      }
 +
 +      return 0;
 +}
 +
 +static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 +{
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 +      struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 +      struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
 +      struct intel_plane *plane, *linked;
 +      struct intel_plane_state *plane_state;
 +      int i;
 +
 +      if (INTEL_GEN(dev_priv) < 11)
 +              return 0;
 +
 +      /*
 +       * Destroy all old plane links and make the slave plane invisible
 +       * in the crtc_state->active_planes mask.
 +       */
 +      for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
 +              if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
 +                      continue;
 +
 +              plane_state->linked_plane = NULL;
 +              if (plane_state->slave && !plane_state->base.visible) {
 +                      crtc_state->active_planes &= ~BIT(plane->id);
 +                      crtc_state->update_planes |= BIT(plane->id);
 +              }
 +
 +              plane_state->slave = false;
 +      }
 +
 +      if (!crtc_state->nv12_planes)
 +              return 0;
 +
 +      for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
 +              struct intel_plane_state *linked_state = NULL;
 +
 +              if (plane->pipe != crtc->pipe ||
 +                  !(crtc_state->nv12_planes & BIT(plane->id)))
 +                      continue;
 +
 +              for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
 +                      if (!icl_is_nv12_y_plane(linked->id))
 +                              continue;
 +
 +                      if (crtc_state->active_planes & BIT(linked->id))
 +                              continue;
 +
 +                      linked_state = intel_atomic_get_plane_state(state, linked);
 +                      if (IS_ERR(linked_state))
 +                              return PTR_ERR(linked_state);
 +
 +                      break;
 +              }
 +
 +              if (!linked_state) {
 +                      DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
 +                                    hweight8(crtc_state->nv12_planes));
 +
 +                      return -EINVAL;
 +              }
 +
 +              plane_state->linked_plane = linked;
 +
 +              linked_state->slave = true;
 +              linked_state->linked_plane = plane;
 +              crtc_state->active_planes |= BIT(linked->id);
 +              crtc_state->update_planes |= BIT(linked->id);
 +              DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
 +      }
 +
 +      return 0;
 +}
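
A simplified sketch of the pairing loop above, with plain masks and an array standing in for the atomic plane state. The plane indices and the Y-candidate mask are made up for the example; the point is the claim-first-free-slave structure, including the -EINVAL-style failure when no Y plane is left:

#include <stdio.h>

#define MAX_PLANES 7

int main(void)
{
        unsigned int nv12_planes = 1u << 0;     /* UV planes needing a Y companion */
        unsigned int active_planes = 1u << 0;
        unsigned int y_candidates = 1u << 4 | 1u << 5; /* e.g. two spare HW planes */
        int link[MAX_PLANES];
        int plane, y;

        for (plane = 0; plane < MAX_PLANES; plane++)
                link[plane] = -1;

        for (plane = 0; plane < MAX_PLANES; plane++) {
                if (!(nv12_planes & (1u << plane)))
                        continue;

                /* Claim the first Y-capable plane not already in use. */
                for (y = 0; y < MAX_PLANES; y++) {
                        if (!(y_candidates & (1u << y)) ||
                            (active_planes & (1u << y)))
                                continue;
                        link[plane] = y;
                        active_planes |= 1u << y;
                        break;
                }

                if (link[plane] < 0) {
                        fprintf(stderr, "no free Y plane for NV12\n");
                        return 1;
                }
                printf("plane %d uses plane %d as its Y plane\n",
                       plane, link[plane]);
        }
        return 0;
}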
 +
  static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *crtc_state)
  {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc_state);
 -      struct drm_atomic_state *state = crtc_state->state;
        int ret;
        bool mode_changed = needs_modeset(crtc_state);
  
                }
        }
  
 -      if (dev_priv->display.compute_intermediate_wm &&
 -          !to_intel_atomic_state(state)->skip_intermediate_wm) {
 +      if (dev_priv->display.compute_intermediate_wm) {
                if (WARN_ON(!dev_priv->display.compute_pipe_wm))
                        return 0;
  
                        DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
                        return ret;
                }
 -      } else if (dev_priv->display.compute_intermediate_wm) {
 -              if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
 -                      pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
        }
  
        if (INTEL_GEN(dev_priv) >= 9) {
                if (mode_changed)
                        ret = skl_update_scaler_crtc(pipe_config);
  
 +              if (!ret)
 +                      ret = icl_check_nv12_planes(pipe_config);
                if (!ret)
                        ret = skl_check_pipe_max_pixel_rate(intel_crtc,
                                                            pipe_config);
  }
  
  static const struct drm_crtc_helper_funcs intel_helper_funcs = {
 -      .atomic_begin = intel_begin_crtc_commit,
 -      .atomic_flush = intel_finish_crtc_commit,
        .atomic_check = intel_crtc_atomic_check,
  };
  
@@@ -11062,42 -10967,30 +11062,42 @@@ static void intel_modeset_update_connec
        drm_connector_list_iter_end(&conn_iter);
  }
  
 -static void
 -connected_sink_compute_bpp(struct intel_connector *connector,
 -                         struct intel_crtc_state *pipe_config)
 +static int
 +compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
 +                    struct intel_crtc_state *pipe_config)
  {
 -      const struct drm_display_info *info = &connector->base.display_info;
 -      int bpp = pipe_config->pipe_bpp;
 +      struct drm_connector *connector = conn_state->connector;
 +      const struct drm_display_info *info = &connector->display_info;
 +      int bpp;
  
 -      DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
 -                    connector->base.base.id,
 -                    connector->base.name);
 -
 -      /* Don't use an invalid EDID bpc value */
 -      if (info->bpc != 0 && info->bpc * 3 < bpp) {
 -              DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
 -                            bpp, info->bpc * 3);
 -              pipe_config->pipe_bpp = info->bpc * 3;
 +      switch (conn_state->max_bpc) {
 +      case 6 ... 7:
 +              bpp = 6 * 3;
 +              break;
 +      case 8 ... 9:
 +              bpp = 8 * 3;
 +              break;
 +      case 10 ... 11:
 +              bpp = 10 * 3;
 +              break;
 +      case 12:
 +              bpp = 12 * 3;
 +              break;
 +      default:
 +              return -EINVAL;
        }
  
 -      /* Clamp bpp to 8 on screens without EDID 1.4 */
 -      if (info->bpc == 0 && bpp > 24) {
 -              DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
 -                            bpp);
 -              pipe_config->pipe_bpp = 24;
 +      if (bpp < pipe_config->pipe_bpp) {
 +              DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
 +                            "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
 +                            connector->base.id, connector->name,
 +                            bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
 +                            pipe_config->pipe_bpp);
 +
 +              pipe_config->pipe_bpp = bpp;
        }
 +
 +      return 0;
  }
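
A worked example of the mapping, as a stand-alone sketch: the max_bpc property rounds down to the nearest supported component bpc, times three components for pipe bpp (the case ranges are a GCC extension, as in the kernel source):

#include <stdio.h>

static int max_bpc_to_pipe_bpp(int max_bpc)
{
        switch (max_bpc) {
        case 6 ... 7:
                return 6 * 3;   /* 18 bpp */
        case 8 ... 9:
                return 8 * 3;   /* 24 bpp */
        case 10 ... 11:
                return 10 * 3;  /* 30 bpp */
        case 12:
                return 12 * 3;  /* 36 bpp */
        default:
                return -1;      /* -EINVAL in the real code */
        }
}

int main(void)
{
        printf("%d %d %d\n",
               max_bpc_to_pipe_bpp(8),   /* 24 */
               max_bpc_to_pipe_bpp(11),  /* 30 */
               max_bpc_to_pipe_bpp(12)); /* 36 */
        return 0;
}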
  
  static int
@@@ -11105,7 -10998,7 +11105,7 @@@ compute_baseline_pipe_bpp(struct intel_
                          struct intel_crtc_state *pipe_config)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 -      struct drm_atomic_state *state;
 +      struct drm_atomic_state *state = pipe_config->base.state;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int bpp, i;
        else
                bpp = 8*3;
  
 -
        pipe_config->pipe_bpp = bpp;
  
 -      state = pipe_config->base.state;
 -
 -      /* Clamp display bpp to EDID value */
 +      /* Clamp display bpp to connector max bpp */
        for_each_new_connector_in_state(state, connector, connector_state, i) {
 +              int ret;
 +
                if (connector_state->crtc != &crtc->base)
                        continue;
  
 -              connected_sink_compute_bpp(to_intel_connector(connector),
 -                                         pipe_config);
 +              ret = compute_sink_pipe_bpp(connector_state, pipe_config);
 +              if (ret)
 +                      return ret;
        }
  
 -      return bpp;
 +      return 0;
  }
  
  static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
@@@ -11202,20 -11095,6 +11202,20 @@@ static void snprintf_output_types(char 
        WARN_ON_ONCE(output_types != 0);
  }
  
 +static const char * const output_format_str[] = {
 +      [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
 +      [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
 +      [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
 +      [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
 +};
 +
 +static const char *output_formats(enum intel_output_format format)
 +{
 +      if (format >= ARRAY_SIZE(output_format_str))
 +              format = INTEL_OUTPUT_FORMAT_INVALID;
 +      return output_format_str[format];
 +}
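
The same clamp-then-index pattern as output_formats() above, reduced to a self-contained sketch: out-of-range values fall back to the "Invalid" slot rather than reading past the end of the array:

#include <stdio.h>

enum fmt { FMT_INVALID, FMT_RGB, FMT_YCBCR420, FMT_YCBCR444, FMT_NUM };

static const char * const fmt_str[FMT_NUM] = {
        [FMT_INVALID]  = "Invalid",
        [FMT_RGB]      = "RGB",
        [FMT_YCBCR420] = "YCBCR4:2:0",
        [FMT_YCBCR444] = "YCBCR4:4:4",
};

static const char *fmt_name(unsigned int f)
{
        /* Clamp unknown values to the "Invalid" slot. */
        if (f >= FMT_NUM)
                f = FMT_INVALID;
        return fmt_str[f];
}

int main(void)
{
        printf("%s %s\n", fmt_name(FMT_YCBCR420), fmt_name(42u));
        return 0;
}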
 +
  static void intel_dump_pipe_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config,
                                   const char *context)
        DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
                      buf, pipe_config->output_types);
  
 +      DRM_DEBUG_KMS("output format: %s\n",
 +                    output_formats(pipe_config->output_format));
 +
        DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
                      transcoder_name(pipe_config->cpu_transcoder),
                      pipe_config->pipe_bpp, pipe_config->dither);
                                      pipe_config->fdi_lanes,
                                      &pipe_config->fdi_m_n);
  
 -      if (pipe_config->ycbcr420)
 -              DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
 -
        if (intel_crtc_has_dp_encoder(pipe_config)) {
                intel_dump_m_n_config(pipe_config, "dp m_n",
                                pipe_config->lane_count, &pipe_config->dp_m_n);
@@@ -11435,7 -11314,7 +11435,7 @@@ intel_modeset_pipe_config(struct drm_cr
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
 -      int base_bpp, ret = -EINVAL;
 +      int base_bpp, ret;
        int i;
        bool retry = true;
  
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
  
 -      base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
 -                                           pipe_config);
 -      if (base_bpp < 0)
 -              goto fail;
 +      ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
 +                                      pipe_config);
 +      if (ret)
 +              return ret;
 +
 +      base_bpp = pipe_config->pipe_bpp;
  
        /*
         * Determine the real pipe dimensions. Note that stereo modes can
  
                if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
                        DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
 -                      goto fail;
 +                      return -EINVAL;
                }
  
                /*
@@@ -11520,7 -11397,7 +11520,7 @@@ encoder_retry
  
                if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
                        DRM_DEBUG_KMS("Encoder config failure\n");
 -                      goto fail;
 +                      return -EINVAL;
                }
        }
  
                        * pipe_config->pixel_multiplier;
  
        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
 +      if (ret == -EDEADLK)
 +              return ret;
        if (ret < 0) {
                DRM_DEBUG_KMS("CRTC fixup failed\n");
 -              goto fail;
 +              return ret;
        }
  
        if (ret == RETRY) {
 -              if (WARN(!retry, "loop in pipe configuration computation\n")) {
 -                      ret = -EINVAL;
 -                      goto fail;
 -              }
 +              if (WARN(!retry, "loop in pipe configuration computation\n"))
 +                      return -EINVAL;
  
                DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
                retry = false;
        DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
  
 -fail:
 -      return ret;
 +      return 0;
  }
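
The retry handling above allows exactly one bandwidth-constrained recompute before declaring a loop, while -EDEADLK is propagated immediately for the locking backoff. A stand-alone sketch of that control flow, where RETRY and compute() are stand-ins for the driver's private return value and the encoder/CRTC compute step:

#include <stdio.h>

#define RETRY 1 /* stand-in for the driver's private RETRY value */

static int compute(int attempt)
{
        /* Pretend the first attempt is bandwidth-constrained. */
        return attempt == 0 ? RETRY : 0;
}

int main(void)
{
        int retry = 1, attempt = 0, ret;

again:
        ret = compute(attempt++);
        if (ret == RETRY) {
                if (!retry) {
                        fprintf(stderr, "loop in pipe configuration computation\n");
                        return 1;
                }
                retry = 0;      /* only one retry allowed */
                goto again;
        }
        printf("configured after %d attempt(s)\n", attempt);
        return ret;
}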
  
  static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@@ -11825,7 -11703,6 +11825,7 @@@ intel_pipe_config_compare(struct drm_i9
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
  
        PIPE_CONF_CHECK_I(pixel_multiplier);
 +      PIPE_CONF_CHECK_I(output_format);
        PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
        if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
        PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
        PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
 -      PIPE_CONF_CHECK_BOOL(ycbcr420);
  
        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
  
@@@ -11955,8 -11833,6 +11955,8 @@@ static void verify_wm_state(struct drm_
        struct skl_pipe_wm hw_wm, *sw_wm;
        struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
 +      struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
 +      struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const enum pipe pipe = intel_crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);
        skl_pipe_wm_get_hw_state(crtc, &hw_wm);
        sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
  
 +      skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
 +
        skl_ddb_get_hw_state(dev_priv, &hw_ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;
  
                }
  
                /* DDB */
 -              hw_ddb_entry = &hw_ddb.plane[pipe][plane];
 -              sw_ddb_entry = &sw_ddb->plane[pipe][plane];
 +              hw_ddb_entry = &hw_ddb_y[plane];
 +              sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
  
                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                }
  
                /* DDB */
 -              hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
 -              sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
 +              hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
 +              sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
  
                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@@ -12346,9 -12220,8 +12346,9 @@@ intel_modeset_verify_disabled(struct dr
        verify_disabled_dpll_state(dev);
  }
  
 -static void update_scanline_offset(struct intel_crtc *crtc)
 +static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
  {
 +      struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
  
        /*
         * answer that's slightly in the future.
         */
        if (IS_GEN2(dev_priv)) {
 -              const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 +              const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
                int vtotal;
  
                vtotal = adjusted_mode->crtc_vtotal;
  
                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev_priv) &&
 -                 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
 +                 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else
                crtc->scanline_offset = 1;
@@@ -12671,8 -12544,6 +12671,8 @@@ static int intel_atomic_check(struct dr
                }
  
                ret = intel_modeset_pipe_config(crtc, pipe_config);
 +              if (ret == -EDEADLK)
 +                      return ret;
                if (ret) {
                        intel_dump_pipe_config(to_intel_crtc(crtc),
                                               pipe_config, "[failed]");
                intel_state->cdclk.logical = dev_priv->cdclk.logical;
        }
  
 +      ret = icl_add_linked_planes(intel_state);
 +      if (ret)
 +              return ret;
 +
        ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;
@@@ -12747,7 -12614,7 +12747,7 @@@ static void intel_update_crtc(struct dr
                                                 to_intel_plane(crtc->primary));
  
        if (modeset) {
 -              update_scanline_offset(intel_crtc);
 +              update_scanline_offset(pipe_config);
                dev_priv->display.crtc_enable(pipe_config, state);
  
                /* vblanks work again, re-enable pipe CRC. */
        if (new_plane_state)
                intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
  
 -      drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 +      intel_begin_crtc_commit(crtc, old_crtc_state);
 +
 +      if (INTEL_GEN(dev_priv) >= 9)
 +              skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
 +      else
 +              i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
 +
 +      intel_finish_crtc_commit(crtc, old_crtc_state);
  }
  
  static void intel_update_crtcs(struct drm_atomic_state *state)
@@@ -12799,12 -12659,13 +12799,12 @@@ static void skl_update_crtcs(struct drm
        int i;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
 -
 -      const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
 +      struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
  
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
                /* ignore allocations for crtcs that have been turned off. */
                if (new_crtc_state->active)
 -                      entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
 +                      entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
  
        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
                        if (updated & cmask || !cstate->base.active)
                                continue;
  
 -                      if (skl_ddb_allocation_overlaps(dev_priv,
 +                      if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
                                                        entries,
 -                                                      &cstate->wm.skl.ddb,
 -                                                      i))
 +                                                      INTEL_INFO(dev_priv)->num_pipes, i))
                                continue;
  
                        updated |= cmask;
 -                      entries[i] = &cstate->wm.skl.ddb;
 +                      entries[i] = cstate->wm.skl.ddb;
  
                        /*
                         * If this is an already active pipe, its DDB changed,
@@@ -12926,9 -12788,8 +12926,9 @@@ static void intel_atomic_commit_tail(st
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 +      struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
        struct drm_crtc *crtc;
 -      struct intel_crtc_state *intel_cstate;
 +      struct intel_crtc *intel_crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        int i;
  
                intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
  
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 -              struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +              old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
 +              new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
 +              intel_crtc = to_intel_crtc(crtc);
  
                if (needs_modeset(new_crtc_state) ||
                    to_intel_crtc_state(new_crtc_state)->update_pipe) {
  
 -                      put_domains[to_intel_crtc(crtc)->pipe] =
 +                      put_domains[intel_crtc->pipe] =
                                modeset_get_crtc_power_domains(crtc,
 -                                      to_intel_crtc_state(new_crtc_state));
 +                                      new_intel_crtc_state);
                }
  
                if (!needs_modeset(new_crtc_state))
                        continue;
  
 -              intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
 -                                     to_intel_crtc_state(new_crtc_state));
 +              intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
  
                if (old_crtc_state->active) {
 -                      intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
 +                      intel_crtc_disable_planes(intel_state, intel_crtc);
  
                        /*
                         * We need to disable pipe CRC before disabling the pipe,
                         */
                        intel_crtc_disable_pipe_crc(intel_crtc);
  
 -                      dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
 +                      dev_priv->display.crtc_disable(old_intel_crtc_state, state);
                        intel_crtc->active = false;
                        intel_fbc_disable(intel_crtc);
 -                      intel_disable_shared_dpll(intel_crtc);
 +                      intel_disable_shared_dpll(old_intel_crtc_state);
  
                        /*
                         * Underruns don't always raise
                            !HAS_GMCH_DISPLAY(dev_priv) &&
                            dev_priv->display.initial_watermarks)
                                dev_priv->display.initial_watermarks(intel_state,
 -                                                                   to_intel_crtc_state(new_crtc_state));
 +                                                                   new_intel_crtc_state);
                }
        }
  
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 -              intel_cstate = to_intel_crtc_state(new_crtc_state);
 +              new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
  
                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(intel_state,
 -                                                            intel_cstate);
 +                                                            new_intel_crtc_state);
        }
  
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@@ -13313,7 -13173,7 +13313,7 @@@ static int intel_plane_pin_fb(struct in
        struct i915_vma *vma;
  
        if (plane->id == PLANE_CURSOR &&
 -          INTEL_INFO(dev_priv)->cursor_needs_physical) {
 +          INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
                struct drm_i915_gem_object *obj = intel_fb_obj(fb);
                const int align = intel_cursor_alignment(dev_priv);
                int err;
@@@ -13429,12 -13289,13 +13429,12 @@@ intel_prepare_plane_fb(struct drm_plan
  
        ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
  
 -      fb_obj_bump_render_priority(obj);
 -
        mutex_unlock(&dev_priv->drm.struct_mutex);
        i915_gem_object_unpin_pages(obj);
        if (ret)
                return ret;
  
 +      fb_obj_bump_render_priority(obj);
        intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
  
        if (!new_state->fence) { /* implicit fencing */
@@@ -13565,7 -13426,7 +13565,7 @@@ static void intel_begin_crtc_commit(str
        if (intel_cstate->update_pipe)
                intel_update_pipe_config(old_intel_cstate, intel_cstate);
        else if (INTEL_GEN(dev_priv) >= 9)
 -              skl_detach_scalers(intel_crtc);
 +              skl_detach_scalers(intel_cstate);
  
  out:
        if (dev_priv->display.atomic_update_watermarks)
@@@ -13667,14 -13528,75 +13667,14 @@@ static bool i965_plane_format_mod_suppo
        }
  }
  
 -static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
 -                                         u32 format, u64 modifier)
 +static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
 +                                            u32 format, u64 modifier)
  {
 -      struct intel_plane *plane = to_intel_plane(_plane);
 -
 -      switch (modifier) {
 -      case DRM_FORMAT_MOD_LINEAR:
 -      case I915_FORMAT_MOD_X_TILED:
 -      case I915_FORMAT_MOD_Y_TILED:
 -      case I915_FORMAT_MOD_Yf_TILED:
 -              break;
 -      case I915_FORMAT_MOD_Y_TILED_CCS:
 -      case I915_FORMAT_MOD_Yf_TILED_CCS:
 -              if (!plane->has_ccs)
 -                      return false;
 -              break;
 -      default:
 -              return false;
 -      }
 +      return modifier == DRM_FORMAT_MOD_LINEAR &&
 +              format == DRM_FORMAT_ARGB8888;
 +}
  
 -      switch (format) {
 -      case DRM_FORMAT_XRGB8888:
 -      case DRM_FORMAT_XBGR8888:
 -      case DRM_FORMAT_ARGB8888:
 -      case DRM_FORMAT_ABGR8888:
 -              if (is_ccs_modifier(modifier))
 -                      return true;
 -              /* fall through */
 -      case DRM_FORMAT_RGB565:
 -      case DRM_FORMAT_XRGB2101010:
 -      case DRM_FORMAT_XBGR2101010:
 -      case DRM_FORMAT_YUYV:
 -      case DRM_FORMAT_YVYU:
 -      case DRM_FORMAT_UYVY:
 -      case DRM_FORMAT_VYUY:
 -      case DRM_FORMAT_NV12:
 -              if (modifier == I915_FORMAT_MOD_Yf_TILED)
 -                      return true;
 -              /* fall through */
 -      case DRM_FORMAT_C8:
 -              if (modifier == DRM_FORMAT_MOD_LINEAR ||
 -                  modifier == I915_FORMAT_MOD_X_TILED ||
 -                  modifier == I915_FORMAT_MOD_Y_TILED)
 -                      return true;
 -              /* fall through */
 -      default:
 -              return false;
 -      }
 -}
 -
 -static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
 -                                            u32 format, u64 modifier)
 -{
 -      return modifier == DRM_FORMAT_MOD_LINEAR &&
 -              format == DRM_FORMAT_ARGB8888;
 -}
 -
 -static struct drm_plane_funcs skl_plane_funcs = {
 -      .update_plane = drm_atomic_helper_update_plane,
 -      .disable_plane = drm_atomic_helper_disable_plane,
 -      .destroy = intel_plane_destroy,
 -      .atomic_get_property = intel_plane_atomic_get_property,
 -      .atomic_set_property = intel_plane_atomic_set_property,
 -      .atomic_duplicate_state = intel_plane_duplicate_state,
 -      .atomic_destroy_state = intel_plane_destroy_state,
 -      .format_mod_supported = skl_plane_format_mod_supported,
 -};
 -
 -static struct drm_plane_funcs i965_plane_funcs = {
 +static const struct drm_plane_funcs i965_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .format_mod_supported = i965_plane_format_mod_supported,
  };
  
 -static struct drm_plane_funcs i8xx_plane_funcs = {
 +static const struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
@@@ -13711,16 -13633,14 +13711,16 @@@ intel_legacy_cursor_update(struct drm_p
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *old_fb;
 -      struct drm_crtc_state *crtc_state = crtc->state;
 +      struct intel_crtc_state *crtc_state =
 +              to_intel_crtc_state(crtc->state);
 +      struct intel_crtc_state *new_crtc_state;
  
        /*
         * When crtc is inactive or there is a modeset pending,
         * wait for it to complete in the slowpath
         */
 -      if (!crtc_state->active || needs_modeset(crtc_state) ||
 -          to_intel_crtc_state(crtc_state)->update_pipe)
 +      if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
 +          crtc_state->update_pipe)
                goto slow;
  
        old_plane_state = plane->state;
        if (!new_plane_state)
                return -ENOMEM;
  
 +      new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
 +      if (!new_crtc_state) {
 +              ret = -ENOMEM;
 +              goto out_free;
 +      }
 +
        drm_atomic_set_fb_for_plane(new_plane_state, fb);
  
        new_plane_state->src_x = src_x;
        new_plane_state->crtc_w = crtc_w;
        new_plane_state->crtc_h = crtc_h;
  
 -      ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
 -                                                to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
 -                                                to_intel_plane_state(plane->state),
 +      ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
 +                                                to_intel_plane_state(old_plane_state),
                                                  to_intel_plane_state(new_plane_state));
        if (ret)
                goto out_free;
        /* Swap plane state */
        plane->state = new_plane_state;
  
 +      /*
 +       * We cannot swap crtc_state as it may be in use by an atomic commit or
 +       * page flip that's running simultaneously. If we swap crtc_state and
 +       * destroy the old state, we will cause a use-after-free there.
 +       *
 +       * Only update active_planes, which is needed for our internal
 +       * bookkeeping. Either value will do the right thing when updating
 +       * planes atomically. If the cursor was part of the atomic update then
 +       * we would have taken the slowpath.
 +       */
 +      crtc_state->active_planes = new_crtc_state->active_planes;
 +
        if (plane->state->visible) {
                trace_intel_update_plane(plane, to_intel_crtc(crtc));
 -              intel_plane->update_plane(intel_plane,
 -                                        to_intel_crtc_state(crtc->state),
 +              intel_plane->update_plane(intel_plane, crtc_state,
                                          to_intel_plane_state(plane->state));
        } else {
                trace_intel_disable_plane(plane, to_intel_crtc(crtc));
 -              intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
 +              intel_plane->disable_plane(intel_plane, crtc_state);
        }
  
        intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
  out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
  out_free:
 +      if (new_crtc_state)
 +              intel_crtc_destroy_state(crtc, &new_crtc_state->base);
        if (ret)
                intel_plane_destroy_state(plane, new_plane_state);
        else
@@@ -13858,90 -13760,176 +13858,90 @@@ static bool i9xx_plane_has_fbc(struct d
                return i9xx_plane == PLANE_A;
  }
  
 -static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
 -                            enum pipe pipe, enum plane_id plane_id)
 -{
 -      if (!HAS_FBC(dev_priv))
 -              return false;
 -
 -      return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
 -}
 -
 -bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
 -                        enum pipe pipe, enum plane_id plane_id)
 -{
 -      /*
 -       * FIXME: ICL requires two hardware planes for scanning out NV12
 -       * framebuffers. Do not advertize support until this is implemented.
 -       */
 -      if (INTEL_GEN(dev_priv) >= 11)
 -              return false;
 -
 -      if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
 -              return false;
 -
 -      if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
 -              return false;
 -
 -      if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
 -              return false;
 -
 -      return true;
 -}
 -
  static struct intel_plane *
  intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
  {
 -      struct intel_plane *primary = NULL;
 -      struct intel_plane_state *state = NULL;
 +      struct intel_plane *plane;
        const struct drm_plane_funcs *plane_funcs;
 -      const uint32_t *intel_primary_formats;
        unsigned int supported_rotations;
 -      unsigned int num_formats;
 -      const uint64_t *modifiers;
 +      unsigned int possible_crtcs;
 +      const u64 *modifiers;
 +      const u32 *formats;
 +      int num_formats;
        int ret;
  
 -      primary = kzalloc(sizeof(*primary), GFP_KERNEL);
 -      if (!primary) {
 -              ret = -ENOMEM;
 -              goto fail;
 -      }
 -
 -      state = intel_create_plane_state(&primary->base);
 -      if (!state) {
 -              ret = -ENOMEM;
 -              goto fail;
 -      }
 +      if (INTEL_GEN(dev_priv) >= 9)
 +              return skl_universal_plane_create(dev_priv, pipe,
 +                                                PLANE_PRIMARY);
  
 -      primary->base.state = &state->base;
 +      plane = intel_plane_alloc();
 +      if (IS_ERR(plane))
 +              return plane;
  
 -      if (INTEL_GEN(dev_priv) >= 9)
 -              state->scaler_id = -1;
 -      primary->pipe = pipe;
 +      plane->pipe = pipe;
        /*
         * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
         * port is hooked to pipe B. Hence we want plane A feeding pipe B.
         */
        if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
 -              primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
 +              plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
        else
 -              primary->i9xx_plane = (enum i9xx_plane_id) pipe;
 -      primary->id = PLANE_PRIMARY;
 -      primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
 +              plane->i9xx_plane = (enum i9xx_plane_id) pipe;
 +      plane->id = PLANE_PRIMARY;
 +      plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
  
 -      if (INTEL_GEN(dev_priv) >= 9)
 -              primary->has_fbc = skl_plane_has_fbc(dev_priv,
 -                                                   primary->pipe,
 -                                                   primary->id);
 -      else
 -              primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
 -                                                    primary->i9xx_plane);
 -
 -      if (primary->has_fbc) {
 +      plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
 +      if (plane->has_fbc) {
                struct intel_fbc *fbc = &dev_priv->fbc;
  
 -              fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
 +              fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
        }
  
 -      if (INTEL_GEN(dev_priv) >= 9) {
 -              primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
 -                                                   PLANE_PRIMARY);
 -
 -              if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
 -                      intel_primary_formats = skl_pri_planar_formats;
 -                      num_formats = ARRAY_SIZE(skl_pri_planar_formats);
 -              } else {
 -                      intel_primary_formats = skl_primary_formats;
 -                      num_formats = ARRAY_SIZE(skl_primary_formats);
 -              }
 -
 -              if (primary->has_ccs)
 -                      modifiers = skl_format_modifiers_ccs;
 -              else
 -                      modifiers = skl_format_modifiers_noccs;
 -
 -              primary->max_stride = skl_plane_max_stride;
 -              primary->update_plane = skl_update_plane;
 -              primary->disable_plane = skl_disable_plane;
 -              primary->get_hw_state = skl_plane_get_hw_state;
 -              primary->check_plane = skl_plane_check;
 -
 -              plane_funcs = &skl_plane_funcs;
 -      } else if (INTEL_GEN(dev_priv) >= 4) {
 -              intel_primary_formats = i965_primary_formats;
 +      if (INTEL_GEN(dev_priv) >= 4) {
 +              formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
                modifiers = i9xx_format_modifiers;
  
 -              primary->max_stride = i9xx_plane_max_stride;
 -              primary->update_plane = i9xx_update_plane;
 -              primary->disable_plane = i9xx_disable_plane;
 -              primary->get_hw_state = i9xx_plane_get_hw_state;
 -              primary->check_plane = i9xx_plane_check;
 +              plane->max_stride = i9xx_plane_max_stride;
 +              plane->update_plane = i9xx_update_plane;
 +              plane->disable_plane = i9xx_disable_plane;
 +              plane->get_hw_state = i9xx_plane_get_hw_state;
 +              plane->check_plane = i9xx_plane_check;
  
                plane_funcs = &i965_plane_funcs;
        } else {
 -              intel_primary_formats = i8xx_primary_formats;
 +              formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
                modifiers = i9xx_format_modifiers;
  
 -              primary->max_stride = i9xx_plane_max_stride;
 -              primary->update_plane = i9xx_update_plane;
 -              primary->disable_plane = i9xx_disable_plane;
 -              primary->get_hw_state = i9xx_plane_get_hw_state;
 -              primary->check_plane = i9xx_plane_check;
 +              plane->max_stride = i9xx_plane_max_stride;
 +              plane->update_plane = i9xx_update_plane;
 +              plane->disable_plane = i9xx_disable_plane;
 +              plane->get_hw_state = i9xx_plane_get_hw_state;
 +              plane->check_plane = i9xx_plane_check;
  
                plane_funcs = &i8xx_plane_funcs;
        }
  
 -      if (INTEL_GEN(dev_priv) >= 9)
 -              ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
 -                                             0, plane_funcs,
 -                                             intel_primary_formats, num_formats,
 -                                             modifiers,
 -                                             DRM_PLANE_TYPE_PRIMARY,
 -                                             "plane 1%c", pipe_name(pipe));
 -      else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 -              ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
 -                                             0, plane_funcs,
 -                                             intel_primary_formats, num_formats,
 -                                             modifiers,
 +      possible_crtcs = BIT(pipe);
 +
 +      if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 +              ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
 +                                             possible_crtcs, plane_funcs,
 +                                             formats, num_formats, modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
 -              ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
 -                                             0, plane_funcs,
 -                                             intel_primary_formats, num_formats,
 -                                             modifiers,
 +              ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
 +                                             possible_crtcs, plane_funcs,
 +                                             formats, num_formats, modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane %c",
 -                                             plane_name(primary->i9xx_plane));
 +                                             plane_name(plane->i9xx_plane));
        if (ret)
                goto fail;
  
 -      if (INTEL_GEN(dev_priv) >= 10) {
 -              supported_rotations =
 -                      DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
 -                      DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
 -                      DRM_MODE_REFLECT_X;
 -      } else if (INTEL_GEN(dev_priv) >= 9) {
 -              supported_rotations =
 -                      DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
 -                      DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
 -      } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
 +      if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
                        DRM_MODE_REFLECT_X;
        }
  
        if (INTEL_GEN(dev_priv) >= 4)
 -              drm_plane_create_rotation_property(&primary->base,
 +              drm_plane_create_rotation_property(&plane->base,
                                                   DRM_MODE_ROTATE_0,
                                                   supported_rotations);
  
 -      if (INTEL_GEN(dev_priv) >= 9)
 -              drm_plane_create_color_properties(&primary->base,
 -                                                BIT(DRM_COLOR_YCBCR_BT601) |
 -                                                BIT(DRM_COLOR_YCBCR_BT709),
 -                                                BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
 -                                                BIT(DRM_COLOR_YCBCR_FULL_RANGE),
 -                                                DRM_COLOR_YCBCR_BT709,
 -                                                DRM_COLOR_YCBCR_LIMITED_RANGE);
 +      drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
  
 -      drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
 -
 -      return primary;
 +      return plane;
  
  fail:
 -      kfree(state);
 -      kfree(primary);
 +      intel_plane_free(plane);
  
        return ERR_PTR(ret);
  }
@@@ -13971,13 -13969,23 +13971,13 @@@ static struct intel_plane 
  intel_cursor_plane_create(struct drm_i915_private *dev_priv,
                          enum pipe pipe)
  {
 -      struct intel_plane *cursor = NULL;
 -      struct intel_plane_state *state = NULL;
 +      unsigned int possible_crtcs;
 +      struct intel_plane *cursor;
        int ret;
  
 -      cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
 -      if (!cursor) {
 -              ret = -ENOMEM;
 -              goto fail;
 -      }
 -
 -      state = intel_create_plane_state(&cursor->base);
 -      if (!state) {
 -              ret = -ENOMEM;
 -              goto fail;
 -      }
 -
 -      cursor->base.state = &state->base;
 +      cursor = intel_plane_alloc();
 +      if (IS_ERR(cursor))
 +              return cursor;
  
        cursor->pipe = pipe;
        cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
        if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
                cursor->cursor.size = ~0;
  
 +      possible_crtcs = BIT(pipe);
 +
        ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
 -                                     0, &intel_cursor_plane_funcs,
 +                                     possible_crtcs, &intel_cursor_plane_funcs,
                                       intel_cursor_formats,
                                       ARRAY_SIZE(intel_cursor_formats),
                                       cursor_format_modifiers,
                                                   DRM_MODE_ROTATE_0 |
                                                   DRM_MODE_ROTATE_180);
  
 -      if (INTEL_GEN(dev_priv) >= 9)
 -              state->scaler_id = -1;
 -
        drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
  
        return cursor;
  
  fail:
 -      kfree(state);
 -      kfree(cursor);
 +      intel_plane_free(cursor);
  
        return ERR_PTR(ret);
  }
@@@ -14048,7 -14058,7 +14048,7 @@@ static void intel_crtc_init_scalers(str
                struct intel_scaler *scaler = &scaler_state->scalers[i];
  
                scaler->in_use = 0;
 -              scaler->mode = PS_SCALER_MODE_DYN;
 +              scaler->mode = 0;
        }
  
        scaler_state->scaler_id = -1;
        return ret;
  }
  
 -enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
 -{
 -      struct drm_device *dev = connector->base.dev;
 -
 -      WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 -
 -      if (!connector->base.state->crtc)
 -              return INVALID_PIPE;
 -
 -      return to_intel_crtc(connector->base.state->crtc)->pipe;
 -}
 -
  int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file)
  {
@@@ -14259,7 -14281,7 +14259,7 @@@ static void intel_setup_outputs(struct 
  
        intel_pps_init(dev_priv);
  
 -      if (INTEL_INFO(dev_priv)->num_pipes == 0)
 +      if (!HAS_DISPLAY(dev_priv))
                return;
  
        /*
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
 +              icl_dsi_init(dev_priv);
        } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
@@@ -14502,7 -14523,7 +14502,7 @@@ static const struct drm_framebuffer_fun
  
  static
  u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
 -                       uint64_t fb_modifier, uint32_t pixel_format)
 +                       u32 pixel_format, u64 fb_modifier)
  {
        struct intel_crtc *crtc;
        struct intel_plane *plane;
@@@ -14524,6 -14545,7 +14524,6 @@@ static int intel_framebuffer_init(struc
  {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_framebuffer *fb = &intel_fb->base;
 -      struct drm_format_name_buf format_name;
        u32 pitch_limit;
        unsigned int tiling, stride;
        int ret = -EINVAL;
                }
        }
  
 -      /* Passed in modifier sanity checking. */
 -      switch (mode_cmd->modifier[0]) {
 -      case I915_FORMAT_MOD_Y_TILED_CCS:
 -      case I915_FORMAT_MOD_Yf_TILED_CCS:
 -              switch (mode_cmd->pixel_format) {
 -              case DRM_FORMAT_XBGR8888:
 -              case DRM_FORMAT_ABGR8888:
 -              case DRM_FORMAT_XRGB8888:
 -              case DRM_FORMAT_ARGB8888:
 -                      break;
 -              default:
 -                      DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
 -                      goto err;
 -              }
 -              /* fall through */
 -      case I915_FORMAT_MOD_Y_TILED:
 -      case I915_FORMAT_MOD_Yf_TILED:
 -              if (INTEL_GEN(dev_priv) < 9) {
 -                      DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
 -                                    mode_cmd->modifier[0]);
 -                      goto err;
 -              }
 -      case DRM_FORMAT_MOD_LINEAR:
 -      case I915_FORMAT_MOD_X_TILED:
 -              break;
 -      default:
 -              DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
 +      if (!drm_any_plane_has_format(&dev_priv->drm,
 +                                    mode_cmd->pixel_format,
 +                                    mode_cmd->modifier[0])) {
 +              struct drm_format_name_buf format_name;
 +
 +              DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
 +                            drm_get_format_name(mode_cmd->pixel_format,
 +                                                &format_name),
                              mode_cmd->modifier[0]);
                goto err;
        }
                goto err;
        }
  
 -      pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
 -                                         mode_cmd->pixel_format);
 +      pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
 +                                         mode_cmd->modifier[0]);
        if (mode_cmd->pitches[0] > pitch_limit) {
                DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
                              mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
                goto err;
        }
  
 -      /* Reject formats not supported by any plane early. */
 -      switch (mode_cmd->pixel_format) {
 -      case DRM_FORMAT_C8:
 -      case DRM_FORMAT_RGB565:
 -      case DRM_FORMAT_XRGB8888:
 -      case DRM_FORMAT_ARGB8888:
 -              break;
 -      case DRM_FORMAT_XRGB1555:
 -              if (INTEL_GEN(dev_priv) > 3) {
 -                      DRM_DEBUG_KMS("unsupported pixel format: %s\n",
 -                                    drm_get_format_name(mode_cmd->pixel_format, &format_name));
 -                      goto err;
 -              }
 -              break;
 -      case DRM_FORMAT_ABGR8888:
 -              if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
 -                  INTEL_GEN(dev_priv) < 9) {
 -                      DRM_DEBUG_KMS("unsupported pixel format: %s\n",
 -                                    drm_get_format_name(mode_cmd->pixel_format, &format_name));
 -                      goto err;
 -              }
 -              break;
 -      case DRM_FORMAT_XBGR8888:
 -      case DRM_FORMAT_XRGB2101010:
 -      case DRM_FORMAT_XBGR2101010:
 -              if (INTEL_GEN(dev_priv) < 4) {
 -                      DRM_DEBUG_KMS("unsupported pixel format: %s\n",
 -                                    drm_get_format_name(mode_cmd->pixel_format, &format_name));
 -                      goto err;
 -              }
 -              break;
 -      case DRM_FORMAT_ABGR2101010:
 -              if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
 -                      DRM_DEBUG_KMS("unsupported pixel format: %s\n",
 -                                    drm_get_format_name(mode_cmd->pixel_format, &format_name));
 -                      goto err;
 -              }
 -              break;
 -      case DRM_FORMAT_YUYV:
 -      case DRM_FORMAT_UYVY:
 -      case DRM_FORMAT_YVYU:
 -      case DRM_FORMAT_VYUY:
 -              if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
 -                      DRM_DEBUG_KMS("unsupported pixel format: %s\n",
 -                                    drm_get_format_name(mode_cmd->pixel_format, &format_name));
 -                      goto err;
 -              }
 -              break;
 -      case DRM_FORMAT_NV12:
 -              if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
 -                  IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
 -                      DRM_DEBUG_KMS("unsupported pixel format: %s\n",
 -                                    drm_get_format_name(mode_cmd->pixel_format,
 -                                                        &format_name));
 -                      goto err;
 -              }
 -              break;
 -      default:
 -              DRM_DEBUG_KMS("unsupported pixel format: %s\n",
 -                            drm_get_format_name(mode_cmd->pixel_format, &format_name));
 -              goto err;
 -      }
 -
        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0)
                goto err;
@@@ -14867,6 -14971,174 +14867,6 @@@ void intel_init_display_hooks(struct dr
                dev_priv->display.update_crtcs = intel_update_crtcs;
  }
  
 -/*
 - * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 - */
 -static void quirk_ssc_force_disable(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
 -      DRM_INFO("applying lvds SSC disable quirk\n");
 -}
 -
 -/*
 - * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 - * brightness value
 - */
 -static void quirk_invert_brightness(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
 -      DRM_INFO("applying inverted panel brightness quirk\n");
 -}
 -
 -/* Some VBT's incorrectly indicate no backlight is present */
 -static void quirk_backlight_present(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
 -      DRM_INFO("applying backlight present quirk\n");
 -}
 -
 -/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
 - * which is 300 ms greater than eDP spec T12 min.
 - */
 -static void quirk_increase_t12_delay(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -
 -      dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
 -      DRM_INFO("Applying T12 delay quirk\n");
 -}
 -
 -/*
 - * GeminiLake NUC HDMI outputs require additional off time
 - * this allows the onboard retimer to correctly sync to signal
 - */
 -static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -
 -      dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
 -      DRM_INFO("Applying Increase DDI Disabled quirk\n");
 -}
 -
 -struct intel_quirk {
 -      int device;
 -      int subsystem_vendor;
 -      int subsystem_device;
 -      void (*hook)(struct drm_device *dev);
 -};
 -
 -/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
 -struct intel_dmi_quirk {
 -      void (*hook)(struct drm_device *dev);
 -      const struct dmi_system_id (*dmi_id_list)[];
 -};
 -
 -static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
 -{
 -      DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
 -      return 1;
 -}
 -
 -static const struct intel_dmi_quirk intel_dmi_quirks[] = {
 -      {
 -              .dmi_id_list = &(const struct dmi_system_id[]) {
 -                      {
 -                              .callback = intel_dmi_reverse_brightness,
 -                              .ident = "NCR Corporation",
 -                              .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
 -                                          DMI_MATCH(DMI_PRODUCT_NAME, ""),
 -                              },
 -                      },
 -                      { }  /* terminating entry */
 -              },
 -              .hook = quirk_invert_brightness,
 -      },
 -};
 -
 -static struct intel_quirk intel_quirks[] = {
 -      /* Lenovo U160 cannot use SSC on LVDS */
 -      { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
 -
 -      /* Sony Vaio Y cannot use SSC on LVDS */
 -      { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 -
 -      /* Acer Aspire 5734Z must invert backlight brightness */
 -      { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
 -
 -      /* Acer/eMachines G725 */
 -      { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
 -
 -      /* Acer/eMachines e725 */
 -      { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
 -
 -      /* Acer/Packard Bell NCL20 */
 -      { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
 -
 -      /* Acer Aspire 4736Z */
 -      { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 -
 -      /* Acer Aspire 5336 */
 -      { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
 -
 -      /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
 -      { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
 -
 -      /* Acer C720 Chromebook (Core i3 4005U) */
 -      { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
 -
 -      /* Apple Macbook 2,1 (Core 2 T7400) */
 -      { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
 -
 -      /* Apple Macbook 4,1 */
 -      { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
 -
 -      /* Toshiba CB35 Chromebook (Celeron 2955U) */
 -      { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 -
 -      /* HP Chromebook 14 (Celeron 2955U) */
 -      { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
 -
 -      /* Dell Chromebook 11 */
 -      { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
 -
 -      /* Dell Chromebook 11 (2015 version) */
 -      { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
 -
 -      /* Toshiba Satellite P50-C-18C */
 -      { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
 -
 -      /* GeminiLake NUC */
 -      { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
 -      { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
 -      /* ASRock ITX*/
 -      { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 -      { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 -};
 -
 -static void intel_init_quirks(struct drm_device *dev)
 -{
 -      struct pci_dev *d = dev->pdev;
 -      int i;
 -
 -      for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
 -              struct intel_quirk *q = &intel_quirks[i];
 -
 -              if (d->device == q->device &&
 -                  (d->subsystem_vendor == q->subsystem_vendor ||
 -                   q->subsystem_vendor == PCI_ANY_ID) &&
 -                  (d->subsystem_device == q->subsystem_device ||
 -                   q->subsystem_device == PCI_ANY_ID))
 -                      q->hook(dev);
 -      }
 -      for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
 -              if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
 -                      intel_dmi_quirks[i].hook(dev);
 -      }
 -}
 -
  /* Disable the VGA plane that we never use */
  static void i915_disable_vga(struct drm_i915_private *dev_priv)
  {
@@@ -15080,9 -15352,7 +15080,9 @@@ int intel_modeset_init(struct drm_devic
        INIT_WORK(&dev_priv->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);
  
 -      intel_init_quirks(dev);
 +      intel_init_quirks(dev_priv);
 +
 +      intel_fbc_init(dev_priv);
  
        intel_init_pm(dev_priv);
  
@@@ -15314,8 -15584,8 +15314,8 @@@ intel_sanitize_plane_mapping(struct drm
                if (pipe == crtc->pipe)
                        continue;
  
 -              DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
 -                            plane->base.name);
 +              DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
 +                            plane->base.base.id, plane->base.name);
  
                plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                intel_plane_disable_noatomic(plane_crtc, plane);
@@@ -15356,8 -15626,7 +15356,8 @@@ static void intel_sanitize_crtc(struct 
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 -      enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 +      struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
 +      enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  
        /* Clear any frame start delays used for debugging left by the BIOS */
        if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
                           I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
        }
  
 -      if (crtc->active) {
 +      if (crtc_state->base.active) {
                struct intel_plane *plane;
  
                /* Disable everything but the primary plane */
  
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
 -      if (crtc->active && !intel_crtc_has_encoders(crtc))
 +      if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base, ctx);
  
 -      if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
 +      if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
  
  static void intel_sanitize_encoder(struct intel_encoder *encoder)
  {
 +      struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
  
        /* We need to check both for a crtc link (meaning that the
                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);
 -                      encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
 +                      if (encoder->disable)
 +                              encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                }
  
        /* notify opregion of the sanitized encoder state */
        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
 +
 +      if (INTEL_GEN(dev_priv) >= 11)
 +              icl_sanitize_encoder_pll_mapping(encoder);
  }
  
  void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@@ -15510,10 -15774,6 +15510,10 @@@ static void readout_plane_state(struct 
                crtc_state = to_intel_crtc_state(crtc->base.state);
  
                intel_set_plane_visible(crtc_state, plane_state, visible);
 +
 +              DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
 +                            plane->base.base.id, plane->base.name,
 +                            enableddisabled(visible), pipe_name(pipe));
        }
  
        for_each_intel_crtc(&dev_priv->drm, crtc) {
@@@ -15666,7 -15926,7 +15666,7 @@@ static void intel_modeset_readout_hw_st
  
                        drm_calc_timestamping_constants(&crtc->base,
                                                        &crtc_state->base.adjusted_mode);
 -                      update_scanline_offset(crtc);
 +                      update_scanline_offset(crtc_state);
                }
  
                dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
@@@ -15721,65 -15981,6 +15721,65 @@@ static void intel_early_display_was(str
        }
  }
  
 +static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
 +                                     enum port port, i915_reg_t hdmi_reg)
 +{
 +      u32 val = I915_READ(hdmi_reg);
 +
 +      if (val & SDVO_ENABLE ||
 +          (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
 +              return;
 +
 +      DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
 +                    port_name(port));
 +
 +      val &= ~SDVO_PIPE_SEL_MASK;
 +      val |= SDVO_PIPE_SEL(PIPE_A);
 +
 +      I915_WRITE(hdmi_reg, val);
 +}
 +
 +static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
 +                                   enum port port, i915_reg_t dp_reg)
 +{
 +      u32 val = I915_READ(dp_reg);
 +
 +      if (val & DP_PORT_EN ||
 +          (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
 +              return;
 +
 +      DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
 +                    port_name(port));
 +
 +      val &= ~DP_PIPE_SEL_MASK;
 +      val |= DP_PIPE_SEL(PIPE_A);
 +
 +      I915_WRITE(dp_reg, val);
 +}
 +
 +static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
 +{
 +      /*
 +       * The BIOS may select transcoder B on some of the PCH
 +       * ports even if it doesn't enable the port. This would trip
 +       * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
 +       * Sanitize the transcoder select bits to prevent that. We
 +       * assume that the BIOS never actually enabled the port,
 +       * because if it did we'd actually have to toggle the port
 +       * on and back off to make the transcoder A select stick
 +       * (see intel_dp_link_down(), intel_disable_hdmi(),
 +       * intel_disable_sdvo()).
 +       */
 +      ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
 +      ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
 +      ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
 +
 +      /* PCH SDVOB multiplex with HDMIB */
 +      ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
 +      ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
 +      ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
 +}
 +
  /* Scan out the current hw modeset state,
   * and sanitize it to the current state
   */
@@@ -15789,7 -15990,6 +15789,7 @@@ intel_modeset_setup_hw_state(struct drm
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
 +      struct intel_crtc_state *crtc_state;
        struct intel_encoder *encoder;
        int i;
  
        /* HW state is read out, now we need to sanitize this mess. */
        get_encoder_power_domains(dev_priv);
  
 +      if (HAS_PCH_IBX(dev_priv))
 +              ibx_sanitize_pch_ports(dev_priv);
 +
        /*
         * intel_sanitize_plane_mapping() may need to do vblank
         * waits, so we need vblank interrupts restored beforehand.
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                drm_crtc_vblank_reset(&crtc->base);
  
 -              if (crtc->active)
 +              if (crtc->base.state->active)
                        drm_crtc_vblank_on(&crtc->base);
        }
  
                intel_sanitize_encoder(encoder);
  
        for_each_intel_crtc(&dev_priv->drm, crtc) {
 +              crtc_state = to_intel_crtc_state(crtc->base.state);
                intel_sanitize_crtc(crtc, ctx);
 -              intel_dump_pipe_config(crtc, crtc->config,
 +              intel_dump_pipe_config(crtc, crtc_state,
                                       "[setup_hw_state]");
        }
  
        for_each_intel_crtc(dev, crtc) {
                u64 put_domains;
  
 -              put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
 +              crtc_state = to_intel_crtc_state(crtc->base.state);
 +              put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }
@@@ -15902,6 -16097,29 +15902,6 @@@ void intel_display_resume(struct drm_de
                drm_atomic_state_put(state);
  }
  
 -int intel_connector_register(struct drm_connector *connector)
 -{
 -      struct intel_connector *intel_connector = to_intel_connector(connector);
 -      int ret;
 -
 -      ret = intel_backlight_device_register(intel_connector);
 -      if (ret)
 -              goto err;
 -
 -      return 0;
 -
 -err:
 -      return ret;
 -}
 -
 -void intel_connector_unregister(struct drm_connector *connector)
 -{
 -      struct intel_connector *intel_connector = to_intel_connector(connector);
 -
 -      intel_backlight_device_unregister(intel_connector);
 -      intel_panel_destroy_backlight(connector);
 -}
 -
  static void intel_hpd_poll_fini(struct drm_device *dev)
  {
        struct intel_connector *connector;
        for_each_intel_connector_iter(connector, &conn_iter) {
                if (connector->modeset_retry_work.func)
                        cancel_work_sync(&connector->modeset_retry_work);
 -              if (connector->hdcp_shim) {
 -                      cancel_delayed_work_sync(&connector->hdcp_check_work);
 -                      cancel_work_sync(&connector->hdcp_prop_work);
 +              if (connector->hdcp.shim) {
 +                      cancel_delayed_work_sync(&connector->hdcp.check_work);
 +                      cancel_work_sync(&connector->hdcp.prop_work);
                }
        }
        drm_connector_list_iter_end(&conn_iter);
@@@ -15954,13 -16172,18 +15954,13 @@@ void intel_modeset_cleanup(struct drm_d
  
        drm_mode_config_cleanup(dev);
  
 -      intel_cleanup_overlay(dev_priv);
 +      intel_overlay_cleanup(dev_priv);
  
        intel_teardown_gmbus(dev_priv);
  
        destroy_workqueue(dev_priv->modeset_wq);
 -}
  
 -void intel_connector_attach_encoder(struct intel_connector *connector,
 -                                  struct intel_encoder *encoder)
 -{
 -      connector->encoder = encoder;
 -      drm_connector_attach_encoder(&connector->base, &encoder->base);
 +      intel_fbc_cleanup_cfb(dev_priv);
  }
  
  /*
@@@ -16050,7 -16273,7 +16050,7 @@@ intel_display_capture_error_state(struc
        };
        int i;
  
 -      if (INTEL_INFO(dev_priv)->num_pipes == 0)
 +      if (!HAS_DISPLAY(dev_priv))
                return NULL;
  
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
index 9fd5fbe8bebf45edc11088ab0e604ddceb963ffc,47fcc232748197bf3ef1999ce71049ad42dfe6a3..25afb1d594e326c10e435e19a8f4346db2561459
@@@ -34,7 -34,7 +34,7 @@@
  #include <drm/ttm/ttm_placement.h>
  #include <drm/ttm/ttm_bo_driver.h>
  #include <drm/ttm/ttm_module.h>
- #include <linux/dma_remapping.h>
+ #include <linux/intel-iommu.h>
  
  #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
  #define VMWGFX_CHIP_SVGAII 0
@@@ -583,7 -583,7 +583,7 @@@ static int vmw_dma_select_mode(struct v
  
        dev_priv->map_mode = vmw_dma_map_populate;
  
 -      if (dma_ops->sync_single_for_cpu)
 +      if (dma_ops && dma_ops->sync_single_for_cpu)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
  #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl() == 0)
@@@ -667,6 -667,7 +667,6 @@@ static int vmw_driver_load(struct drm_d
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
 -      mutex_init(&dev_priv->requested_layout_mutex);
        mutex_init(&dev_priv->global_kms_state_mutex);
        ttm_lock_init(&dev_priv->reservation_sem);
        spin_lock_init(&dev_priv->resource_lock);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
  
 -      ret = vmw_ttm_global_init(dev_priv);
 -      if (unlikely(ret != 0))
 -              goto out_err0;
 -
 -
        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;
        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
 -              goto out_err3;
 +              goto out_err0;
        }
  
        /* Need mmio memory to check for fifo pitchlock cap. */
                goto out_err4;
        }
  
 -      dev_priv->tdev = ttm_object_device_init
 -              (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
 +      dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
 +                                              &vmw_prime_dmabuf_ops);
  
        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
        }
  
        ret = ttm_bo_device_init(&dev_priv->bdev,
 -                               dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 VMWGFX_FILE_PAGE_OFFSET,
@@@ -987,6 -994,8 +987,6 @@@ out_no_device
        ttm_object_device_release(&dev_priv->tdev);
  out_err4:
        memunmap(dev_priv->mmio_virt);
 -out_err3:
 -      vmw_ttm_global_release(dev_priv);
  out_err0:
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);
@@@ -1038,6 -1047,7 +1038,6 @@@ static void vmw_driver_unload(struct dr
        memunmap(dev_priv->mmio_virt);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 -      vmw_ttm_global_release(dev_priv);
  
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);
index 567221cca13c8b8c2389407142c13038d1d70af1,49a669b1ce116082213805d9791957c180097812..87ba23a75b381ff245690a1106ddf190b0b327f7
@@@ -17,6 -17,8 +17,8 @@@
   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   */
  
+ #define pr_fmt(fmt)     "AMD-Vi: " fmt
+
  #include <linux/ratelimit.h>
  #include <linux/pci.h>
  #include <linux/acpi.h>
@@@ -55,6 -57,8 +57,6 @@@
  #include "amd_iommu_types.h"
  #include "irq_remapping.h"
  
 -#define AMD_IOMMU_MAPPING_ERROR       0
 -
  #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
  
  #define LOOP_TIMEOUT  100000
@@@ -277,7 -281,7 +279,7 @@@ static u16 get_alias(struct device *dev
                return pci_alias;
        }
  
-       pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+       pr_info("Using IVRS reported alias %02x:%02x.%d "
                "for device %s[%04x:%04x], kernel reported alias "
                "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
                PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
        if (pci_alias == devid &&
            PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
                pci_add_dma_alias(pdev, ivrs_alias & 0xff);
-               pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+               pr_info("Added PCI DMA alias %02x.%d for %s\n",
                        PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
                        dev_name(dev));
        }
@@@ -436,7 -440,14 +438,14 @@@ static int iommu_init_device(struct dev
  
        dev_data->alias = get_alias(dev);
  
-       if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
+       /*
+        * By default we use passthrough mode for IOMMUv2 capable devices.
+        * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
+        * invalid address), we ignore the capability for the device so
+        * it'll be forced to go into translation mode.
+        */
+       if ((iommu_pass_through || !amd_iommu_force_isolation) &&
+           dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
                struct amd_iommu *iommu;
  
                iommu = amd_iommu_rlookup_table[dev_data->devid];
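
Read in isolation, the new guard combines three conditions; a hedged restatement of the policy (the helper name below is made up purely for illustration):

#include <linux/types.h>

/* Illustrative only: a device keeps the IOMMUv2 pass-through setup unless
 * amd_iommu=force_isolation asks for translation, and force_isolation is
 * ignored when the whole kernel already runs in pass-through mode. */
static bool keeps_iommuv2_setup(bool pass_through, bool force_isolation,
				bool iommuv2_capable)
{
	return (pass_through || !force_isolation) && iommuv2_capable;
}
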
@@@ -511,7 -522,7 +520,7 @@@ static void dump_dte_entry(u16 devid
        int i;
  
        for (i = 0; i < 4; ++i)
-               pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
+               pr_err("DTE[%d]: %016llx\n", i,
                        amd_iommu_dev_table[devid].data[i]);
  }
  
@@@ -521,7 -532,7 +530,7 @@@ static void dump_command(unsigned long 
        int i;
  
        for (i = 0; i < 4; ++i)
-               pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
+               pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
  }
  
  static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
                dev_data = get_dev_data(&pdev->dev);
  
        if (dev_data && __ratelimit(&dev_data->rs)) {
-               dev_err(&pdev->dev, "AMD-Vi: Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+               dev_err(&pdev->dev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
                        domain_id, address, flags);
        } else if (printk_ratelimit()) {
-               pr_err("AMD-Vi: Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+               pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        domain_id, address, flags);
        }
@@@ -566,7 -577,7 +575,7 @@@ retry
        if (type == 0) {
                /* Did we hit the erratum? */
                if (++count == LOOP_TIMEOUT) {
-                       pr_err("AMD-Vi: No event written to event log\n");
+                       pr_err("No event written to event log\n");
                        return;
                }
                udelay(1);
        if (type == EVENT_TYPE_IO_FAULT) {
                amd_iommu_report_page_fault(devid, pasid, address, flags);
                return;
-       } else {
-               dev_err(dev, "AMD-Vi: Event logged [");
        }
  
        switch (type) {
        case EVENT_TYPE_ILL_DEV:
-               dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                dump_dte_entry(devid);
                break;
        case EVENT_TYPE_DEV_TAB_ERR:
-               dev_err(dev, "DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
-                       "address=0x%016llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+                       "address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        address, flags);
                break;
        case EVENT_TYPE_PAGE_TAB_ERR:
-               dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                break;
        case EVENT_TYPE_ILL_CMD:
-               dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+               dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
                dump_command(address);
                break;
        case EVENT_TYPE_CMD_HARD_ERR:
-               dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
                        address, flags);
                break;
        case EVENT_TYPE_IOTLB_INV_TO:
-               dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%016llx]\n",
+               dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        address);
                break;
        case EVENT_TYPE_INV_DEV_REQ:
-               dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                break;
                pasid = ((event[0] >> 16) & 0xFFFF)
                        | ((event[1] << 6) & 0xF0000);
                tag = event[1] & 0x03FF;
-               dev_err(dev, "INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%016llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                break;
        default:
-               dev_err(dev, "UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
+               dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
                        event[0], event[1], event[2], event[3]);
        }
  
@@@ -652,7 -661,7 +659,7 @@@ static void iommu_handle_ppr_entry(stru
        struct amd_iommu_fault fault;
  
        if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
-               pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
+               pr_err_ratelimited("Unknown PPR request received\n");
                return;
        }
  
@@@ -757,12 -766,12 +764,12 @@@ static void iommu_poll_ga_log(struct am
                        if (!iommu_ga_log_notifier)
                                break;
  
-                       pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
+                       pr_debug("%s: devid=%#x, ga_tag=%#x\n",
                                 __func__, GA_DEVID(log_entry),
                                 GA_TAG(log_entry));
  
                        if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
-                               pr_err("AMD-Vi: GA log notifier failed.\n");
+                               pr_err("GA log notifier failed.\n");
                        break;
                default:
                        break;
@@@ -787,18 -796,18 +794,18 @@@ irqreturn_t amd_iommu_int_thread(int ir
                        iommu->mmio_base + MMIO_STATUS_OFFSET);
  
                if (status & MMIO_STATUS_EVT_INT_MASK) {
-                       pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
+                       pr_devel("Processing IOMMU Event Log\n");
                        iommu_poll_events(iommu);
                }
  
                if (status & MMIO_STATUS_PPR_INT_MASK) {
-                       pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
+                       pr_devel("Processing IOMMU PPR Log\n");
                        iommu_poll_ppr_log(iommu);
                }
  
  #ifdef CONFIG_IRQ_REMAP
                if (status & MMIO_STATUS_GALOG_INT_MASK) {
-                       pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
+                       pr_devel("Processing IOMMU GA Log\n");
                        iommu_poll_ga_log(iommu);
                }
  #endif
@@@ -842,7 -851,7 +849,7 @@@ static int wait_on_sem(volatile u64 *se
        }
  
        if (i == LOOP_TIMEOUT) {
-               pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
+               pr_alert("Completion-Wait loop timed out\n");
                return -EIO;
        }
  
@@@ -1034,7 -1043,7 +1041,7 @@@ again
                /* Skip udelay() the first time around */
                if (count++) {
                        if (count == LOOP_TIMEOUT) {
-                               pr_err("AMD-Vi: Command buffer timeout\n");
+                               pr_err("Command buffer timeout\n");
                                return -EIO;
                        }
  
@@@ -1315,6 -1324,101 +1322,101 @@@ static void domain_flush_devices(struc
   *
   ****************************************************************************/
  
+ static void free_page_list(struct page *freelist)
+ {
+       while (freelist != NULL) {
+               unsigned long p = (unsigned long)page_address(freelist);
+               freelist = freelist->freelist;
+               free_page(p);
+       }
+ }
+
+ static struct page *free_pt_page(unsigned long pt, struct page *freelist)
+ {
+       struct page *p = virt_to_page((void *)pt);
+
+       p->freelist = freelist;
+
+       return p;
+ }
+
+ #define DEFINE_FREE_PT_FN(LVL, FN)                                            \
+ static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
+ {                                                                             \
+       unsigned long p;                                                        \
+       u64 *pt;                                                                \
+       int i;                                                                  \
+                                                                               \
+       pt = (u64 *)__pt;                                                       \
+                                                                               \
+       for (i = 0; i < 512; ++i) {                                             \
+               /* PTE present? */                                              \
+               if (!IOMMU_PTE_PRESENT(pt[i]))                                  \
+                       continue;                                               \
+                                                                               \
+               /* Large PTE? */                                                \
+               if (PM_PTE_LEVEL(pt[i]) == 0 ||                                 \
+                   PM_PTE_LEVEL(pt[i]) == 7)                                   \
+                       continue;                                               \
+                                                                               \
+               p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);                       \
+               freelist = FN(p, freelist);                                     \
+       }                                                                       \
+                                                                               \
+       return free_pt_page((unsigned long)pt, freelist);                       \
+ }
+
+ DEFINE_FREE_PT_FN(l2, free_pt_page)
+ DEFINE_FREE_PT_FN(l3, free_pt_l2)
+ DEFINE_FREE_PT_FN(l4, free_pt_l3)
+ DEFINE_FREE_PT_FN(l5, free_pt_l4)
+ DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
+ static struct page *free_sub_pt(unsigned long root, int mode,
+                               struct page *freelist)
+ {
+       switch (mode) {
+       case PAGE_MODE_NONE:
+       case PAGE_MODE_7_LEVEL:
+               break;
+       case PAGE_MODE_1_LEVEL:
+               freelist = free_pt_page(root, freelist);
+               break;
+       case PAGE_MODE_2_LEVEL:
+               freelist = free_pt_l2(root, freelist);
+               break;
+       case PAGE_MODE_3_LEVEL:
+               freelist = free_pt_l3(root, freelist);
+               break;
+       case PAGE_MODE_4_LEVEL:
+               freelist = free_pt_l4(root, freelist);
+               break;
+       case PAGE_MODE_5_LEVEL:
+               freelist = free_pt_l5(root, freelist);
+               break;
+       case PAGE_MODE_6_LEVEL:
+               freelist = free_pt_l6(root, freelist);
+               break;
+       default:
+               BUG();
+       }
+       return freelist;
+ }
+
+ static void free_pagetable(struct protection_domain *domain)
+ {
+       unsigned long root = (unsigned long)domain->pt_root;
+       struct page *freelist = NULL;
+
+       BUG_ON(domain->mode < PAGE_MODE_NONE ||
+              domain->mode > PAGE_MODE_6_LEVEL);
+
+       freelist = free_sub_pt(root, domain->mode, freelist);
+
+       free_page_list(freelist);
+ }
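
The code above collects every page-table page on an intrusive list threaded through struct page's freelist member rather than freeing it on the spot; the caller can then defer the actual free until after the IOTLB flush, so in-flight table walks never see a recycled page. A stripped-down restatement of the push/drain pair (function names are illustrative):

#include <linux/mm.h>

/* Push one table page onto the intrusive list; returns the new head. */
static struct page *collect_table_page(void *table, struct page *freelist)
{
	struct page *p = virt_to_page(table);

	p->freelist = freelist;
	return p;
}

/* Drain the list once it is safe, e.g. after the IOTLB flush. */
static void drain_table_pages(struct page *freelist)
{
	while (freelist) {
		struct page *p = freelist;

		freelist = p->freelist;		/* read before freeing */
		__free_page(p);
	}
}
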
  /*
   * This function is used to add another level to an IO page table. Adding
   * another level increases the size of the address space by 9 bits to a size up
@@@ -1363,10 -1467,13 +1465,13 @@@ static u64 *alloc_pte(struct protection
  
        while (level > end_lvl) {
                u64 __pte, __npte;
+               int pte_level;
  
-               __pte = *pte;
+               __pte     = *pte;
+               pte_level = PM_PTE_LEVEL(__pte);
  
-               if (!IOMMU_PTE_PRESENT(__pte)) {
+               if (!IOMMU_PTE_PRESENT(__pte) ||
+                   pte_level == PAGE_MODE_7_LEVEL) {
                        page = (u64 *)get_zeroed_page(gfp);
                        if (!page)
                                return NULL;
                        __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
  
                        /* pte could have been changed somewhere. */
-                       if (cmpxchg64(pte, __pte, __npte) != __pte) {
+                       if (cmpxchg64(pte, __pte, __npte) != __pte)
                                free_page((unsigned long)page);
-                               continue;
-                       }
+                       else if (pte_level == PAGE_MODE_7_LEVEL)
+                               domain->updated = true;
+                       continue;
                }
  
                /* No level skipping support yet */
-               if (PM_PTE_LEVEL(*pte) != level)
+               if (pte_level != level)
                        return NULL;
  
                level -= 1;
  
-               pte = IOMMU_PTE_PAGE(*pte);
+               pte = IOMMU_PTE_PAGE(__pte);
  
                if (pte_page && level == end_lvl)
                        *pte_page = pte;
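
For readers following the walk above: PM_PTE_LEVEL() extracts the "next level" field of a PTE, and the value 7 the code now tests for has a fixed meaning in the AMD IOMMU page-table format. The macro below mirrors the driver's definition in amd_iommu_types.h:

/* Bits 11:9 of a PTE encode the next page-table level. Two values are
 * special: 0 marks a final 4 KiB page, and 7 (PAGE_MODE_7_LEVEL) marks a
 * large page whose size is encoded in the address bits. That is why
 * alloc_pte() treats a level-7 entry like an absent one and installs a
 * freshly zeroed table page in its place. */
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
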
@@@ -1455,6 -1564,25 +1562,25 @@@ static u64 *fetch_pte(struct protection
        return pte;
  }
  
+ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+ {
+       unsigned long pt;
+       int mode;
+
+       while (cmpxchg64(pte, pteval, 0) != pteval) {
+               pr_warn("IOMMU pte changed since we read it\n");
+               pteval = *pte;
+       }
+
+       if (!IOMMU_PTE_PRESENT(pteval))
+               return freelist;
+
+       pt   = (unsigned long)IOMMU_PTE_PAGE(pteval);
+       mode = IOMMU_PTE_MODE(pteval);
+
+       return free_sub_pt(pt, mode, freelist);
+ }
+
  /*
   * Generic mapping function. It maps a physical address into a DMA
   * address space. It allocates the page table pages if necessary.
@@@ -1469,6 -1597,7 +1595,7 @@@ static int iommu_map_page(struct protec
                          int prot,
                          gfp_t gfp)
  {
+       struct page *freelist = NULL;
        u64 __pte, *pte;
        int i, count;
  
                return -ENOMEM;
  
        for (i = 0; i < count; ++i)
-               if (IOMMU_PTE_PRESENT(pte[i]))
-                       return -EBUSY;
+               freelist = free_clear_pte(&pte[i], pte[i], freelist);
+       if (freelist != NULL)
+               dom->updated = true;
  
        if (count > 1) {
                __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
  
        update_domain(dom);
  
+       /* Everything flushed out, free pages now */
+       free_page_list(freelist);
        return 0;
  }
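
Behaviourally, the change above means that mapping over an already-mapped range no longer fails with -EBUSY: the stale PTEs are reaped into the freelist and their pages released once everything is flushed. A hedged caller's-eye sketch, using the public iommu_map() API; the helper itself is hypothetical:

#include <linux/iommu.h>
#include <linux/sizes.h>

static int remap_as_large_page(struct iommu_domain *dom,
			       unsigned long iova, phys_addr_t paddr)
{
	unsigned long off;
	int ret;

	/* Populate the range with 512 small pages first. */
	for (off = 0; off < SZ_2M; off += SZ_4K) {
		ret = iommu_map(dom, iova + off, paddr + off, SZ_4K,
				IOMMU_READ | IOMMU_WRITE);
		if (ret)
			return ret;
	}

	/* Remap the same range as one 2 MiB page: with this patch the old
	 * PTEs are cleared and reaped instead of triggering -EBUSY. */
	return iommu_map(dom, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
}
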
  
@@@ -1636,67 -1770,6 +1768,6 @@@ static void domain_id_free(int id
        spin_unlock(&pd_bitmap_lock);
  }
  
- #define DEFINE_FREE_PT_FN(LVL, FN)                            \
- static void free_pt_##LVL (unsigned long __pt)                        \
- {                                                             \
-       unsigned long p;                                        \
-       u64 *pt;                                                \
-       int i;                                                  \
-                                                               \
-       pt = (u64 *)__pt;                                       \
-                                                               \
-       for (i = 0; i < 512; ++i) {                             \
-               /* PTE present? */                              \
-               if (!IOMMU_PTE_PRESENT(pt[i]))                  \
-                       continue;                               \
-                                                               \
-               /* Large PTE? */                                \
-               if (PM_PTE_LEVEL(pt[i]) == 0 ||                 \
-                   PM_PTE_LEVEL(pt[i]) == 7)                   \
-                       continue;                               \
-                                                               \
-               p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);       \
-               FN(p);                                          \
-       }                                                       \
-       free_page((unsigned long)pt);                           \
- }
- DEFINE_FREE_PT_FN(l2, free_page)
- DEFINE_FREE_PT_FN(l3, free_pt_l2)
- DEFINE_FREE_PT_FN(l4, free_pt_l3)
- DEFINE_FREE_PT_FN(l5, free_pt_l4)
- DEFINE_FREE_PT_FN(l6, free_pt_l5)
- static void free_pagetable(struct protection_domain *domain)
- {
-       unsigned long root = (unsigned long)domain->pt_root;
-       switch (domain->mode) {
-       case PAGE_MODE_NONE:
-               break;
-       case PAGE_MODE_1_LEVEL:
-               free_page(root);
-               break;
-       case PAGE_MODE_2_LEVEL:
-               free_pt_l2(root);
-               break;
-       case PAGE_MODE_3_LEVEL:
-               free_pt_l3(root);
-               break;
-       case PAGE_MODE_4_LEVEL:
-               free_pt_l4(root);
-               break;
-       case PAGE_MODE_5_LEVEL:
-               free_pt_l5(root);
-               break;
-       case PAGE_MODE_6_LEVEL:
-               free_pt_l6(root);
-               break;
-       default:
-               BUG();
-       }
- }
  static void free_gcr3_tbl_level1(u64 *tbl)
  {
        u64 *ptr;
@@@ -2184,7 -2257,7 +2255,7 @@@ static int amd_iommu_add_device(struct 
                                dev_name(dev));
  
                iommu_ignore_device(dev);
 -              dev->dma_ops = &dma_direct_ops;
 +              dev->dma_ops = NULL;
                goto out;
        }
        init_iommu_group(dev);
@@@ -2337,7 -2410,7 +2408,7 @@@ static dma_addr_t __map_single(struct d
        paddr &= PAGE_MASK;
  
        address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
 -      if (address == AMD_IOMMU_MAPPING_ERROR)
 +      if (!address)
                goto out;
  
        prot = dir2prot(direction);
@@@ -2374,7 -2447,7 +2445,7 @@@ out_unmap
  
        dma_ops_free_iova(dma_dom, address, pages);
  
 -      return AMD_IOMMU_MAPPING_ERROR;
 +      return DMA_MAPPING_ERROR;
  }
  
  /*
@@@ -2425,7 -2498,7 +2496,7 @@@ static dma_addr_t map_page(struct devic
        if (PTR_ERR(domain) == -EINVAL)
                return (dma_addr_t)paddr;
        else if (IS_ERR(domain))
 -              return AMD_IOMMU_MAPPING_ERROR;
 +              return DMA_MAPPING_ERROR;
  
        dma_mask = *dev->dma_mask;
        dma_dom = to_dma_ops_domain(domain);
@@@ -2502,7 -2575,7 +2573,7 @@@ static int map_sg(struct device *dev, s
        npages = sg_num_pages(dev, sglist, nelems);
  
        address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
 -      if (address == AMD_IOMMU_MAPPING_ERROR)
 +      if (address == DMA_MAPPING_ERROR)
                goto out_err;
  
        prot = dir2prot(direction);
@@@ -2625,7 -2698,7 +2696,7 @@@ static void *alloc_coherent(struct devi
        *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
                                 size, DMA_BIDIRECTIONAL, dma_mask);
  
 -      if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
 +      if (*dma_addr == DMA_MAPPING_ERROR)
                goto out_free;
  
        return page_address(page);
@@@ -2676,6 -2749,11 +2747,6 @@@ static int amd_iommu_dma_supported(stru
        return check_device(dev);
  }
  
 -static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
 -{
 -      return dma_addr == AMD_IOMMU_MAPPING_ERROR;
 -}
 -
  static const struct dma_map_ops amd_iommu_dma_ops = {
        .alloc          = alloc_coherent,
        .free           = free_coherent,
        .map_sg         = map_sg,
        .unmap_sg       = unmap_sg,
        .dma_supported  = amd_iommu_dma_supported,
 -      .mapping_error  = amd_iommu_mapping_error,
  };
  
  static int init_reserved_iova_ranges(void)
@@@ -2770,10 -2849,21 +2841,10 @@@ int __init amd_iommu_init_dma_ops(void
        swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
        iommu_detected = 1;
  
 -      /*
 -       * In case we don't initialize SWIOTLB (actually the common case
 -       * when AMD IOMMU is enabled and SME is not active), make sure there
 -       * are global dma_ops set as a fall-back for devices not handled by
 -       * this driver (for example non-PCI devices). When SME is active,
 -       * make sure that swiotlb variable remains set so the global dma_ops
 -       * continue to be SWIOTLB.
 -       */
 -      if (!swiotlb)
 -              dma_ops = &dma_direct_ops;
 -
        if (amd_iommu_unmap_flush)
-               pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
+               pr_info("IO/TLB flush on unmap enabled\n");
        else
-               pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
+               pr_info("Lazy IO/TLB flushing enabled\n");
  
        return 0;
  
@@@ -2878,7 -2968,7 +2949,7 @@@ static struct iommu_domain *amd_iommu_d
        case IOMMU_DOMAIN_DMA:
                dma_domain = dma_ops_domain_alloc();
                if (!dma_domain) {
-                       pr_err("AMD-Vi: Failed to allocate\n");
+                       pr_err("Failed to allocate\n");
                        return NULL;
                }
                pdomain = &dma_domain->domain;
@@@ -4299,7 -4389,7 +4370,7 @@@ static int amd_ir_set_vcpu_affinity(str
         * legacy mode. So, we force legacy mode instead.
         */
        if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
-               pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
+               pr_debug("%s: Fall back to using intr legacy remap\n",
                         __func__);
                pi_data->is_guest_mode = false;
        }
index 60c7e9e9901e90767ce82e055e2c2ffd9b80b7ff,13787d3ee33825f7b42fb42e622f7335871ff826..d19f3d6b43c16c7094a093f442d544e5e73d5680
@@@ -32,6 -32,8 +32,6 @@@
  #include <linux/scatterlist.h>
  #include <linux/vmalloc.h>
  
 -#define IOMMU_MAPPING_ERROR   0
 -
  struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
@@@ -175,7 -177,7 +175,7 @@@ EXPORT_SYMBOL(iommu_put_dma_cookie)
  void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
  {
  
-       if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
+       if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
                iort_iommu_msi_get_resv_regions(dev, list);
  
  }
@@@ -447,20 -449,17 +447,17 @@@ static void __iommu_dma_free_pages(stru
        kvfree(pages);
  }
  
- static struct page **__iommu_dma_alloc_pages(unsigned int count,
-               unsigned long order_mask, gfp_t gfp)
+ static struct page **__iommu_dma_alloc_pages(struct device *dev,
+               unsigned int count, unsigned long order_mask, gfp_t gfp)
  {
        struct page **pages;
-       unsigned int i = 0, array_size = count * sizeof(*pages);
+       unsigned int i = 0, nid = dev_to_node(dev);
  
        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;
  
-       if (array_size <= PAGE_SIZE)
-               pages = kzalloc(array_size, GFP_KERNEL);
-       else
-               pages = vzalloc(array_size);
+       pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;
  
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);
+                       gfp_t alloc_flags = gfp;
  
                        order_size = 1U << order;
-                       page = alloc_pages((order_mask - order_size) ?
-                                          gfp | __GFP_NORETRY : gfp, order);
+                       if (order_mask > order_size)
+                               alloc_flags |= __GFP_NORETRY;
+                       page = alloc_pages_node(nid, alloc_flags, order);
                        if (!page)
                                continue;
                        if (!order)
@@@ -521,7 -522,7 +520,7 @@@ void iommu_dma_free(struct device *dev
  {
        __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 -      *handle = IOMMU_MAPPING_ERROR;
 +      *handle = DMA_MAPPING_ERROR;
  }
  
  /**
@@@ -554,7 -555,7 +553,7 @@@ struct page **iommu_dma_alloc(struct de
        dma_addr_t iova;
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
  
 -      *handle = IOMMU_MAPPING_ERROR;
 +      *handle = DMA_MAPPING_ERROR;
  
        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                alloc_sizes = min_size;
  
        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
+       pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
+                                       gfp);
        if (!pages)
                return NULL;
  
@@@ -647,11 -649,11 +647,11 @@@ static dma_addr_t __iommu_dma_map(struc
  
        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
 -              return IOMMU_MAPPING_ERROR;
 +              return DMA_MAPPING_ERROR;
  
        if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
 -              return IOMMU_MAPPING_ERROR;
 +              return DMA_MAPPING_ERROR;
        }
        return iova + iova_off;
  }
@@@ -692,7 -694,7 +692,7 @@@ static int __finalise_sg(struct device 
  
                s->offset += s_iova_off;
                s->length = s_length;
 -              sg_dma_address(s) = IOMMU_MAPPING_ERROR;
 +              sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;
  
                /*
@@@ -735,11 -737,11 +735,11 @@@ static void __invalidate_sg(struct scat
        int i;
  
        for_each_sg(sg, s, nents, i) {
 -              if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
 +              if (sg_dma_address(s) != DMA_MAPPING_ERROR)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
 -              sg_dma_address(s) = IOMMU_MAPPING_ERROR;
 +              sg_dma_address(s) = DMA_MAPPING_ERROR;
                sg_dma_len(s) = 0;
        }
  }
@@@ -856,6 -858,11 +856,6 @@@ void iommu_dma_unmap_resource(struct de
        __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
  }
  
 -int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 -{
 -      return dma_addr == IOMMU_MAPPING_ERROR;
 -}
 -
  static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
  {
                return NULL;
  
        iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
 -      if (iommu_dma_mapping_error(dev, iova))
 +      if (iova == DMA_MAPPING_ERROR)
                goto out_free_page;
  
        INIT_LIST_HEAD(&msi_page->list);
diff --combined drivers/iommu/dmar.c
index 1edf2a25133621fbf1301b77d0f3042db74039bc,9511f9aeb77c7f758ff5ee17f3c5ed4f2054e83c..dc9f14811e0f4d9ed3a12936501274ad55789a55
@@@ -1160,6 -1160,7 +1160,7 @@@ static int qi_check_fault(struct intel_
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;
+       int shift = qi_shift(iommu);
  
        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
-               if ((head >> DMAR_IQ_SHIFT) == index) {
-                       pr_err("VT-d detected invalid descriptor: "
-                               "low=%llx, high=%llx\n",
-                               (unsigned long long)qi->desc[index].low,
-                               (unsigned long long)qi->desc[index].high);
-                       memcpy(&qi->desc[index], &qi->desc[wait_index],
-                                       sizeof(struct qi_desc));
+               if ((head >> shift) == index) {
+                       struct qi_desc *desc = qi->desc + head;
+
+                       /*
+                        * desc->qw2 and desc->qw3 are either reserved or
+                        * used by software as private data, so we do not
+                        * print those two qwords for security reasons.
+                        */
+                       pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
+                              (unsigned long long)desc->qw0,
+                              (unsigned long long)desc->qw1);
+                       memcpy(desc, qi->desc + (wait_index << shift),
+                              1 << shift);
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
-               head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+               head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
-               tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+               tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
  
                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
  
@@@ -1222,15 -1229,14 +1229,14 @@@ int qi_submit_sync(struct qi_desc *desc
  {
        int rc;
        struct q_inval *qi = iommu->qi;
-       struct qi_desc *hw, wait_desc;
+       int offset, shift, length;
+       struct qi_desc wait_desc;
        int wait_index, index;
        unsigned long flags;
  
        if (!qi)
                return 0;
  
-       hw = qi->desc;
  restart:
        rc = 0;
  
  
        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;
+       shift = qi_shift(iommu);
+       length = 1 << shift;
  
        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
  
-       hw[index] = *desc;
-       wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+       offset = index << shift;
+       memcpy(qi->desc + offset, desc, length);
+       wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
-       wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
+       wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
+       wait_desc.qw2 = 0;
+       wait_desc.qw3 = 0;
  
-       hw[wait_index] = wait_desc;
+       offset = wait_index << shift;
+       memcpy(qi->desc + offset, &wait_desc, length);
  
        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;
         * update the HW tail register indicating the presence of
         * new descriptors.
         */
-       writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
+       writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
  
        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
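qi_submit_sync() pairs every real descriptor with a wait descriptor in the
next ring slot: the wait descriptor's qw1 holds the physical address of a
status word that the hardware sets to QI_DONE once everything before it has
completed, which is what the loop above polls. A simplified sketch of the
two-slot submit, assuming the same 256-entry ring (illustrative, with no
locking or free-slot accounting):

    #include <string.h>

    typedef unsigned long long u64;
    struct example_qi_desc { u64 qw0, qw1, qw2, qw3; };

    /* Copy one invalidation descriptor plus its wait descriptor into the
     * ring. With shift == 4 only qw0/qw1 are copied, matching the 128-bit
     * legacy layout; shift == 5 copies all four qwords. The real wait
     * descriptor also carries QI_IWD_STATUS_DATA()/QI_IWD_TYPE in qw0. */
    static void example_submit(unsigned char *queue, int index, int shift,
                               const struct example_qi_desc *desc,
                               u64 status_pa)
    {
            struct example_qi_desc wait = { .qw1 = status_pa };
            int wait_index = (index + 1) % 256;          /* QI_LENGTH */

            memcpy(queue + ((unsigned long)index << shift), desc,
                   1u << shift);
            memcpy(queue + ((unsigned long)wait_index << shift), &wait,
                   1u << shift);
    }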
@@@ -1298,8 -1309,10 +1309,10 @@@ void qi_global_iec(struct intel_iommu *
  {
        struct qi_desc desc;
  
-       desc.low = QI_IEC_TYPE;
-       desc.high = 0;
+       desc.qw0 = QI_IEC_TYPE;
+       desc.qw1 = 0;
+       desc.qw2 = 0;
+       desc.qw3 = 0;
  
        /* should never fail */
        qi_submit_sync(&desc, iommu);
@@@ -1310,9 -1323,11 +1323,11 @@@ void qi_flush_context(struct intel_iomm
  {
        struct qi_desc desc;
  
-       desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+       desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
-       desc.high = 0;
+       desc.qw1 = 0;
+       desc.qw2 = 0;
+       desc.qw3 = 0;
  
        qi_submit_sync(&desc, iommu);
  }
@@@ -1331,10 -1346,12 +1346,12 @@@ void qi_flush_iotlb(struct intel_iommu 
        if (cap_read_drain(iommu->cap))
                dr = 1;
  
-       desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+       desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
-       desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+       desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);
+       desc.qw2 = 0;
+       desc.qw3 = 0;
  
        qi_submit_sync(&desc, iommu);
  }
@@@ -1347,15 -1364,17 +1364,17 @@@ void qi_flush_dev_iotlb(struct intel_io
        if (mask) {
                WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
-               desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+               desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
-               desc.high = QI_DEV_IOTLB_ADDR(addr);
+               desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
  
        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;
  
-       desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+       desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
+       desc.qw2 = 0;
+       desc.qw3 = 0;
  
        qi_submit_sync(&desc, iommu);
  }
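The mask handling in qi_flush_dev_iotlb() encodes the invalidation span in
the address itself: for a 2^mask-page range, bits 0 through
(VTD_PAGE_SHIFT + mask - 2) are forced to one. For example, mask = 2 with
VTD_PAGE_SHIFT = 12 ORs in 0x1fff, marking a four-page span. A sketch of
just that encoding (illustrative names):

    /* Sketch: size-encoded address for a device-IOTLB invalidation. */
    static unsigned long long example_dev_iotlb_addr(unsigned long long addr,
                                                     unsigned int mask)
    {
            const unsigned int page_shift = 12;      /* VTD_PAGE_SHIFT */

            if (mask)    /* 2^mask pages: fill the low bits with ones */
                    addr |= (1ULL << (page_shift + mask - 1)) - 1;
            return addr; /* caller also sets the QI_DEV_IOTLB_SIZE bit */
    }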
@@@ -1403,16 -1422,24 +1422,24 @@@ static void __dmar_enable_qi(struct int
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;
+       u64 val = virt_to_phys(qi->desc);
  
        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;
  
+       /*
+        * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
+        * is present.
+        */
+       if (ecap_smts(iommu->ecap))
+               val |= (1 << 11) | 1;
+
        raw_spin_lock_irqsave(&iommu->register_lock, flags);
  
        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);
  
-       dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
+       dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
  
        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
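__dmar_enable_qi() now ORs two extra fields into the queue base before
writing IQA_REG: DW (bit 11) selects 256-bit descriptors and QS (the low
bits) selects a 2^QS-page queue, here QS = 1 for two pages. A small sketch
of that composition (illustrative names, not part of the patch):

    /* Sketch: IQA_REG value = queue base | DW | QS, as in the hunk above. */
    static unsigned long long example_iqa_val(unsigned long long desc_pa,
                                              int scalable_mode)
    {
            unsigned long long val = desc_pa;   /* page-aligned queue base */

            if (scalable_mode)
                    val |= (1ULL << 11)         /* DW: 256-bit descriptors */
                         | 1ULL;                /* QS: 2^1 = 2 pages */
            return val;
    }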
@@@ -1448,8 -1475,12 +1475,12 @@@ int dmar_enable_qi(struct intel_iommu *
  
        qi = iommu->qi;
  
-       desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
+       /*
+        * Need two pages to accommodate 256 descriptors of 256 bits each
+        * if the remapping hardware supports scalable mode translation.
+        */
+       desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+                                    !!ecap_smts(iommu->ecap));
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
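The allocation order follows directly from the descriptor width: 256
descriptors of 16 bytes fit in one 4 KB page (order 0), while 256
descriptors of 32 bytes need 8 KB (order 1), which is why the call passes
!!ecap_smts(iommu->ecap) as the page order. Equivalently (sketch):

    /* Sketch: queue allocation order from the per-IOMMU shift. */
    static int example_qi_order(int shift)       /* 4 legacy, 5 scalable */
    {
            unsigned long bytes = 256ul << shift;   /* QI_LENGTH slots */

            return bytes > 4096 ? 1 : 0;         /* 4 KB -> 0, 8 KB -> 1 */
    }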
@@@ -2042,28 -2073,3 +2073,28 @@@ int dmar_device_remove(acpi_handle hand
  {
        return dmar_device_hotplug(handle, false);
  }
 +
 +/*
 + * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
 + *
 + * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
 + * the ACPI DMAR table. This means that the platform boot firmware has made
 + * sure no device can issue DMA outside of RMRR regions.
 + */
 +bool dmar_platform_optin(void)
 +{
 +      struct acpi_table_dmar *dmar;
 +      acpi_status status;
 +      bool ret;
 +
 +      status = acpi_get_table(ACPI_SIG_DMAR, 0,
 +                              (struct acpi_table_header **)&dmar);
 +      if (ACPI_FAILURE(status))
 +              return false;
 +
 +      ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
 +      acpi_put_table((struct acpi_table_header *)dmar);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(dmar_platform_optin);
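The in-tree consumer of this helper is platform_optin_force_iommu() in the
intel-iommu.c portion of this diff; its calling pattern reduces to roughly
the following sketch (illustrative, with no_platform_optin mirroring the
intel_iommu=off handling below):

    #include <stdbool.h>

    bool dmar_platform_optin(void);              /* the helper above */

    /* Sketch: honor the firmware opt-in unless the admin opted out. */
    static int example_should_force_iommu(int no_platform_optin)
    {
            if (!dmar_platform_optin() || no_platform_optin)
                    return 0;
            /* the real code also requires at least one untrusted device */
            return 1;
    }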
index 63b6ce78492aada84756b417293444c2c7b2b454,eb5351e8cde5370cad92109e6ec5298a726dd2d4..2bd9ac285c0dee363fe9b9feecf2f9082e49ebbe
@@@ -184,7 -184,6 +184,7 @@@ static int rwbf_quirk
   */
  static int force_on = 0;
  int intel_iommu_tboot_noforce;
 +static int no_platform_optin;
  
  #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
  
@@@ -291,49 -290,6 +291,6 @@@ static inline void context_clear_entry(
        context->hi = 0;
  }
  
- /*
-  * 0: readable
-  * 1: writable
-  * 2-6: reserved
-  * 7: super page
-  * 8-10: available
-  * 11: snoop behavior
-  * 12-63: Host physcial address
-  */
- struct dma_pte {
-       u64 val;
- };
- static inline void dma_clear_pte(struct dma_pte *pte)
- {
-       pte->val = 0;
- }
- static inline u64 dma_pte_addr(struct dma_pte *pte)
- {
- #ifdef CONFIG_64BIT
-       return pte->val & VTD_PAGE_MASK;
- #else
-       /* Must have a full atomic 64-bit read */
-       return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
- #endif
- }
- static inline bool dma_pte_present(struct dma_pte *pte)
- {
-       return (pte->val & 3) != 0;
- }
- static inline bool dma_pte_superpage(struct dma_pte *pte)
- {
-       return (pte->val & DMA_PTE_LARGE_PAGE);
- }
- static inline int first_pte_in_page(struct dma_pte *pte)
- {
-       return !((unsigned long)pte & ~VTD_PAGE_MASK);
- }
  /*
   * This domain is a statically identity mapping domain.
   *    1. This domain creats a static 1:1 mapping to all usable memory.
@@@ -406,38 -362,16 +363,16 @@@ static int dmar_map_gfx = 1
  static int dmar_forcedac;
  static int intel_iommu_strict;
  static int intel_iommu_superpage = 1;
- static int intel_iommu_ecs = 1;
- static int intel_iommu_pasid28;
+ static int intel_iommu_sm = 1;
  static int iommu_identity_mapping;
  
  #define IDENTMAP_ALL          1
  #define IDENTMAP_GFX          2
  #define IDENTMAP_AZALIA               4
  
- /* Broadwell and Skylake have broken ECS support -- normal so-called "second
-  * level" translation of DMA requests-without-PASID doesn't actually happen
-  * unless you also set the NESTE bit in an extended context-entry. Which of
-  * course means that SVM doesn't work because it's trying to do nested
-  * translation of the physical addresses it finds in the process page tables,
-  * through the IOVA->phys mapping found in the "second level" page tables.
-  *
-  * The VT-d specification was retroactively changed to change the definition
-  * of the capability bits and pretend that Broadwell/Skylake never happened...
-  * but unfortunately the wrong bit was changed. It's ECS which is broken, but
-  * for some reason it was the PASID capability bit which was redefined (from
-  * bit 28 on BDW/SKL to bit 40 in future).
-  *
-  * So our test for ECS needs to eschew those implementations which set the old
-  * PASID capabiity bit 28, since those are the ones on which ECS is broken.
-  * Unless we are working around the 'pasid28' limitations, that is, by putting
-  * the device into passthrough mode for normal DMA and thus masking the bug.
-  */
- #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
-                           (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
- /* PASID support is thus enabled if ECS is enabled and *either* of the old
-  * or new capability bits are set. */
- #define pasid_enabled(iommu) (ecs_enabled(iommu) &&                   \
-                             (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
+ #define sm_supported(iommu)   (intel_iommu_sm && ecap_smts((iommu)->ecap))
+ #define pasid_supported(iommu)        (sm_supported(iommu) &&                 \
+                                ecap_pasid((iommu)->ecap))
  
  int intel_iommu_gfx_mapped;
  EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@@ -448,21 -382,24 +383,24 @@@ static LIST_HEAD(device_domain_list)
  
  /*
   * Iterate over elements in device_domain_list and call the specified
-  * callback @fn against each element. This helper should only be used
-  * in the context where the device_domain_lock has already been holden.
+  * callback @fn against each element.
   */
  int for_each_device_domain(int (*fn)(struct device_domain_info *info,
                                     void *data), void *data)
  {
        int ret = 0;
+       unsigned long flags;
        struct device_domain_info *info;
  
-       assert_spin_locked(&device_domain_lock);
+       spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &device_domain_list, global) {
                ret = fn(info, data);
-               if (ret)
+               if (ret) {
+                       spin_unlock_irqrestore(&device_domain_lock, flags);
                        return ret;
+               }
        }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
  
        return 0;
  }
@@@ -504,7 -441,6 +442,7 @@@ static int __init intel_iommu_setup(cha
                        pr_info("IOMMU enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
 +                      no_platform_optin = 1;
                        pr_info("IOMMU disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                } else if (!strncmp(str, "sp_off", 6)) {
                        pr_info("Disable supported super page\n");
                        intel_iommu_superpage = 0;
-               } else if (!strncmp(str, "ecs_off", 7)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable extended context table support\n");
-                       intel_iommu_ecs = 0;
-               } else if (!strncmp(str, "pasid28", 7)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: enable pre-production PASID support\n");
-                       intel_iommu_pasid28 = 1;
-                       iommu_identity_mapping |= IDENTMAP_GFX;
+               } else if (!strncmp(str, "sm_off", 6)) {
+                       pr_info("Intel-IOMMU: disable scalable mode support\n");
+                       intel_iommu_sm = 0;
                } else if (!strncmp(str, "tboot_noforce", 13)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@@ -773,7 -703,7 +705,7 @@@ struct context_entry *iommu_context_add
        u64 *entry;
  
        entry = &root->lo;
-       if (ecs_enabled(iommu)) {
+       if (sm_supported(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
                        entry = &root->hi;
@@@ -915,7 -845,7 +847,7 @@@ static void free_context_table(struct i
                if (context)
                        free_pgtable_page(context);
  
-               if (!ecs_enabled(iommu))
+               if (!sm_supported(iommu))
                        continue;
  
                context = iommu_context_addr(iommu, i, 0x80, 0);
@@@ -1267,8 -1197,8 +1199,8 @@@ static void iommu_set_root_entry(struc
        unsigned long flag;
  
        addr = virt_to_phys(iommu->root_entry);
-       if (ecs_enabled(iommu))
-               addr |= DMA_RTADDR_RTT;
+       if (sm_supported(iommu))
+               addr |= DMA_RTADDR_SMT;
  
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
        raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
  }
  
- static void iommu_flush_write_buffer(struct intel_iommu *iommu)
+ void iommu_flush_write_buffer(struct intel_iommu *iommu)
  {
        u32 val;
        unsigned long flag;
@@@ -1473,8 -1403,7 +1405,8 @@@ static void iommu_enable_dev_iotlb(stru
        if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
                info->pri_enabled = 1;
  #endif
 -      if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
 +      if (!pdev->untrusted && info->ats_supported &&
 +          !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
                info->ats_enabled = 1;
                domain_update_iotlb(info->domain);
                info->ats_qdep = pci_ats_queue_depth(pdev);
@@@ -1694,6 -1623,16 +1626,16 @@@ static int iommu_init_domains(struct in
         */
        set_bit(0, iommu->domain_ids);
  
+       /*
+        * VT-d spec rev 3.0 (section 6.2.3.1) requires that each PASID
+        * entry for first-level or pass-through translation modes should
+        * be programmed with a domain id different from those used for
+        * second-level or nested translation. We reserve a domain id for
+        * this purpose.
+        */
+       if (sm_supported(iommu))
+               set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
+
        return 0;
  }
  
@@@ -1758,10 -1697,9 +1700,9 @@@ static void free_dmar_iommu(struct inte
        free_context_table(iommu);
  
  #ifdef CONFIG_INTEL_IOMMU_SVM
-       if (pasid_enabled(iommu)) {
+       if (pasid_supported(iommu)) {
                if (ecap_prs(iommu->ecap))
                        intel_svm_finish_prq(iommu);
-               intel_svm_exit(iommu);
        }
  #endif
  }
@@@ -1981,8 -1919,59 +1922,59 @@@ static void domain_exit(struct dmar_dom
        free_domain_mem(domain);
  }
  
+ /*
+  * Get the PASID directory size for scalable mode context entry.
+  * Value of X in the PDTS field of a scalable mode context entry
+  * indicates PASID directory with 2^(X + 7) entries.
+  */
+ static inline unsigned long context_get_sm_pds(struct pasid_table *table)
+ {
+       int pds, max_pde;
+
+       max_pde = table->max_pasid >> PASID_PDE_SHIFT;
+       pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
+       if (pds < 7)
+               return 0;
+
+       return pds - 7;
+ }
+
+ /*
+  * Set the RID_PASID field of a scalable mode context entry. The
+  * IOMMU hardware will use the PASID value set in this field to
+  * translate DMA requests without PASID.
+  */
+ static inline void
+ context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
+ {
+       context->hi |= pasid & ((1 << 20) - 1);
+       context->hi |= (1 << 20);
+ }
+
+ /*
+  * Set the DTE (Device-TLB Enable) field of a scalable mode context
+  * entry.
+  */
+ static inline void context_set_sm_dte(struct context_entry *context)
+ {
+       context->lo |= (1 << 2);
+ }
+
+ /*
+  * Set the PRE (Page Request Enable) field of a scalable mode context
+  * entry.
+  */
+ static inline void context_set_sm_pre(struct context_entry *context)
+ {
+       context->lo |= (1 << 4);
+ }
+
+ /* Convert value to context PASID directory size field coding. */
+ #define context_pdts(pds)     (((pds) & 0x7) << 9)
+
  static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
+                                     struct pasid_table *table,
                                      u8 bus, u8 devfn)
  {
        u16 did = domain->iommu_did[iommu->seq_id];
        struct device_domain_info *info = NULL;
        struct context_entry *context;
        unsigned long flags;
-       struct dma_pte *pgd;
-       int ret, agaw;
+       int ret;
  
        WARN_ON(did == 0);
  
                }
        }
  
-       pgd = domain->pgd;
        context_clear_entry(context);
-       context_set_domain_id(context, did);
  
-       /*
-        * Skip top levels of page tables for iommu which has less agaw
-        * than default.  Unnecessary for PT mode.
-        */
-       if (translation != CONTEXT_TT_PASS_THROUGH) {
-               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-                       ret = -ENOMEM;
-                       pgd = phys_to_virt(dma_pte_addr(pgd));
-                       if (!dma_pte_present(pgd))
-                               goto out_unlock;
-               }
+       if (sm_supported(iommu)) {
+               unsigned long pds;
  
-               info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
-               if (info && info->ats_supported)
-                       translation = CONTEXT_TT_DEV_IOTLB;
-               else
-                       translation = CONTEXT_TT_MULTI_LEVEL;
+               WARN_ON(!table);
+               /* Setup the PASID DIR pointer: */
+               pds = context_get_sm_pds(table);
+               context->lo = (u64)virt_to_phys(table->table) |
+                               context_pdts(pds);
+               /* Setup the RID_PASID field: */
+               context_set_sm_rid2pasid(context, PASID_RID2PASID);
  
-               context_set_address_root(context, virt_to_phys(pgd));
-               context_set_address_width(context, iommu->agaw);
-       } else {
                /*
-                * In pass through mode, AW must be programmed to
-                * indicate the largest AGAW value supported by
-                * hardware. And ASR is ignored by hardware.
+                * Setup the Device-TLB enable bit and Page request
+                * Enable bit:
                 */
-               context_set_address_width(context, iommu->msagaw);
+               info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+               if (info && info->ats_supported)
+                       context_set_sm_dte(context);
+               if (info && info->pri_supported)
+                       context_set_sm_pre(context);
+       } else {
+               struct dma_pte *pgd = domain->pgd;
+               int agaw;
+               context_set_domain_id(context, did);
+               context_set_translation_type(context, translation);
+               if (translation != CONTEXT_TT_PASS_THROUGH) {
+                       /*
+                        * Skip top levels of page tables for iommu which has
+                        * less agaw than default. Unnecessary for PT mode.
+                        */
+                       for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+                               ret = -ENOMEM;
+                               pgd = phys_to_virt(dma_pte_addr(pgd));
+                               if (!dma_pte_present(pgd))
+                                       goto out_unlock;
+                       }
+                       info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+                       if (info && info->ats_supported)
+                               translation = CONTEXT_TT_DEV_IOTLB;
+                       else
+                               translation = CONTEXT_TT_MULTI_LEVEL;
+                       context_set_address_root(context, virt_to_phys(pgd));
+                       context_set_address_width(context, agaw);
+               } else {
+                       /*
+                        * In pass through mode, AW must be programmed to
+                        * indicate the largest AGAW value supported by
+                        * hardware. And ASR is ignored by hardware.
+                        */
+                       context_set_address_width(context, iommu->msagaw);
+               }
        }
  
-       context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));
@@@ -2105,6 -2119,7 +2122,7 @@@ out_unlock
  struct domain_context_mapping_data {
        struct dmar_domain *domain;
        struct intel_iommu *iommu;
+       struct pasid_table *table;
  };
  
  static int domain_context_mapping_cb(struct pci_dev *pdev,
        struct domain_context_mapping_data *data = opaque;
  
        return domain_context_mapping_one(data->domain, data->iommu,
-                                         PCI_BUS_NUM(alias), alias & 0xff);
+                                         data->table, PCI_BUS_NUM(alias),
+                                         alias & 0xff);
  }
  
  static int
  domain_context_mapping(struct dmar_domain *domain, struct device *dev)
  {
+       struct domain_context_mapping_data data;
+       struct pasid_table *table;
        struct intel_iommu *iommu;
        u8 bus, devfn;
-       struct domain_context_mapping_data data;
  
        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;
  
+       table = intel_pasid_get_table(dev);
        if (!dev_is_pci(dev))
-               return domain_context_mapping_one(domain, iommu, bus, devfn);
+               return domain_context_mapping_one(domain, iommu, table,
+                                                 bus, devfn);
  
        data.domain = domain;
        data.iommu = iommu;
+       data.table = table;
  
        return pci_for_each_dma_alias(to_pci_dev(dev),
                                      &domain_context_mapping_cb, &data);
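context_get_sm_pds() above implements the PDTS coding described in its
comment: a field value X denotes a PASID directory of 2^(X + 7) entries.
Assuming PASID_PDE_SHIFT is 6 (64 PASIDs per directory entry), a full
20-bit PASID space needs 2^20 / 2^6 = 2^14 directory entries, so
PDTS = 14 - 7 = 7. A standalone sketch of the same computation:

    /* Sketch: PDTS value for a given max PASID. Assumes max_pasid is a
     * nonzero power of two, so the lowest set bit of max_pde (what
     * find_first_bit() returns in the patch) is also its highest. */
    static unsigned long example_pdts(unsigned int max_pasid)
    {
            unsigned int max_pde = max_pasid >> 6;  /* PASID_PDE_SHIFT */
            int pds = __builtin_ctz(max_pde);       /* as find_first_bit() */

            return pds < 7 ? 0 : (unsigned long)(pds - 7);
    }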
@@@ -2467,8 -2488,8 +2491,8 @@@ static struct dmar_domain *dmar_insert_
                    dmar_find_matched_atsr_unit(pdev))
                        info->ats_supported = 1;
  
-               if (ecs_enabled(iommu)) {
-                       if (pasid_enabled(iommu)) {
+               if (sm_supported(iommu)) {
+                       if (pasid_supported(iommu)) {
                                int features = pci_pasid_features(pdev);
                                if (features >= 0)
                                        info->pasid_supported = features | 1;
        list_add(&info->global, &device_domain_list);
        if (dev)
                dev->archdata.iommu = info;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
  
-       if (dev && dev_is_pci(dev) && info->pasid_supported) {
+       /* PASID table is mandatory for a PCI device in scalable mode. */
+       if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
                ret = intel_pasid_alloc_table(dev);
                if (ret) {
-                       pr_warn("No pasid table for %s, pasid disabled\n",
-                               dev_name(dev));
-                       info->pasid_supported = 0;
+                       pr_err("PASID table allocation for %s failed\n",
+                              dev_name(dev));
+                       dmar_remove_one_dev_info(domain, dev);
+                       return NULL;
+               }
+               /* Setup the PASID entry for requests without PASID: */
+               spin_lock(&iommu->lock);
+               if (hw_pass_through && domain_type_is_si(domain))
+                       ret = intel_pasid_setup_pass_through(iommu, domain,
+                                       dev, PASID_RID2PASID);
+               else
+                       ret = intel_pasid_setup_second_level(iommu, domain,
+                                       dev, PASID_RID2PASID);
+               spin_unlock(&iommu->lock);
+               if (ret) {
+                       pr_err("Setup RID2PASID for %s failed\n",
+                              dev_name(dev));
+                       dmar_remove_one_dev_info(domain, dev);
+                       return NULL;
                }
        }
-       spin_unlock_irqrestore(&device_domain_lock, flags);
  
        if (dev && domain_context_mapping(domain, dev)) {
                pr_err("Domain context map for %s failed\n", dev_name(dev));
@@@ -2898,13 -2937,6 +2940,13 @@@ static int iommu_should_identity_map(st
                if (device_is_rmrr_locked(dev))
                        return 0;
  
 +              /*
 +               * Prevent any device marked as untrusted from getting
 +               * placed into the statically identity mapping domain.
 +               */
 +              if (pdev->untrusted)
 +                      return 0;
 +
                if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
                        return 1;
  
@@@ -3287,7 -3319,7 +3329,7 @@@ static int __init init_dmars(void
                 * We need to ensure the system pasid table is no bigger
                 * than the smallest supported.
                 */
-               if (pasid_enabled(iommu)) {
+               if (pasid_supported(iommu)) {
                        u32 temp = 2 << ecap_pss(iommu->ecap);
  
                        intel_pasid_max_id = min_t(u32, temp,
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
  #ifdef CONFIG_INTEL_IOMMU_SVM
-               if (pasid_enabled(iommu))
+               if (pasid_supported(iommu))
                        intel_svm_init(iommu);
  #endif
        }
@@@ -3452,7 -3484,7 +3494,7 @@@ domains_done
                iommu_flush_write_buffer(iommu);
  
  #ifdef CONFIG_INTEL_IOMMU_SVM
-               if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
+               if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
                        ret = intel_svm_enable_prq(iommu);
                        if (ret)
                                goto free_iommu;
@@@ -3607,11 -3639,9 +3649,11 @@@ static int iommu_no_mapping(struct devi
        return 0;
  }
  
 -static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 -                                   size_t size, int dir, u64 dma_mask)
 +static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
 +                                 unsigned long offset, size_t size, int dir,
 +                                 u64 dma_mask)
  {
 +      phys_addr_t paddr = page_to_phys(page) + offset;
        struct dmar_domain *domain;
        phys_addr_t start_paddr;
        unsigned long iova_pfn;
  
        domain = get_valid_domain_for_dev(dev);
        if (!domain)
 -              return 0;
 +              return DMA_MAPPING_ERROR;
  
        iommu = domain_get_iommu(domain);
        size = aligned_nrpages(paddr, size);
@@@ -3665,7 -3695,7 +3707,7 @@@ error
                free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
        pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
                dev_name(dev), size, (unsigned long long)paddr, dir);
 -      return 0;
 +      return DMA_MAPPING_ERROR;
  }
  
  static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
  {
 -      return __intel_map_single(dev, page_to_phys(page) + offset, size,
 -                                dir, *dev->dma_mask);
 +      return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
  }
  
  static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@@ -3764,9 -3795,10 +3806,9 @@@ static void *intel_alloc_coherent(struc
                return NULL;
        memset(page_address(page), 0, size);
  
 -      *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
 -                                       DMA_BIDIRECTIONAL,
 -                                       dev->coherent_dma_mask);
 -      if (*dma_handle)
 +      *dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
 +                                     dev->coherent_dma_mask);
 +      if (*dma_handle != DMA_MAPPING_ERROR)
                return page_address(page);
        if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
                __free_pages(page, order);
@@@ -3875,6 -3907,11 +3917,6 @@@ static int intel_map_sg(struct device *
        return nelems;
  }
  
 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
 -{
 -      return !dma_addr;
 -}
 -
  static const struct dma_map_ops intel_dma_ops = {
        .alloc = intel_alloc_coherent,
        .free = intel_free_coherent,
        .unmap_sg = intel_unmap_sg,
        .map_page = intel_map_page,
        .unmap_page = intel_unmap_page,
 -      .mapping_error = intel_mapping_error,
        .dma_supported = dma_direct_supported,
  };
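With .mapping_error gone, callers detect failure by comparing against the
generic DMA_MAPPING_ERROR sentinel via dma_mapping_error(), which is why
__intel_map_page() now returns that constant instead of 0. The typical
caller-side pattern under the new convention looks like this (sketch;
assumes kernel context with <linux/dma-mapping.h>):

    /* Sketch: mapping-failure check against DMA_MAPPING_ERROR. */
    static int example_map(struct device *dev, struct page *page,
                           size_t size, dma_addr_t *out)
    {
            dma_addr_t addr = dma_map_page(dev, page, 0, size,
                                           DMA_TO_DEVICE);

            if (dma_mapping_error(dev, addr))  /* addr == DMA_MAPPING_ERROR */
                    return -ENOMEM;

            *out = addr;
            return 0;
    }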
  
@@@ -4335,7 -4373,7 +4377,7 @@@ static int intel_iommu_add(struct dmar_
                goto out;
  
  #ifdef CONFIG_INTEL_IOMMU_SVM
-       if (pasid_enabled(iommu))
+       if (pasid_supported(iommu))
                intel_svm_init(iommu);
  #endif
  
        iommu_flush_write_buffer(iommu);
  
  #ifdef CONFIG_INTEL_IOMMU_SVM
-       if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
+       if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
                ret = intel_svm_enable_prq(iommu);
                if (ret)
                        goto disable_iommu;
@@@ -4732,54 -4770,14 +4774,54 @@@ const struct attribute_group *intel_iom
        NULL,
  };
  
 +static int __init platform_optin_force_iommu(void)
 +{
 +      struct pci_dev *pdev = NULL;
 +      bool has_untrusted_dev = false;
 +
 +      if (!dmar_platform_optin() || no_platform_optin)
 +              return 0;
 +
 +      for_each_pci_dev(pdev) {
 +              if (pdev->untrusted) {
 +                      has_untrusted_dev = true;
 +                      break;
 +              }
 +      }
 +
 +      if (!has_untrusted_dev)
 +              return 0;
 +
 +      if (no_iommu || dmar_disabled)
 +              pr_info("Intel-IOMMU force enabled due to platform opt in\n");
 +
 +      /*
 +       * If Intel-IOMMU is disabled by default, we will apply identity
 +       * map for all devices except those marked as being untrusted.
 +       */
 +      if (dmar_disabled)
 +              iommu_identity_mapping |= IDENTMAP_ALL;
 +
 +      dmar_disabled = 0;
 +#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
 +      swiotlb = 0;
 +#endif
 +      no_iommu = 0;
 +
 +      return 1;
 +}
 +
  int __init intel_iommu_init(void)
  {
        int ret = -ENODEV;
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
  
 -      /* VT-d is required for a TXT/tboot launch, so enforce that */
 -      force_on = tboot_force_iommu();
 +      /*
 +       * Intel IOMMU is required for a TXT/tboot launch or platform
 +       * opt in, so enforce that.
 +       */
 +      force_on = tboot_force_iommu() || platform_optin_force_iommu();
  
        if (iommu_init_mempool()) {
                if (force_on)
@@@ -4927,6 -4925,10 +4969,10 @@@ static void __dmar_remove_one_dev_info(
        iommu = info->iommu;
  
        if (info->dev) {
+               if (dev_is_pci(info->dev) && sm_supported(iommu))
+                       intel_pasid_tear_down_entry(iommu, info->dev,
+                                       PASID_RID2PASID);
                iommu_disable_dev_iotlb(info);
                domain_context_clear(iommu, info->dev);
                intel_pasid_free_table(info->dev);
@@@ -5254,19 -5256,6 +5300,6 @@@ static void intel_iommu_put_resv_region
  }
  
  #ifdef CONFIG_INTEL_IOMMU_SVM
- #define MAX_NR_PASID_BITS (20)
- static inline unsigned long intel_iommu_get_pts(struct device *dev)
- {
-       int pts, max_pasid;
-       max_pasid = intel_pasid_get_dev_max_id(dev);
-       pts = find_first_bit((unsigned long *)&max_pasid, MAX_NR_PASID_BITS);
-       if (pts < 5)
-               return 0;
-       return pts - 5;
- }
  int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
  {
        struct device_domain_info *info;
        sdev->sid = PCI_DEVID(info->bus, info->devfn);
  
        if (!(ctx_lo & CONTEXT_PASIDE)) {
-               if (iommu->pasid_state_table)
-                       context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
-               context[1].lo = (u64)virt_to_phys(info->pasid_table->table) |
-                       intel_iommu_get_pts(sdev->dev);
-               wmb();
-               /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
-                * extended to permit requests-with-PASID if the PASIDE bit
-                * is set. which makes sense. For CONTEXT_TT_PASS_THROUGH,
-                * however, the PASIDE bit is ignored and requests-with-PASID
-                * are unconditionally blocked. Which makes less sense.
-                * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
-                * "guest mode" translation types depending on whether ATS
-                * is available or not. Annoyingly, we can't use the new
-                * modes *unless* PASIDE is set. */
-               if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
-                       ctx_lo &= ~CONTEXT_TT_MASK;
-                       if (info->ats_supported)
-                               ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
-                       else
-                               ctx_lo |= CONTEXT_TT_PT_PASID << 2;
-               }
                ctx_lo |= CONTEXT_PASIDE;
-               if (iommu->pasid_state_table)
-                       ctx_lo |= CONTEXT_DINVE;
-               if (info->pri_supported)
-                       ctx_lo |= CONTEXT_PRS;
                context[0].lo = ctx_lo;
                wmb();
                iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
index 84af3033a4730cf6d4f1a0903dac46ddc557161c,d90a06d4e93b85010b1149c349e4ec661e014cc7..964dd0fc3657d0698efc0e679d76469a472d3fff
@@@ -53,7 -53,7 +53,7 @@@
  #ifndef SCIF_RMA_H
  #define SCIF_RMA_H
  
- #include <linux/dma_remapping.h>
+ #include <linux/intel-iommu.h>
  #include <linux/mmu_notifier.h>
  
  #include "../bus/scif_bus.h"
@@@ -205,19 -205,6 +205,19 @@@ struct scif_status 
        struct scif_endpt *ep;
  };
  
 +/*
 + * struct scif_cb_arg - Stores the argument of the callback func
 + *
 + * @src_dma_addr: Source buffer DMA address
 + * @status: DMA status
 + * @ep: SCIF endpoint
 + */
 +struct scif_cb_arg {
 +      dma_addr_t src_dma_addr;
 +      struct scif_status *status;
 +      struct scif_endpt *ep;
 +};
 +
  /*
   * struct scif_window - Registration Window for Self and Remote
   *
diff --combined drivers/usb/host/xhci.c
index 46ab9c0410914e40b1d71a5d5c279f370733683c,8eacd2ed412bf4925647056550bc74d9b75d69d2..005e65922608e9f0f72c3f06d26a9a436d1362f7
@@@ -169,7 -169,7 +169,7 @@@ int xhci_reset(struct xhci_hcd *xhci
  {
        u32 command;
        u32 state;
 -      int ret, i;
 +      int ret;
  
        state = readl(&xhci->op_regs->status);
  
        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_CNR, 0, 10 * 1000 * 1000);
  
 -      for (i = 0; i < 2; i++) {
 -              xhci->bus_state[i].port_c_suspend = 0;
 -              xhci->bus_state[i].suspended_ports = 0;
 -              xhci->bus_state[i].resuming_ports = 0;
 -      }
 +      xhci->usb2_rhub.bus_state.port_c_suspend = 0;
 +      xhci->usb2_rhub.bus_state.suspended_ports = 0;
 +      xhci->usb2_rhub.bus_state.resuming_ports = 0;
 +      xhci->usb3_rhub.bus_state.port_c_suspend = 0;
 +      xhci->usb3_rhub.bus_state.suspended_ports = 0;
 +      xhci->usb3_rhub.bus_state.resuming_ports = 0;
  
        return ret;
  }
@@@ -245,7 -244,7 +245,7 @@@ static void xhci_zero_64b_regs(struct x
         * an iommu. Doing anything when there is no iommu is definitely
         * unsafe...
         */
-       if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !dev->iommu_group)
+       if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
                return;
  
        xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
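device_iommu_mapped(), used here in place of the open-coded
dev->iommu_group test, is introduced by the driver-core patch in this pull
("driver core: Introduce device_iommu_mapped() function"); at this point in
the series it is a thin wrapper, roughly:

    /* Sketch of the new helper from this series (lives in the driver
     * core headers; shown for reference only). */
    static inline bool device_iommu_mapped(struct device *dev)
    {
            return (dev->iommu_group != NULL);
    }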
@@@ -1088,9 -1087,9 +1088,9 @@@ int xhci_resume(struct xhci_hcd *xhci, 
        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
         */
 -      if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
 -                      time_before(jiffies,
 -                              xhci->bus_state[1].next_statechange))
 +
 +      if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
 +          time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
                msleep(100);
  
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@@ -4389,7 -4388,8 +4389,7 @@@ static int xhci_update_device(struct us
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int             portnum = udev->portnum - 1;
  
 -      if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
 -                      !udev->lpm_capable)
 +      if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
                return 0;
  
        /* we only support lpm for non-hub device connected to root hub yet */