Merge tag 'drm-intel-next-2016-04-11' of git://anongit.freedesktop.org/drm-intel
author     Dave Airlie <airlied@redhat.com>
           Thu, 21 Apr 2016 23:03:31 +0000 (09:03 +1000)
committer  Dave Airlie <airlied@redhat.com>
           Thu, 21 Apr 2016 23:03:31 +0000 (09:03 +1000)
- make modeset hw state checker atomic aware (Maarten)
- close races in gpu stuck detection/seqno reading (Chris)
- tons&tons of small improvements from Chris Wilson all over the gem code
- more dsi/bxt work from Ramalingam&Jani
- macro polish from Joonas
- guc fw loading fixes (Arun&Dave)
- vmap notifier (acked by Andrew) + i915 support by Chris Wilson (see the sketch after this list)
- create bottom half for execlist irq processing (Chris Wilson)
- vlv/chv pll cleanup (Ville)
- rework DP detection, especially sink detection (Shubhangi Shrivastava)
- make color manager support fully atomic (Maarten)
- avoid livelock on chv in execlist irq handler (Chris)
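
Of these, the vmap notifier is the one new cross-subsystem interface in this
pull (hence mm/vmalloc.c and include/linux/vmalloc.h in the file list below):
a subsystem registers to be called back when the kernel runs short of vmap
address space, and drops its cached kernel mappings in response. A minimal
sketch of such a user follows; the example_* names are hypothetical, and only
register/unregister_vmap_purge_notifier come from this series.

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/vmalloc.h>

    /* Hypothetical callback: runs when the vmap arena is purged; a real
     * user (i915's shrinker, with its new I915_SHRINK_VMAPS flag) would
     * drop cached vmaps here to free up address space. */
    static int example_vmap_notify(struct notifier_block *nb,
                                   unsigned long event, void *ptr)
    {
            return NOTIFY_DONE;
    }

    static struct notifier_block example_vmap_nb = {
            .notifier_call = example_vmap_notify,
    };

    static int __init example_init(void)
    {
            return register_vmap_purge_notifier(&example_vmap_nb);
    }

    static void __exit example_exit(void)
    {
            unregister_vmap_purge_notifier(&example_vmap_nb);
    }
    module_init(example_init);
    module_exit(example_exit);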

* tag 'drm-intel-next-2016-04-11' of git://anongit.freedesktop.org/drm-intel: (82 commits)
  drm/i915: Update DRIVER_DATE to 20160411
  drm/i915: Avoid allocating a vmap arena for a single page
  drm,i915: Introduce drm_malloc_gfp()
  drm/i915/shrinker: Restrict vmap purge to objects with vmaps
  drm/i915: Refactor duplicate object vmap functions
  drm/i915: Consolidate common error handling in intel_pin_and_map_ringbuffer_obj
  drm/i915/dmabuf: Tighten struct_mutex for unmap_dma_buf
  drm/i915: implement WaClearTdlStateAckDirtyBits
  drm/i915/bxt: Reversed polarity of PORT_PLL_REF_SEL bit
  drm/i915: Rename hw state checker to hw state verifier.
  drm/i915: Move modeset state verifier calls.
  drm/i915: Make modeset state verifier take crtc as argument.
  drm/i915: Replace manual barrier() with READ_ONCE() in HWS accessor
  drm/i915: Use simplest form for flushing the single cacheline in the HWS
  drm/i915: Harden detection of missed interrupts
  drm/i915: Separate out the seqno-barrier from engine->get_seqno
  drm/i915: Remove forcewake dance from seqno/irq barrier on legacy gen6+
  drm/i915: Fixup the free space logic in ring_prepare
  drm/i915: Simplify check for idleness in hangcheck
  drm/i915: Apply a mb between emitting the request and hangcheck
  ...

50 files changed:
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_reg.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_fifo_underrun.c
drivers/gpu/drm/i915/intel_guc_loader.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_vbt_defs.h
include/drm/drm_mem_util.h
include/linux/vmalloc.h
mm/vmalloc.c

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 78a1c563a4e74c875e53948087d25d8c0aaacc89..644e80ba13e0d8ff1d1674f400abd787b57cc532 100644
@@ -134,6 +134,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        int pin_count = 0;
        enum intel_engine_id id;
 
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
        seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
                   &obj->base,
                   obj->active ? "*" : " ",
@@ -202,8 +204,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->ggtt.base;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
        u64 total_obj_size, total_gtt_size;
        int count, ret;
@@ -216,11 +218,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        switch (list) {
        case ACTIVE_LIST:
                seq_puts(m, "Active:\n");
-               head = &vm->active_list;
+               head = &ggtt->base.active_list;
                break;
        case INACTIVE_LIST:
                seq_puts(m, "Inactive:\n");
-               head = &vm->inactive_list;
+               head = &ggtt->base.inactive_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
@@ -429,11 +431,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mappable_count, purgeable_count;
        u64 size, mappable_size, purgeable_size;
        struct drm_i915_gem_object *obj;
-       struct i915_address_space *vm = &dev_priv->ggtt.base;
        struct drm_file *file;
        struct i915_vma *vma;
        int ret;
@@ -452,12 +454,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_vmas(&vm->active_list, vm_link);
+       count_vmas(&ggtt->base.active_list, vm_link);
        seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_vmas(&vm->inactive_list, vm_link);
+       count_vmas(&ggtt->base.inactive_list, vm_link);
        seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
@@ -492,8 +494,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   count, size);
 
        seq_printf(m, "%llu [%llu] gtt total\n",
-                  dev_priv->ggtt.base.total,
-                  (u64)dev_priv->ggtt.mappable_end - dev_priv->ggtt.base.start);
+                  ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
 
        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
@@ -597,7 +598,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           engine->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
                                           dev_priv->next_seqno,
-                                          engine->get_seqno(engine, true),
+                                          engine->get_seqno(engine),
                                           i915_gem_request_completed(work->flip_queued_req, true));
                        } else
                                seq_printf(m, "Flip not associated with any ring\n");
@@ -727,10 +728,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_engine_cs *engine)
 {
-       if (engine->get_seqno) {
-               seq_printf(m, "Current sequence (%s): %x\n",
-                          engine->name, engine->get_seqno(engine, false));
-       }
+       seq_printf(m, "Current sequence (%s): %x\n",
+                  engine->name, engine->get_seqno(engine));
+       seq_printf(m, "Current user interrupts (%s): %x\n",
+                  engine->name, READ_ONCE(engine->user_interrupts));
 }
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -1345,8 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        intel_runtime_pm_get(dev_priv);
 
        for_each_engine_id(engine, dev_priv, id) {
-               seqno[id] = engine->get_seqno(engine, false);
                acthd[id] = intel_ring_get_active_head(engine);
+               seqno[id] = engine->get_seqno(engine);
        }
 
        i915_get_extra_instdone(dev, instdone);
@@ -1362,8 +1363,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
        for_each_engine_id(engine, dev_priv, id) {
                seq_printf(m, "%s:\n", engine->name);
-               seq_printf(m, "\tseqno = %x [current %x]\n",
-                          engine->hangcheck.seqno, seqno[id]);
+               seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+                          engine->hangcheck.seqno,
+                          seqno[id],
+                          engine->last_submitted_seqno);
+               seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+                          engine->hangcheck.user_interrupts,
+                          READ_ONCE(engine->user_interrupts));
                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);
@@ -1895,6 +1901,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct intel_framebuffer *fbdev_fb = NULL;
        struct drm_framebuffer *drm_fb;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
        if (to_i915(dev)->fbdev) {
@@ -1929,6 +1940,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                seq_putc(m, '\n');
        }
        mutex_unlock(&dev->mode_config.fb_lock);
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -2093,7 +2105,6 @@ static int i915_execlists(struct seq_file *m, void *data)
        for_each_engine(engine, dev_priv) {
                struct drm_i915_gem_request *head_req = NULL;
                int count = 0;
-               unsigned long flags;
 
                seq_printf(m, "%s\n", engine->name);
 
@@ -2120,13 +2131,13 @@ static int i915_execlists(struct seq_file *m, void *data)
                                   i, status, ctx_id);
                }
 
-               spin_lock_irqsave(&engine->execlist_lock, flags);
+               spin_lock_bh(&engine->execlist_lock);
                list_for_each(cursor, &engine->execlist_queue)
                        count++;
                head_req = list_first_entry_or_null(&engine->execlist_queue,
                                                    struct drm_i915_gem_request,
                                                    execlist_link);
-               spin_unlock_irqrestore(&engine->execlist_lock, flags);
+               spin_unlock_bh(&engine->execlist_lock);
 
                seq_printf(m, "\t%d requests in queue\n", count);
                if (head_req) {
@@ -2409,7 +2420,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        u32 tmp, i;
 
-       if (!HAS_GUC_UCODE(dev_priv->dev))
+       if (!HAS_GUC_UCODE(dev_priv))
                return 0;
 
        seq_printf(m, "GuC firmware status:\n");
@@ -2483,7 +2494,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
        struct intel_engine_cs *engine;
        u64 total = 0;
 
-       if (!HAS_GUC_SCHED(dev_priv->dev))
+       if (!HAS_GUC_SCHED(dev_priv))
                return 0;
 
        if (mutex_lock_interruptible(&dev->struct_mutex))
@@ -2687,10 +2698,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!HAS_RUNTIME_PM(dev)) {
-               seq_puts(m, "not supported\n");
-               return 0;
-       }
+       if (!HAS_RUNTIME_PM(dev_priv))
+               seq_puts(m, "Runtime power management not supported\n");
 
        seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
        seq_printf(m, "IRQs disabled: %s\n",
@@ -2701,6 +2710,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 #else
        seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
+       seq_printf(m, "PCI device power state: %s [%d]\n",
+                  pci_power_name(dev_priv->dev->pdev->current_state),
+                  dev_priv->dev->pdev->current_state);
 
        return 0;
 }
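
The spin_lock_irqsave()/irqrestore() to spin_lock_bh() conversions above track
the new execlist bottom half: execlist interrupt processing now runs in a
tasklet rather than in the hardirq handler, so blocking softirqs is enough to
exclude it. A condensed sketch of that pattern under hypothetical example_*
names; only the tasklet and spinlock APIs are the kernel's own.

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_queue_lock);

    /* Bottom half: runs in softirq context; process-context code takes
     * example_queue_lock with spin_lock_bh() to exclude it. */
    static void example_irq_tasklet(unsigned long data)
    {
            spin_lock(&example_queue_lock);
            /* ... dequeue requests and write the execlist ports ... */
            spin_unlock(&example_queue_lock);
    }

    static DECLARE_TASKLET(example_bh, example_irq_tasklet, 0);

    /* Top half: acknowledge the interrupt and defer the heavy lifting. */
    static irqreturn_t example_hardirq(int irq, void *arg)
    {
            tasklet_schedule(&example_bh);
            return IRQ_HANDLED;
    }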
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1ac1ea969eece260f41c0d36465812d3dea74ded..b377753717d1ca6623e52e204338d7c4fb752051 100644
@@ -493,9 +493,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g. VGA
         * on gm45.  Hence we can only set up the initial fbdev config after hpd
-        * irqs are fully enabled. We protect the fbdev initial config scanning
-        * against hotplug events by waiting in intel_fbdev_output_poll_changed
-        * until the asynchronous thread has finished.
+        * irqs are fully enabled. Now we should scan for the initial config
+        * only once hotplug handling is enabled, but due to screwed-up locking
+        * around kms/fbdev init we can't protect the fbdev initial config
+        * scanning against hotplug events. Hence do this first and ignore the
+        * tiny window where we will lose hotplug notifications.
         */
        intel_fbdev_initial_config_async(dev);
 
@@ -527,6 +529,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
        struct apertures_struct *ap;
        struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool primary;
        int ret;
 
@@ -534,8 +537,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
        if (!ap)
                return -ENOMEM;
 
-       ap->ranges[0].base = dev_priv->ggtt.mappable_base;
-       ap->ranges[0].size = dev_priv->ggtt.mappable_end;
+       ap->ranges[0].base = ggtt->mappable_base;
+       ap->ranges[0].size = ggtt->mappable_end;
 
        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1170,6 +1173,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t aperture_size;
        int ret;
 
@@ -1178,7 +1182,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        intel_device_info_runtime_init(dev);
 
-       ret = i915_gem_gtt_init(dev);
+       ret = i915_ggtt_init_hw(dev);
        if (ret)
                return ret;
 
@@ -1187,13 +1191,13 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
        ret = i915_kick_out_firmware_fb(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-               goto out_gtt;
+               goto out_ggtt;
        }
 
        ret = i915_kick_out_vgacon(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting VGA console\n");
-               goto out_gtt;
+               goto out_ggtt;
        }
 
        pci_set_master(dev->pdev);
@@ -1213,17 +1217,17 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
        if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
 
-       aperture_size = dev_priv->ggtt.mappable_end;
+       aperture_size = ggtt->mappable_end;
 
-       dev_priv->ggtt.mappable =
-               io_mapping_create_wc(dev_priv->ggtt.mappable_base,
+       ggtt->mappable =
+               io_mapping_create_wc(ggtt->mappable_base,
                                     aperture_size);
-       if (dev_priv->ggtt.mappable == NULL) {
+       if (!ggtt->mappable) {
                ret = -EIO;
-               goto out_gtt;
+               goto out_ggtt;
        }
 
-       dev_priv->ggtt.mtrr = arch_phys_wc_add(dev_priv->ggtt.mappable_base,
+       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
                                              aperture_size);
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
@@ -1253,8 +1257,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        return 0;
 
-out_gtt:
-       i915_global_gtt_cleanup(dev);
+out_ggtt:
+       i915_ggtt_cleanup_hw(dev);
 
        return ret;
 }
@@ -1266,14 +1270,15 @@ out_gtt:
 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);
 
        pm_qos_remove_request(&dev_priv->pm_qos);
-       arch_phys_wc_del(dev_priv->ggtt.mtrr);
-       io_mapping_free(dev_priv->ggtt.mappable);
-       i915_global_gtt_cleanup(dev);
+       arch_phys_wc_del(ggtt->mtrr);
+       io_mapping_free(ggtt->mappable);
+       i915_ggtt_cleanup_hw(dev);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 349e17cc85409625304b8ae2492d03893091d3e2..29b4e79c85a68764cdba964659a2bdfba63a6731 100644
@@ -360,14 +360,12 @@ static const struct intel_device_info intel_broxton_info = {
 
 static const struct intel_device_info intel_kabylake_info = {
        BDW_FEATURES,
-       .is_preliminary = 1,
        .is_kabylake = 1,
        .gen = 9,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
        BDW_FEATURES,
-       .is_preliminary = 1,
        .is_kabylake = 1,
        .gen = 9,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -1402,7 +1400,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
        if (err)
                goto err2;
 
-       if (!IS_CHERRYVIEW(dev_priv->dev))
+       if (!IS_CHERRYVIEW(dev_priv))
                vlv_save_gunit_s0ix_state(dev_priv);
 
        err = vlv_force_gfx_clock(dev_priv, false);
@@ -1434,7 +1432,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
         */
        ret = vlv_force_gfx_clock(dev_priv, true);
 
-       if (!IS_CHERRYVIEW(dev_priv->dev))
+       if (!IS_CHERRYVIEW(dev_priv))
                vlv_restore_gunit_s0ix_state(dev_priv);
 
        err = vlv_allow_gt_wake(dev_priv, true);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f6d71590bd7b6648c0e1345371d4ba2fd35e12f6..a9c8211c8e5eda7d9650a72b67aa8e4e4b4705c4 100644
@@ -60,7 +60,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20160330"
+#define DRIVER_DATE            "20160411"
 
 #undef WARN_ON
 /* Many gcc seem to not see through this and fall over :( */
@@ -495,6 +495,7 @@ struct drm_i915_error_state {
                u32 cpu_ring_head;
                u32 cpu_ring_tail;
 
+               u32 last_seqno;
                u32 semaphore_seqno[I915_NUM_ENGINES - 1];
 
                /* Register state */
@@ -612,8 +613,8 @@ struct drm_i915_display_funcs {
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
 
-       void (*load_csc_matrix)(struct drm_crtc *crtc);
-       void (*load_luts)(struct drm_crtc *crtc);
+       void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
+       void (*load_luts)(struct drm_crtc_state *crtc_state);
 };
 
 enum forcewake_domain_id {
@@ -1118,6 +1119,7 @@ struct intel_gen6_power_mgmt {
        u8 efficient_freq;      /* AKA RPe. Pre-determined balanced frequency */
        u8 rp1_freq;            /* "less than" RP0 power/frequency */
        u8 rp0_freq;            /* Non-overclocked max frequency. */
+       u16 gpll_ref_freq;      /* vlv/chv GPLL reference frequency */
 
        u8 up_threshold; /* Current %busy required to upclock */
        u8 down_threshold; /* Current %busy required to downclock */
@@ -1257,6 +1259,7 @@ struct i915_gem_mm {
        struct i915_hw_ppgtt *aliasing_ppgtt;
 
        struct notifier_block oom_notifier;
+       struct notifier_block vmap_notifier;
        struct shrinker shrinker;
        bool shrinker_no_lock_stealing;
 
@@ -1837,6 +1840,13 @@ struct drm_i915_private {
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        const struct intel_dpll_mgr *dpll_mgr;
 
+       /*
+        * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
+        * Must be global rather than per dpll, because on some platforms
+        * plls share registers.
+        */
+       struct mutex dpll_lock;
+
        unsigned int active_crtcs;
        unsigned int min_pixclk[I915_MAX_PIPES];
 
@@ -1893,7 +1903,14 @@ struct drm_i915_private {
 
        u32 fdi_rx_config;
 
+       /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
        u32 chv_phy_control;
+       /*
+        * Shadows for CHV DPLL_MD regs to keep the state
+        * checker somewhat working in the presence hardware
+        * crappiness (can't read out DPLL_MD for pipes B & C).
+        */
+       u32 chv_dpll_md[I915_MAX_PIPES];
 
        u32 suspend_count;
        bool suspended_to_idle;
@@ -2152,10 +2169,7 @@ struct drm_i915_gem_object {
                struct scatterlist *sg;
                int last;
        } get_page;
-
-       /* prime dma-buf support */
-       void *dma_buf_vmapping;
-       int vmapping_count;
+       void *mapping;
 
        /** Breadcrumb of last rendering to the buffer.
         * There can only be one writer, but we allow for multiple readers.
@@ -2732,6 +2746,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_device *dev);
 extern int i915_reset(struct drm_device *dev);
+extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -2970,12 +2985,44 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
        BUG_ON(obj->pages == NULL);
        obj->pages_pin_count++;
 }
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
        BUG_ON(obj->pages_pin_count == 0);
        obj->pages_pin_count--;
 }
 
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj: the object to map into kernel address space
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space.
+ *
+ * The caller must hold the struct_mutex.
+ *
+ * Returns the pointer through which to access the backing storage.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj: the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ *
+ * The caller must hold the struct_mutex.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       i915_gem_object_unpin_pages(obj);
+}
+
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_engine_cs *to,
@@ -2999,15 +3046,19 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
                                           bool lazy_coherency)
 {
-       u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
-       return i915_seqno_passed(seqno, req->previous_seqno);
+       if (!lazy_coherency && req->engine->irq_seqno_barrier)
+               req->engine->irq_seqno_barrier(req->engine);
+       return i915_seqno_passed(req->engine->get_seqno(req->engine),
+                                req->previous_seqno);
 }
 
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
                                              bool lazy_coherency)
 {
-       u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
-       return i915_seqno_passed(seqno, req->seqno);
+       if (!lazy_coherency && req->engine->irq_seqno_barrier)
+               req->engine->irq_seqno_barrier(req->engine);
+       return i915_seqno_passed(req->engine->get_seqno(req->engine),
+                                req->seqno);
 }
 
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
@@ -3147,13 +3198,9 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
 /* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
-       (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
-
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
-       WARN_ON(i915_is_ggtt(vm));
        return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
@@ -3166,7 +3213,10 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 static inline unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+       return i915_gem_obj_size(obj, &ggtt->base);
 }
 
 static inline int __must_check
@@ -3174,7 +3224,10 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                      uint32_t alignment,
                      unsigned flags)
 {
-       return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+       return i915_gem_object_pin(obj, &ggtt->base,
                                   alignment, flags | PIN_GLOBAL);
 }
 
@@ -3289,6 +3342,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 #define I915_SHRINK_UNBOUND 0x2
 #define I915_SHRINK_BOUND 0x4
 #define I915_SHRINK_ACTIVE 0x8
+#define I915_SHRINK_VMAPS 0x10
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
@@ -3388,6 +3442,8 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+                                    enum port port);
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
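
The i915_gem_object_pin_map()/i915_gem_object_unpin_map() pair declared above
replaces the hand-rolled dma_buf_vmapping/vmapping_count bookkeeping (see the
i915_gem_dmabuf.c diff further down). A sketch of a typical caller, assuming a
hypothetical copy_into_object() helper and struct_mutex held as the kerneldoc
requires:

    /* Hypothetical helper: copy CPU data into a GEM object through the
     * new cached mapping. struct_mutex must be held by the caller. */
    static int copy_into_object(struct drm_i915_gem_object *obj,
                                const void *data, size_t len)
    {
            void *vaddr;

            lockdep_assert_held(&obj->base.dev->struct_mutex);

            /* Pins the backing pages and returns a contiguous kernel
             * mapping (kmap for a single page, vmap otherwise). */
            vaddr = i915_gem_object_pin_map(obj);
            if (IS_ERR(vaddr))
                    return PTR_ERR(vaddr);

            memcpy(vaddr, data, len);

            /* Drops the page pin; the mapping itself stays cached in
             * obj->mapping until put_pages() tears it down. */
            i915_gem_object_unpin_map(obj);
            return 0;
    }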
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bb8fac5c7b0e6d1cdadd52e4006d48c33a239116..f4abf3abd572c3c8a8498ffc647a8692a105dbe2 100644
@@ -130,9 +130,9 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_get_aperture *args = data;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        size_t pinned;
 
@@ -146,7 +146,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
 
-       args->aper_size = dev_priv->ggtt.base.total;
+       args->aper_size = ggtt->base.total;
        args->aper_available_size = args->aper_size - pinned;
 
        return 0;
@@ -765,7 +765,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
@@ -807,7 +808,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
-               if (fast_user_write(dev_priv->ggtt.mappable, page_base,
+               if (fast_user_write(ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_flush;
@@ -1790,7 +1791,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_ggtt_view view = i915_ggtt_view_normal;
        pgoff_t page_offset;
        unsigned long pfn;
@@ -1825,7 +1827,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        /* Use a partial view if the object is bigger than the aperture. */
-       if (obj->base.size >= dev_priv->ggtt.mappable_end &&
+       if (obj->base.size >= ggtt->mappable_end &&
            obj->tiling_mode == I915_TILING_NONE) {
                static const unsigned int chunk_size = 256; // 1 MiB
 
@@ -1853,7 +1855,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto unpin;
 
        /* Finally, remap it using the new GTT offset */
-       pfn = dev_priv->ggtt.mappable_base +
+       pfn = ggtt->mappable_base +
                i915_gem_obj_ggtt_offset_view(obj, &view);
        pfn >>= PAGE_SHIFT;
 
@@ -2227,6 +2229,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
         * lists early. */
        list_del(&obj->global_list);
 
+       if (obj->mapping) {
+               if (is_vmalloc_addr(obj->mapping))
+                       vunmap(obj->mapping);
+               else
+                       kunmap(kmap_to_page(obj->mapping));
+               obj->mapping = NULL;
+       }
+
        ops->put_pages(obj);
        obj->pages = NULL;
 
@@ -2395,6 +2405,49 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+{
+       int ret;
+
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               return ERR_PTR(ret);
+
+       i915_gem_object_pin_pages(obj);
+
+       if (obj->mapping == NULL) {
+               struct page **pages;
+
+               pages = NULL;
+               if (obj->base.size == PAGE_SIZE)
+                       obj->mapping = kmap(sg_page(obj->pages->sgl));
+               else
+                       pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
+                                              sizeof(*pages),
+                                              GFP_TEMPORARY);
+               if (pages != NULL) {
+                       struct sg_page_iter sg_iter;
+                       int n;
+
+                       n = 0;
+                       for_each_sg_page(obj->pages->sgl, &sg_iter,
+                                        obj->pages->nents, 0)
+                               pages[n++] = sg_page_iter_page(&sg_iter);
+
+                       obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
+                       drm_free_large(pages);
+               }
+               if (obj->mapping == NULL) {
+                       i915_gem_object_unpin_pages(obj);
+                       return ERR_PTR(-ENOMEM);
+               }
+       }
+
+       return obj->mapping;
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
                             struct drm_i915_gem_request *req)
 {
@@ -2463,7 +2516,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
-       int ret, j;
+       int ret;
 
        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
@@ -2474,13 +2527,9 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
        i915_gem_retire_requests(dev);
 
        /* Finally reset hw state */
-       for_each_engine(engine, dev_priv) {
+       for_each_engine(engine, dev_priv)
                intel_ring_init_seqno(engine, seqno);
 
-               for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
-                       engine->semaphore.sync_seqno[j] = 0;
-       }
-
        return 0;
 }
 
@@ -2574,6 +2623,28 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
        }
 
+       trace_i915_gem_request_add(request);
+
+       request->head = request_start;
+
+       /* Whilst this request exists, batch_obj will be on the
+        * active_list, and so will hold the active reference. Only when this
+        * request is retired will the batch_obj be moved onto the
+        * inactive_list and lose its active reference. Hence we do not need
+        * to explicitly hold another reference here.
+        */
+       request->batch_obj = obj;
+
+       /* Seal the request and mark it as pending execution. Note that
+        * we may inspect this state, without holding any locks, during
+        * hangcheck. Hence we apply the barrier to ensure that we do not
+        * see a more recent value in the hws than we are tracking.
+        */
+       request->emitted_jiffies = jiffies;
+       request->previous_seqno = engine->last_submitted_seqno;
+       smp_store_mb(engine->last_submitted_seqno, request->seqno);
+       list_add_tail(&request->list, &engine->request_list);
+
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
@@ -2591,23 +2662,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
 
-       request->head = request_start;
-
-       /* Whilst this request exists, batch_obj will be on the
-        * active_list, and so will hold the active reference. Only when this
-        * request is retired will the the batch_obj be moved onto the
-        * inactive_list and lose its active reference. Hence we do not need
-        * to explicitly hold another reference here.
-        */
-       request->batch_obj = obj;
-
-       request->emitted_jiffies = jiffies;
-       request->previous_seqno = engine->last_submitted_seqno;
-       engine->last_submitted_seqno = request->seqno;
-       list_add_tail(&request->list, &engine->request_list);
-
-       trace_i915_gem_request_add(request);
-
        i915_queue_hangcheck(engine->dev);
 
        queue_delayed_work(dev_priv->wq,
@@ -2837,13 +2891,15 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
         */
 
        if (i915.enable_execlists) {
-               spin_lock_irq(&engine->execlist_lock);
+               /* Ensure irq handler finishes or is cancelled. */
+               tasklet_kill(&engine->irq_tasklet);
 
+               spin_lock_bh(&engine->execlist_lock);
                /* list_splice_tail_init checks for empty lists */
                list_splice_tail_init(&engine->execlist_queue,
                                      &engine->execlist_retired_req_list);
+               spin_unlock_bh(&engine->execlist_lock);
 
-               spin_unlock_irq(&engine->execlist_lock);
                intel_execlists_retire_requests(engine);
        }
 
@@ -2875,6 +2931,8 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
                buffer->last_retired_head = buffer->tail;
                intel_ring_update_space(buffer);
        }
+
+       intel_ring_init_seqno(engine, engine->last_submitted_seqno);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2963,9 +3021,9 @@ i915_gem_retire_requests(struct drm_device *dev)
                i915_gem_retire_requests_ring(engine);
                idle &= list_empty(&engine->request_list);
                if (i915.enable_execlists) {
-                       spin_lock_irq(&engine->execlist_lock);
+                       spin_lock_bh(&engine->execlist_lock);
                        idle &= list_empty(&engine->execlist_queue);
-                       spin_unlock_irq(&engine->execlist_lock);
+                       spin_unlock_bh(&engine->execlist_lock);
 
                        intel_execlists_retire_requests(engine);
                }
@@ -3455,7 +3513,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           uint64_t flags)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 fence_alignment, unfenced_alignment;
        u32 search_flag, alloc_flag;
        u64 start, end;
@@ -3502,7 +3561,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        end = vm->total;
        if (flags & PIN_MAPPABLE)
-               end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+               end = min_t(u64, end, ggtt->mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
@@ -3709,6 +3768,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t old_write_domain, old_read_domains;
        struct i915_vma *vma;
        int ret;
@@ -3763,7 +3825,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        vma = i915_gem_obj_to_ggtt(obj);
        if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
                list_move_tail(&vma->vm_link,
-                              &to_i915(obj->base.dev)->ggtt.base.inactive_list);
+                              &ggtt->base.inactive_list);
 
        return 0;
 }
@@ -4232,9 +4294,6 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
        vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
                          i915_gem_obj_to_vma(obj, vm);
 
-       if (IS_ERR(vma))
-               return PTR_ERR(vma);
-
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;
@@ -4297,10 +4356,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         uint32_t alignment,
                         uint64_t flags)
 {
-       if (WARN_ONCE(!view, "no view specified"))
-               return -EINVAL;
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+       BUG_ON(!view);
 
-       return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+       return i915_gem_object_do_pin(obj, &ggtt->base, view,
                                      alignment, flags | PIN_GLOBAL);
 }
 
@@ -4612,14 +4674,15 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
                                           const struct i915_ggtt_view *view)
 {
-       struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
-       if (WARN_ONCE(!view, "no view specified"))
-               return ERR_PTR(-EINVAL);
+       BUG_ON(!view);
 
        list_for_each_entry(vma, &obj->vma_list, obj_link)
-               if (vma->vm == ggtt &&
+               if (vma->vm == &ggtt->base &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        return NULL;
@@ -4964,7 +5027,7 @@ int i915_gem_init(struct drm_device *dev)
        if (ret)
                goto out_unlock;
 
-       i915_gem_init_global_gtt(dev);
+       i915_gem_init_ggtt(dev);
 
        ret = i915_gem_context_init(dev);
        if (ret)
@@ -5212,11 +5275,12 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == ggtt &&
+               if (vma->vm == &ggtt->base &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
 
@@ -5243,11 +5307,12 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == ggtt &&
+               if (vma->vm == &ggtt->base &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;
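
One subtlety in the __i915_add_request() reordering above: hangcheck reads
engine state without taking any lock, so the request must be published with
care. smp_store_mb() stores last_submitted_seqno and then issues a full memory
barrier, pairing with READ_ONCE() on the hangcheck side. Reduced to its bones,
with hypothetical example_* names:

    #include <linux/compiler.h>
    #include <linux/types.h>
    #include <asm/barrier.h>

    struct example_engine {
            u32 last_submitted_seqno;
    };

    /* Submission side: smp_store_mb() is a WRITE_ONCE() followed by a
     * full barrier, so the seqno is visible before any later stores
     * (such as linking the request into the engine's request list). */
    static void example_publish(struct example_engine *e, u32 seqno)
    {
            smp_store_mb(e->last_submitted_seqno, seqno);
    }

    /* Hangcheck side, lockless: READ_ONCE() stops the compiler from
     * tearing the load or caching it across iterations. */
    static u32 example_sample(const struct example_engine *e)
    {
            return READ_ONCE(e->last_submitted_seqno);
    }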
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 0506016e18e0d7a7abf6ca4fe421fa18fa88960d..80bbe43a2e92a5b363d46d784fda9accd6f8d3f9 100644
@@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
-       mutex_lock(&obj->base.dev->struct_mutex);
-
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
 
+       mutex_lock(&obj->base.dev->struct_mutex);
        i915_gem_object_unpin_pages(obj);
-
        mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
@@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
-       struct sg_page_iter sg_iter;
-       struct page **pages;
-       int ret, i;
+       void *addr;
+       int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);
 
-       if (obj->dma_buf_vmapping) {
-               obj->vmapping_count++;
-               goto out_unlock;
-       }
-
-       ret = i915_gem_object_get_pages(obj);
-       if (ret)
-               goto err;
-
-       i915_gem_object_pin_pages(obj);
-
-       ret = -ENOMEM;
-
-       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-       if (pages == NULL)
-               goto err_unpin;
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-               pages[i++] = sg_page_iter_page(&sg_iter);
-
-       obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
-       drm_free_large(pages);
-
-       if (!obj->dma_buf_vmapping)
-               goto err_unpin;
-
-       obj->vmapping_count = 1;
-out_unlock:
+       addr = i915_gem_object_pin_map(obj);
        mutex_unlock(&dev->struct_mutex);
-       return obj->dma_buf_vmapping;
 
-err_unpin:
-       i915_gem_object_unpin_pages(obj);
-err:
-       mutex_unlock(&dev->struct_mutex);
-       return ERR_PTR(ret);
+       return addr;
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
        struct drm_device *dev = obj->base.dev;
 
        mutex_lock(&dev->struct_mutex);
-       if (--obj->vmapping_count == 0) {
-               vunmap(obj->dma_buf_vmapping);
-               obj->dma_buf_vmapping = NULL;
-
-               i915_gem_object_unpin_pages(obj);
-       }
+       i915_gem_object_unpin_map(obj);
        mutex_unlock(&dev->struct_mutex);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 374a0cb7a092f7f3581254653a598c7564b551cd..6ee4f00f620c0b45caf515720578ad4045ee5f33 100644
@@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
                   uint64_t target_offset)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint64_t delta = relocation_target(reloc, target_offset);
        uint64_t offset;
        void __iomem *reloc_page;
@@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
        /* Map the page containing the relocation we're going to perform.  */
        offset = i915_gem_obj_ggtt_offset(obj);
        offset += reloc->offset;
-       reloc_page = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+       reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
                                              offset & PAGE_MASK);
        iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
@@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
                if (offset_in_page(offset) == 0) {
                        io_mapping_unmap_atomic(reloc_page);
                        reloc_page =
-                               io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+                               io_mapping_map_atomic_wc(ggtt->mappable,
                                                         offset);
                }
 
@@ -1431,7 +1432,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_request *req = NULL;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
@@ -1504,7 +1506,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ctx->ppgtt)
                vm = &ctx->ppgtt->base;
        else
-               vm = &dev_priv->ggtt.base;
+               vm = &ggtt->base;
 
        memset(&params_master, 0x00, sizeof(params_master));
 
@@ -1781,11 +1783,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
-                            GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-       if (exec2_list == NULL)
-               exec2_list = drm_malloc_ab(sizeof(*exec2_list),
-                                          args->buffer_count);
+       exec2_list = drm_malloc_gfp(args->buffer_count,
+                                   sizeof(*exec2_list),
+                                   GFP_TEMPORARY);
        if (exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
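
drm_malloc_gfp(), used above in place of the open-coded kmalloc-then-
drm_malloc_ab fallback, comes from the "drm,i915: Introduce drm_malloc_gfp()"
commit and lives in include/drm/drm_mem_util.h (see the file list). A sketch
of the likely shape, hedging on the exact flag handling; consult the header
for the real helper:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Sketch only; see include/drm/drm_mem_util.h for the real thing. */
    static inline void *drm_malloc_gfp_sketch(size_t nmemb, size_t size,
                                              gfp_t gfp)
    {
            void *ptr;

            if (size != 0 && nmemb > SIZE_MAX / size)
                    return NULL;    /* overflow check, as in drm_malloc_ab() */

            if (size * nmemb <= PAGE_SIZE)
                    return kmalloc(nmemb * size, gfp);

            /* Prefer physically contiguous memory, but fail fast and
             * quietly so we can fall back to vmalloc space. */
            ptr = kmalloc(nmemb * size, gfp | __GFP_NOWARN | __GFP_NORETRY);
            if (ptr)
                    return ptr;

            return __vmalloc(size * nmemb, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
    }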
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7cfafdc80b17f49dee7248a38b36ea802ad80ec3..c5cb04907525558ebf53c8ab3e7c2036c46e2411 100644
@@ -706,8 +706,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
                                       uint64_t length,
                                       gen8_pte_t scratch_pte)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        gen8_pte_t *pt_vaddr;
        unsigned pdpe = gen8_pdpe_index(start);
        unsigned pde = gen8_pde_index(start);
@@ -762,8 +761,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
                                   uint64_t length,
                                   bool use_scratch)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
                                                 I915_CACHE_LLC, use_scratch);
 
@@ -788,8 +786,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
                              uint64_t start,
                              enum i915_cache_level cache_level)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        gen8_pte_t *pt_vaddr;
        unsigned pdpe = gen8_pdpe_index(start);
        unsigned pde = gen8_pde_index(start);
@@ -829,8 +826,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                                      enum i915_cache_level cache_level,
                                      u32 unused)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sg_page_iter sg_iter;
 
        __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
@@ -981,8 +977,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
        if (intel_vgpu_active(vm->dev))
                gen8_ppgtt_notify_vgt(ppgtt, false);
@@ -1216,8 +1211,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
                                    uint64_t start,
                                    uint64_t length)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        unsigned long *new_page_dirs, *new_page_tables;
        struct drm_device *dev = vm->dev;
        struct i915_page_directory *pd;
@@ -1329,8 +1323,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
                                    uint64_t length)
 {
        DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
-       struct i915_hw_ppgtt *ppgtt =
-                       container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_directory_pointer *pdp;
        uint64_t pml4e;
        int ret = 0;
@@ -1376,8 +1369,7 @@ err_out:
 static int gen8_alloc_va_range(struct i915_address_space *vm,
                               uint64_t start, uint64_t length)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
        if (USES_FULL_48BIT_PPGTT(vm->dev))
                return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
@@ -1629,6 +1621,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
                                  struct i915_page_directory *pd,
                                  uint32_t start, uint32_t length)
 {
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_page_table *pt;
        uint32_t pde, temp;
 
@@ -1637,7 +1630,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
 
        /* Make sure write is complete before other code can use this page
         * table. Also required for WC mapped PTEs */
-       readl(dev_priv->ggtt.gsm);
+       readl(ggtt->gsm);
 }
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
@@ -1794,8 +1787,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   uint64_t length,
                                   bool use_scratch)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        gen6_pte_t *pt_vaddr, scratch_pte;
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
@@ -1829,8 +1821,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      uint64_t start,
                                      enum i915_cache_level cache_level, u32 flags)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        gen6_pte_t *pt_vaddr;
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned act_pt = first_entry / GEN6_PTES;
@@ -1862,9 +1853,9 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 {
        DECLARE_BITMAP(new_page_tables, I915_PDES);
        struct drm_device *dev = vm->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_hw_ppgtt *ppgtt =
-                               container_of(vm, struct i915_hw_ppgtt, base);
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_table *pt;
        uint32_t start, length, start_save, length_save;
        uint32_t pde, temp;
@@ -1930,7 +1921,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 
        /* Make sure write is complete before other code can use this page
         * table. Also required for WC mapped PTEs */
-       readl(dev_priv->ggtt.gsm);
+       readl(ggtt->gsm);
 
        mark_tlbs_dirty(ppgtt);
        return 0;
@@ -1976,8 +1967,7 @@ static void gen6_free_scratch(struct i915_address_space *vm)
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_table *pt;
        uint32_t pde;
 
@@ -1995,7 +1985,8 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
        struct i915_address_space *vm = &ppgtt->base;
        struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool retried = false;
        int ret;
 
@@ -2003,23 +1994,23 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
         * allocator works in address space sizes, so it's multiplied by page
         * size. We allocate at the top of the GTT to avoid fragmentation.
         */
-       BUG_ON(!drm_mm_initialized(&dev_priv->ggtt.base.mm));
+       BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
 
        ret = gen6_init_scratch(vm);
        if (ret)
                return ret;
 
 alloc:
-       ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+       ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
                                                  &ppgtt->node, GEN6_PD_SIZE,
                                                  GEN6_PD_ALIGN, 0,
-                                                 0, dev_priv->ggtt.base.total,
+                                                 0, ggtt->base.total,
                                                  DRM_MM_TOPDOWN);
        if (ret == -ENOSPC && !retried) {
-               ret = i915_gem_evict_something(dev, &dev_priv->ggtt.base,
+               ret = i915_gem_evict_something(dev, &ggtt->base,
                                               GEN6_PD_SIZE, GEN6_PD_ALIGN,
                                               I915_CACHE_NONE,
-                                              0, dev_priv->ggtt.base.total,
+                                              0, ggtt->base.total,
                                               0);
                if (ret)
                        goto err_out;
@@ -2032,7 +2023,7 @@ alloc:
                goto err_out;
 
 
-       if (ppgtt->node.start < dev_priv->ggtt.mappable_end)
+       if (ppgtt->node.start < ggtt->mappable_end)
                DRM_DEBUG("Forced to use aperture for PDEs\n");
 
        return 0;
@@ -2060,10 +2051,11 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
        struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int ret;
 
-       ppgtt->base.pte_encode = dev_priv->ggtt.base.pte_encode;
+       ppgtt->base.pte_encode = ggtt->base.pte_encode;
        if (IS_GEN6(dev)) {
                ppgtt->switch_mm = gen6_mm_switch;
        } else if (IS_HASWELL(dev)) {
@@ -2093,7 +2085,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        ppgtt->pd.base.ggtt_offset =
                ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
 
-       ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
+       ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
                ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
 
        gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -2261,9 +2253,10 @@ static bool needs_idle_maps(struct drm_device *dev)
 
 static bool do_idling(struct drm_i915_private *dev_priv)
 {
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool ret = dev_priv->mm.interruptible;
 
-       if (unlikely(dev_priv->ggtt.do_idle_maps)) {
+       if (unlikely(ggtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
@@ -2277,7 +2270,9 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 {
-       if (unlikely(dev_priv->ggtt.do_idle_maps))
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+       if (unlikely(ggtt->do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
 }
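
do_idling() and undo_idling() bracket work that must not run concurrently with the GPU when ggtt->do_idle_maps is set. A hedged sketch of a call site; unmap_needs_idle() is a hypothetical stand-in for the real unmap step:

	bool interruptible;

	interruptible = do_idling(dev_priv);	/* idles the GPU if required */
	unmap_needs_idle(obj);			/* hypothetical unmap work */
	undo_idling(dev_priv, interruptible);	/* restore the saved state */
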
 
@@ -2311,7 +2306,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
 
 static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
 {
-       if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+       if (INTEL_INFO(dev_priv)->gen < 6) {
                intel_gtt_chipset_flush();
        } else {
                I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -2321,7 +2316,8 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
 
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        /* Don't bother messing with faults pre GEN6 as we have little
         * documentation supporting that it's a good idea.
@@ -2331,10 +2327,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 
        i915_check_and_clear_faults(dev);
 
-       dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
-                                      dev_priv->ggtt.base.start,
-                                      dev_priv->ggtt.base.total,
-                                      true);
+       ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+                            true);
 
        i915_ggtt_flush(dev_priv);
 }
@@ -2364,10 +2358,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     uint64_t start,
                                     enum i915_cache_level level, u32 unused)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned first_entry = start >> PAGE_SHIFT;
        gen8_pte_t __iomem *gtt_entries =
-               (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
+               (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
        int i = 0;
        struct sg_page_iter sg_iter;
        dma_addr_t addr = 0; /* shut up gcc */
@@ -2441,10 +2436,11 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     uint64_t start,
                                     enum i915_cache_level level, u32 flags)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned first_entry = start >> PAGE_SHIFT;
        gen6_pte_t __iomem *gtt_entries =
-               (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
+               (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
        int i = 0;
        struct sg_page_iter sg_iter;
        dma_addr_t addr = 0;
@@ -2484,12 +2480,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                                  uint64_t length,
                                  bool use_scratch)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        gen8_pte_t scratch_pte, __iomem *gtt_base =
-               (gen8_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
-       const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
+               (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+       const int max_entries = ggtt_total_entries(ggtt) - first_entry;
        int i;
        int rpm_atomic_seq;
 
@@ -2515,12 +2512,13 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  uint64_t length,
                                  bool use_scratch)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        gen6_pte_t scratch_pte, __iomem *gtt_base =
-               (gen6_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
-       const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
+               (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+       const int max_entries = ggtt_total_entries(ggtt) - first_entry;
        int i;
        int rpm_atomic_seq;
 
@@ -2713,8 +2711,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
         * aperture.  One page should be enough to keep any prefetching inside
         * of the aperture.
         */
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_mm_node *entry;
        struct drm_i915_gem_object *obj;
        unsigned long hole_start, hole_end;
@@ -2722,13 +2720,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 
        BUG_ON(mappable_end > end);
 
-       ggtt_vm->start = start;
+       ggtt->base.start = start;
 
        /* Subtract the guard page before address space initialization to
         * shrink the range used by drm_mm */
-       ggtt_vm->total = end - start - PAGE_SIZE;
-       i915_address_space_init(ggtt_vm, dev_priv);
-       ggtt_vm->total += PAGE_SIZE;
+       ggtt->base.total = end - start - PAGE_SIZE;
+       i915_address_space_init(&ggtt->base, dev_priv);
+       ggtt->base.total += PAGE_SIZE;
 
        if (intel_vgpu_active(dev)) {
                ret = intel_vgt_balloon(dev);
@@ -2737,36 +2735,36 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
        }
 
        if (!HAS_LLC(dev))
-               ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+               ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
 
        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
 
                DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
                              i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
                WARN_ON(i915_gem_obj_ggtt_bound(obj));
-               ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+               ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
                        return ret;
                }
                vma->bound |= GLOBAL_BIND;
                __i915_vma_set_map_and_fenceable(vma);
-               list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
+               list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
        }
 
        /* Clear any non-preallocated blocks */
-       drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+       drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               ggtt_vm->clear_range(ggtt_vm, hole_start,
+               ggtt->base.clear_range(&ggtt->base, hole_start,
                                     hole_end - hole_start, true);
        }
 
        /* And finally clear the reserved guard page */
-       ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+       ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
 
        if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
                struct i915_hw_ppgtt *ppgtt;
@@ -2797,28 +2795,33 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
                                        true);
 
                dev_priv->mm.aliasing_ppgtt = ppgtt;
-               WARN_ON(dev_priv->ggtt.base.bind_vma != ggtt_bind_vma);
-               dev_priv->ggtt.base.bind_vma = aliasing_gtt_bind_vma;
+               WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+               ggtt->base.bind_vma = aliasing_gtt_bind_vma;
        }
 
        return 0;
 }
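
The guard-page arithmetic above (shrink ggtt->base.total by PAGE_SIZE around i915_address_space_init(), then grow it back) keeps drm_mm from ever allocating the last page while the rest of the driver still reports the full size. A worked sketch with hypothetical numbers:

	/* Hypothetical 256 MiB GGTT starting at 0: */
	ggtt->base.total = SZ_256M - PAGE_SIZE;	/* drm_mm manages [0, 256M - 4K) */
	i915_address_space_init(&ggtt->base, dev_priv);
	ggtt->base.total += PAGE_SIZE;		/* ->total reads back as 256M */
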
 
-void i915_gem_init_global_gtt(struct drm_device *dev)
+/**
+ * i915_gem_init_ggtt - Initialize GEM for Global GTT
+ * @dev: DRM device
+ */
+void i915_gem_init_ggtt(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u64 gtt_size, mappable_size;
-
-       gtt_size = dev_priv->ggtt.base.total;
-       mappable_size = dev_priv->ggtt.mappable_end;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-       i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+       i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
 }
 
-void i915_global_gtt_cleanup(struct drm_device *dev)
+/**
+ * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
+ * @dev: DRM device
+ */
+void i915_ggtt_cleanup_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &dev_priv->ggtt.base;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (dev_priv->mm.aliasing_ppgtt) {
                struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2828,15 +2831,15 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
 
        i915_gem_cleanup_stolen(dev);
 
-       if (drm_mm_initialized(&vm->mm)) {
+       if (drm_mm_initialized(&ggtt->base.mm)) {
                if (intel_vgpu_active(dev))
                        intel_vgt_deballoon();
 
-               drm_mm_takedown(&vm->mm);
-               list_del(&vm->global_link);
+               drm_mm_takedown(&ggtt->base.mm);
+               list_del(&ggtt->base.global_link);
        }
 
-       vm->cleanup(vm);
+       ggtt->base.cleanup(&ggtt->base);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2920,13 +2923,14 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
 static int ggtt_probe_common(struct drm_device *dev,
                             size_t gtt_size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_page_scratch *scratch_page;
-       phys_addr_t gtt_phys_addr;
+       phys_addr_t ggtt_phys_addr;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
-       gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
-               (pci_resource_len(dev->pdev, 0) / 2);
+       ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
+                        (pci_resource_len(dev->pdev, 0) / 2);
 
        /*
         * On BXT writes larger than 64 bit to the GTT pagetable range will be
@@ -2936,10 +2940,10 @@ static int ggtt_probe_common(struct drm_device *dev,
         * readback check when writing GTT PTE entries.
         */
        if (IS_BROXTON(dev))
-               dev_priv->ggtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+               ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
        else
-               dev_priv->ggtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
-       if (!dev_priv->ggtt.gsm) {
+               ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
+       if (!ggtt->gsm) {
                DRM_ERROR("Failed to map the gtt page table\n");
                return -ENOMEM;
        }
@@ -2948,11 +2952,11 @@ static int ggtt_probe_common(struct drm_device *dev,
        if (IS_ERR(scratch_page)) {
                DRM_ERROR("Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
-               iounmap(dev_priv->ggtt.gsm);
+               iounmap(ggtt->gsm);
                return PTR_ERR(scratch_page);
        }
 
-       dev_priv->ggtt.base.scratch_page = scratch_page;
+       ggtt->base.scratch_page = scratch_page;
 
        return 0;
 }
@@ -2973,7 +2977,7 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
              GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
              GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
-       if (!USES_PPGTT(dev_priv->dev))
+       if (!USES_PPGTT(dev_priv))
                /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
                 * so RTL will always use the value corresponding to
                 * pat_sel = 000".
@@ -3033,7 +3037,7 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
        struct drm_device *dev = ggtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u16 snb_gmch_ctl;
        int ret;
 
@@ -3074,7 +3078,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
 
-
        return ret;
 }
 
@@ -3124,7 +3127,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
        struct drm_device *dev = ggtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
@@ -3153,9 +3156,13 @@ static void i915_gmch_remove(struct i915_address_space *vm)
        intel_gmch_remove();
 }
 
-int i915_gem_gtt_init(struct drm_device *dev)
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev: DRM device
+ */
+int i915_ggtt_init_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int ret;
 
@@ -3224,33 +3231,30 @@ int i915_gem_gtt_init(struct drm_device *dev)
        return 0;
 
 out_gtt_cleanup:
-       ggtt->base.cleanup(&dev_priv->ggtt.base);
+       ggtt->base.cleanup(&ggtt->base);
 
        return ret;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj;
-       struct i915_address_space *vm;
        struct i915_vma *vma;
        bool flush;
 
        i915_check_and_clear_faults(dev);
 
        /* First fill our portion of the GTT with scratch pages */
-       dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
-                                      dev_priv->ggtt.base.start,
-                                      dev_priv->ggtt.base.total,
-                                      true);
+       ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+                              true);
 
        /* Cache flush objects bound into GGTT and rebind them. */
-       vm = &dev_priv->ggtt.base;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                flush = false;
                list_for_each_entry(vma, &obj->vma_list, obj_link) {
-                       if (vma->vm != vm)
+                       if (vma->vm != &ggtt->base)
                                continue;
 
                        WARN_ON(i915_vma_bind(vma, obj->cache_level,
@@ -3273,15 +3277,17 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        }
 
        if (USES_PPGTT(dev)) {
+               struct i915_address_space *vm;
+
                list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
                        /* TODO: Perhaps it shouldn't be gen6 specific */
 
-                       struct i915_hw_ppgtt *ppgtt =
-                                       container_of(vm, struct i915_hw_ppgtt,
-                                                    base);
+                       struct i915_hw_ppgtt *ppgtt;
 
-                       if (i915_is_ggtt(vm))
+                       if (vm->is_ggtt)
                                ppgtt = dev_priv->mm.aliasing_ppgtt;
+                       else
+                               ppgtt = i915_vm_to_ppgtt(vm);
 
                        gen6_write_page_range(dev_priv, &ppgtt->pd,
                                              0, ppgtt->base.total);
@@ -3340,19 +3346,13 @@ struct i915_vma *
 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
                                       const struct i915_ggtt_view *view)
 {
-       struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
-       struct i915_vma *vma;
-
-       if (WARN_ON(!view))
-               return ERR_PTR(-EINVAL);
-
-       vma = i915_gem_obj_to_ggtt_view(obj, view);
-
-       if (IS_ERR(vma))
-               return vma;
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
        if (!vma)
-               vma = __i915_gem_vma_create(obj, ggtt, view);
+               vma = __i915_gem_vma_create(obj, &ggtt->base, view);
 
        return vma;
 
@@ -3401,8 +3401,9 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
        int ret = -ENOMEM;
 
        /* Allocate a temporary list of source pages for random access. */
-       page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
-                                      sizeof(dma_addr_t));
+       page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+                                       sizeof(dma_addr_t),
+                                       GFP_TEMPORARY);
        if (!page_addr_list)
                return ERR_PTR(ret);
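
drm_malloc_gfp() used above replaces the try-kmalloc-then-fall-back-to-vmalloc pattern that the userptr hunks further down still show being deleted. A sketch of what such a helper plausibly does, inferred from the call sites it replaces (the real definition may differ in detail):

	static inline void *malloc_gfp_sketch(size_t nmemb, size_t size, gfp_t gfp)
	{
		void *ptr;

		if (size != 0 && nmemb > SIZE_MAX / size)
			return NULL;		/* overflow check */

		if (size * nmemb <= PAGE_SIZE)
			return kmalloc(nmemb * size, gfp);

		/* Try the slab quietly first, then fall back to vmalloc. */
		ptr = kmalloc(nmemb * size, gfp | __GFP_NOWARN | __GFP_NORETRY);
		if (ptr)
			return ptr;

		return __vmalloc(nmemb * size, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
	}
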
 
index d804be00ab415345c557d9d93a8ac6c780eb55dd..d7dd3d8a87582c31f2dd95f7abf6852ae76e8369 100644 (file)
@@ -42,7 +42,7 @@ typedef uint64_t gen8_pde_t;
 typedef uint64_t gen8_ppgtt_pdpe_t;
 typedef uint64_t gen8_ppgtt_pml4e_t;
 
-#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
 
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
@@ -513,10 +513,9 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
                px_dma(ppgtt->base.scratch_pd);
 }
 
-int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_global_gtt_cleanup(struct drm_device *dev);
-
+int i915_ggtt_init_hw(struct drm_device *dev);
+void i915_gem_init_ggtt(struct drm_device *dev);
+void i915_ggtt_cleanup_hw(struct drm_device *dev);
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
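
Given the renamed entry points above, the ordering seen from the driver core presumably becomes (a sketch; the actual call sites are outside this diff):

	ret = i915_ggtt_init_hw(dev);	/* probe and map the GGTT */
	if (ret)
		return ret;

	i915_gem_init_ggtt(dev);	/* carve the address space up for GEM */

	/* ... driver lifetime ... */

	i915_ggtt_cleanup_hw(dev);	/* unwind on unload */
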
index d3c473ffb90aaa90325d184777cd9626b6d57165..d46388f25e04e1df880c545c76cc41974d620419 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 
@@ -166,6 +167,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                            obj->madv != I915_MADV_DONTNEED)
                                continue;
 
+                       if (flags & I915_SHRINK_VMAPS &&
+                           !is_vmalloc_addr(obj->mapping))
+                               continue;
+
                        if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
                                continue;
 
@@ -246,7 +251,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 
        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
-               if (obj->pages_pin_count == 0)
+               if (can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -288,35 +293,56 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        return freed;
 }
 
+struct shrinker_lock_uninterruptible {
+       bool was_interruptible;
+       bool unlock;
+};
+
+static bool
+i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
+                                      struct shrinker_lock_uninterruptible *slu,
+                                      int timeout_ms)
+{
+       unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
+
+       while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+               schedule_timeout_killable(1);
+               if (fatal_signal_pending(current))
+                       return false;
+               if (--timeout == 0) {
+                       pr_err("Unable to lock GPU to purge memory.\n");
+                       return false;
+               }
+       }
+
+       slu->was_interruptible = dev_priv->mm.interruptible;
+       dev_priv->mm.interruptible = false;
+       return true;
+}
+
+static void
+i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
+                                        struct shrinker_lock_uninterruptible *slu)
+{
+       dev_priv->mm.interruptible = slu->was_interruptible;
+       if (slu->unlock)
+               mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
-       struct drm_device *dev = dev_priv->dev;
+       struct shrinker_lock_uninterruptible slu;
        struct drm_i915_gem_object *obj;
-       unsigned long timeout = msecs_to_jiffies(5000) + 1;
        unsigned long pinned, bound, unbound, freed_pages;
-       bool was_interruptible;
-       bool unlock;
 
-       while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
-               schedule_timeout_killable(1);
-               if (fatal_signal_pending(current))
-                       return NOTIFY_DONE;
-       }
-       if (timeout == 0) {
-               pr_err("Unable to purge GPU memory due lock contention.\n");
+       if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;
-       }
-
-       was_interruptible = dev_priv->mm.interruptible;
-       dev_priv->mm.interruptible = false;
 
        freed_pages = i915_gem_shrink_all(dev_priv);
 
-       dev_priv->mm.interruptible = was_interruptible;
-
        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
@@ -341,8 +367,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                        bound += obj->base.size;
        }
 
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
+       i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 
        if (freed_pages || unbound || bound)
                pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
@@ -356,6 +381,29 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        return NOTIFY_DONE;
 }
 
+static int
+i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(nb, struct drm_i915_private, mm.vmap_notifier);
+       struct shrinker_lock_uninterruptible slu;
+       unsigned long freed_pages;
+
+       if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+               return NOTIFY_DONE;
+
+       freed_pages = i915_gem_shrink(dev_priv, -1UL,
+                                     I915_SHRINK_BOUND |
+                                     I915_SHRINK_UNBOUND |
+                                     I915_SHRINK_ACTIVE |
+                                     I915_SHRINK_VMAPS);
+
+       i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+
+       *(unsigned long *)ptr += freed_pages;
+       return NOTIFY_DONE;
+}
+
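
The callback above relies on the notifier's void *ptr argument pointing at an unsigned long running total of freed pages (hence the *(unsigned long *)ptr update). A minimal registration sketch for a hypothetical second user of the same interface:

	static int my_vmap_purge(struct notifier_block *nb,
				 unsigned long event, void *ptr)
	{
		unsigned long *total = ptr;	/* accumulator from the purger */

		*total += my_release_vmaps();	/* hypothetical helper */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_vmap_nb = {
		.notifier_call = my_vmap_purge,
	};

	/* init:     WARN_ON(register_vmap_purge_notifier(&my_vmap_nb));   */
	/* teardown: WARN_ON(unregister_vmap_purge_notifier(&my_vmap_nb)); */
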
 /**
  * i915_gem_shrinker_init - Initialize i915 shrinker
  * @dev_priv: i915 device
@@ -371,6 +419,9 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
 
        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+
+       dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
+       WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
 }
 
 /**
@@ -381,6 +432,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
  */
 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
 {
+       WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);
 }
index de891c928b2fe6a87b3d830d4ea5cb345b1f8422..ea06da012d32bf48243a449a4b0276d8ea99ce77 100644 (file)
@@ -72,9 +72,11 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
 {
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
        return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
-                                       alignment, 0,
-                                       dev_priv->ggtt.stolen_usable_size);
+                                                   alignment, 0,
+                                                   ggtt->stolen_usable_size);
 }
 
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -87,7 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct resource *r;
        u32 base;
 
@@ -134,7 +137,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                                         I85X_DRB3, &tmp);
                tom = tmp * MB(32);
 
-               base = tom - tseg_size - dev_priv->ggtt.stolen_size;
+               base = tom - tseg_size - ggtt->stolen_size;
        } else if (IS_845G(dev)) {
                u32 tseg_size = 0;
                u32 tom;
@@ -158,7 +161,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);
 
-               base = tom - tseg_size - dev_priv->ggtt.stolen_size;
+               base = tom - tseg_size - ggtt->stolen_size;
        } else if (IS_I830(dev)) {
                u32 tseg_size = 0;
                u32 tom;
@@ -178,7 +181,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);
 
-               base = tom - tseg_size - dev_priv->ggtt.stolen_size;
+               base = tom - tseg_size - ggtt->stolen_size;
        }
 
        if (base == 0)
@@ -189,41 +192,41 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                struct {
                        u32 start, end;
                } stolen[2] = {
-                       { .start = base, .end = base + dev_priv->ggtt.stolen_size, },
-                       { .start = base, .end = base + dev_priv->ggtt.stolen_size, },
+                       { .start = base, .end = base + ggtt->stolen_size, },
+                       { .start = base, .end = base + ggtt->stolen_size, },
                };
-               u64 gtt_start, gtt_end;
+               u64 ggtt_start, ggtt_end;
 
-               gtt_start = I915_READ(PGTBL_CTL);
+               ggtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev))
-                       gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
-                               (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+                       ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
+                                    (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
-                       gtt_start &= PGTBL_ADDRESS_LO_MASK;
-               gtt_end = gtt_start + gtt_total_entries(dev_priv->ggtt) * 4;
+                       ggtt_start &= PGTBL_ADDRESS_LO_MASK;
+               ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
 
-               if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
-                       stolen[0].end = gtt_start;
-               if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
-                       stolen[1].start = gtt_end;
+               if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
+                       stolen[0].end = ggtt_start;
+               if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
+                       stolen[1].start = ggtt_end;
 
                /* pick the larger of the two chunks */
                if (stolen[0].end - stolen[0].start >
                    stolen[1].end - stolen[1].start) {
                        base = stolen[0].start;
-                       dev_priv->ggtt.stolen_size = stolen[0].end - stolen[0].start;
+                       ggtt->stolen_size = stolen[0].end - stolen[0].start;
                } else {
                        base = stolen[1].start;
-                       dev_priv->ggtt.stolen_size = stolen[1].end - stolen[1].start;
+                       ggtt->stolen_size = stolen[1].end - stolen[1].start;
                }
 
                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
-                                     (unsigned long long) gtt_start,
-                                     (unsigned long long) gtt_end - 1);
+                                     (unsigned long long)ggtt_start,
+                                     (unsigned long long)ggtt_end - 1);
                        DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
-                                     base, base + (u32) dev_priv->ggtt.stolen_size - 1);
+                                     base, base + (u32)ggtt->stolen_size - 1);
                }
        }
 
@@ -233,7 +236,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
-       r = devm_request_mem_region(dev->dev, base, dev_priv->ggtt.stolen_size,
+       r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
@@ -245,7 +248,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                 * reservation starting from 1 instead of 0.
                 */
                r = devm_request_mem_region(dev->dev, base + 1,
-                                           dev_priv->ggtt.stolen_size - 1,
+                                           ggtt->stolen_size - 1,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
@@ -253,7 +256,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                 */
                if (r == NULL && !IS_GEN3(dev)) {
                        DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-                                 base, base + (uint32_t)dev_priv->ggtt.stolen_size);
+                                 base, base + (uint32_t)ggtt->stolen_size);
                        base = 0;
                }
        }
@@ -274,11 +277,12 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
 static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    unsigned long *base, unsigned long *size)
 {
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
                                     CTG_STOLEN_RESERVED :
                                     ELK_STOLEN_RESERVED);
        unsigned long stolen_top = dev_priv->mm.stolen_base +
-               dev_priv->ggtt.stolen_size;
+                                  ggtt->stolen_size;
 
        *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
 
@@ -369,10 +373,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
 static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                    unsigned long *base, unsigned long *size)
 {
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
        unsigned long stolen_top;
 
-       stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
+       stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
 
        *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
 
@@ -388,7 +393,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
 
 int i915_gem_init_stolen(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned long reserved_total, reserved_base = 0, reserved_size;
        unsigned long stolen_top;
 
@@ -401,14 +407,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
        }
 #endif
 
-       if (dev_priv->ggtt.stolen_size == 0)
+       if (ggtt->stolen_size == 0)
                return 0;
 
        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;
 
-       stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
+       stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
 
        switch (INTEL_INFO(dev_priv)->gen) {
        case 2:
@@ -458,19 +464,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
                return 0;
        }
 
-       dev_priv->ggtt.stolen_reserved_base = reserved_base;
-       dev_priv->ggtt.stolen_reserved_size = reserved_size;
+       ggtt->stolen_reserved_base = reserved_base;
+       ggtt->stolen_reserved_size = reserved_size;
 
        /* It is possible for the reserved area to end before the end of stolen
         * memory, so just consider the start. */
        reserved_total = stolen_top - reserved_base;
 
        DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
-                     dev_priv->ggtt.stolen_size >> 10,
-                     (dev_priv->ggtt.stolen_size - reserved_total) >> 10);
+                     ggtt->stolen_size >> 10,
+                     (ggtt->stolen_size - reserved_total) >> 10);
 
-       dev_priv->ggtt.stolen_usable_size = dev_priv->ggtt.stolen_size -
-                                          reserved_total;
+       ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
 
        /*
         * Basic memrange allocator for stolen space.
@@ -483,7 +488,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
         * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
         * problem later.
         */
-       drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->ggtt.stolen_usable_size);
+       drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
 
        return 0;
 }
@@ -492,12 +497,13 @@ static struct sg_table *
 i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct sg_table *st;
        struct scatterlist *sg;
 
        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
-       BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
+       BUG_ON(offset > ggtt->stolen_size - size);
 
        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
@@ -628,8 +634,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 gtt_offset,
                                               u32 size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *ggtt = &dev_priv->ggtt.base;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
@@ -675,7 +681,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;
 
-       vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+       vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
@@ -688,8 +694,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
-       if (drm_mm_initialized(&ggtt->mm)) {
-               ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+       if (drm_mm_initialized(&ggtt->base.mm)) {
+               ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        goto err;
@@ -697,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
                vma->bound |= GLOBAL_BIND;
                __i915_vma_set_map_and_fenceable(vma);
-               list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+               list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
        }
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
index 962cb4c507cb2ee67e8de4dbce91273d58bbce31..0f94b6c5c9cc009346235189df9cfee17a231d49 100644 (file)
@@ -494,10 +494,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        ret = -ENOMEM;
        pinned = 0;
 
-       pvec = kmalloc(npages*sizeof(struct page *),
-                      GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-       if (pvec == NULL)
-               pvec = drm_malloc_ab(npages, sizeof(struct page *));
+       pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
 
@@ -634,14 +631,11 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        pvec = NULL;
        pinned = 0;
        if (obj->userptr.mm->mm == current->mm) {
-               pvec = kmalloc(num_pages*sizeof(struct page *),
-                              GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+               pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+                                     GFP_TEMPORARY);
                if (pvec == NULL) {
-                       pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
-                       if (pvec == NULL) {
-                               __i915_gem_userptr_set_active(obj, false);
-                               return -ENOMEM;
-                       }
+                       __i915_gem_userptr_set_active(obj, false);
+                       return -ENOMEM;
                }
 
                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
index 54c208665b0d3ae4a155a7d9da8e2fa1dd5b3cb2..89725c9efc2580c2adc061365456b3624bfbffdb 100644 (file)
@@ -296,6 +296,7 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
                }
        }
        err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
+       err_printf(m, "  last_seqno: 0x%08x\n", ring->last_seqno);
        err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
        err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
        err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
@@ -627,6 +628,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                         struct drm_i915_gem_object *src,
                         struct i915_address_space *vm)
 {
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_error_object *dst;
        struct i915_vma *vma = NULL;
        int num_pages;
@@ -653,7 +655,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                vma = i915_gem_obj_to_ggtt(src);
        use_ggtt = (src->cache_level == I915_CACHE_NONE &&
                   vma && (vma->bound & GLOBAL_BIND) &&
-                  reloc_offset + num_pages * PAGE_SIZE <= dev_priv->ggtt.mappable_end);
+                  reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
 
        /* Cannot access stolen address directly, try to use the aperture */
        if (src->stolen) {
@@ -663,12 +665,13 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                        goto unwind;
 
                reloc_offset = i915_gem_obj_ggtt_offset(src);
-               if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->ggtt.mappable_end)
+               if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
                        goto unwind;
        }
 
        /* Cannot access snooped pages through the aperture */
-       if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
+       if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
+           !HAS_LLC(dev_priv))
                goto unwind;
 
        dst->page_count = num_pages;
@@ -689,7 +692,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                         * captures what the GPU read.
                         */
 
-                       s = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+                       s = io_mapping_map_atomic_wc(ggtt->mappable,
                                                     reloc_offset);
                        memcpy_fromio(d, s, PAGE_SIZE);
                        io_mapping_unmap_atomic(s);
@@ -883,7 +886,7 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
        ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
        ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
 
-       if (HAS_VEBOX(dev_priv->dev)) {
+       if (HAS_VEBOX(dev_priv)) {
                ering->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(engine->mmio_base));
                ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
@@ -928,8 +931,9 @@ static void i915_record_ring_state(struct drm_device *dev,
 
        ering->waiting = waitqueue_active(&engine->irq_queue);
        ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
-       ering->seqno = engine->get_seqno(engine, false);
        ering->acthd = intel_ring_get_active_head(engine);
+       ering->seqno = engine->get_seqno(engine);
+       ering->last_seqno = engine->last_submitted_seqno;
        ering->start = I915_READ_START(engine);
        ering->head = I915_READ_HEAD(engine);
        ering->tail = I915_READ_TAIL(engine);
@@ -1015,7 +1019,8 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 static void i915_gem_record_rings(struct drm_device *dev,
                                  struct drm_i915_error_state *error)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_request *request;
        int i, count;
 
@@ -1038,7 +1043,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
                        vm = request->ctx && request->ctx->ppgtt ?
                                &request->ctx->ppgtt->base :
-                               &dev_priv->ggtt.base;
+                               &ggtt->base;
 
                        /* We need to copy these to an anonymous buffer
                         * as the simplest method to avoid being overwritten
@@ -1049,7 +1054,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
                                                         request->batch_obj,
                                                         vm);
 
-                       if (HAS_BROKEN_CS_TLB(dev_priv->dev))
+                       if (HAS_BROKEN_CS_TLB(dev_priv))
                                error->ring[i].wa_batchbuffer =
                                        i915_error_ggtt_object_create(dev_priv,
                                                             engine->scratch.obj);
index e4ba5822289bb6dcbc0a225c02e87d62fe48175a..80786d9f9ad365c83de4d12b2713608894051cfb 100644 (file)
 /* Definitions of GuC H/W registers, bits, etc */
 
 #define GUC_STATUS                     _MMIO(0xc000)
+#define   GS_RESET_SHIFT               0
+#define   GS_MIA_IN_RESET                (0x01 << GS_RESET_SHIFT)
 #define   GS_BOOTROM_SHIFT             1
 #define   GS_BOOTROM_MASK                (0x7F << GS_BOOTROM_SHIFT)
 #define   GS_BOOTROM_RSA_FAILED                  (0x50 << GS_BOOTROM_SHIFT)
+#define   GS_BOOTROM_JUMP_PASSED         (0x76 << GS_BOOTROM_SHIFT)
 #define   GS_UKERNEL_SHIFT             8
 #define   GS_UKERNEL_MASK                (0xFF << GS_UKERNEL_SHIFT)
 #define   GS_UKERNEL_LAPIC_DONE                  (0x30 << GS_UKERNEL_SHIFT)
 #define   GS_UKERNEL_READY               (0xF0 << GS_UKERNEL_SHIFT)
 #define   GS_MIA_SHIFT                 16
 #define   GS_MIA_MASK                    (0x07 << GS_MIA_SHIFT)
-#define   GS_MIA_CORE_STATE              (1 << GS_MIA_SHIFT)
+#define   GS_MIA_CORE_STATE              (0x01 << GS_MIA_SHIFT)
+#define   GS_MIA_HALT_REQUESTED                  (0x02 << GS_MIA_SHIFT)
+#define   GS_MIA_ISR_ENTRY               (0x04 << GS_MIA_SHIFT)
+#define   GS_AUTH_STATUS_SHIFT         30
+#define   GS_AUTH_STATUS_MASK            (0x03 << GS_AUTH_STATUS_SHIFT)
+#define   GS_AUTH_STATUS_BAD             (0x01 << GS_AUTH_STATUS_SHIFT)
+#define   GS_AUTH_STATUS_GOOD            (0x02 << GS_AUTH_STATUS_SHIFT)
 
 #define SOFT_SCRATCH(n)                        _MMIO(0xc180 + (n) * 4)
 #define SOFT_SCRATCH_COUNT             16
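
A hypothetical poll against the new status fields, in the style i915's wait_for() macro encourages (the conditions actually checked in intel_guc_loader.c may differ):

	u32 status;
	int ret;

	ret = wait_for((status = I915_READ(GUC_STATUS),
			((status & GS_BOOTROM_MASK) == GS_BOOTROM_JUMP_PASSED ||
			 (status & GS_UKERNEL_MASK) == GS_UKERNEL_READY) &&
			(status & GS_AUTH_STATUS_MASK) != GS_AUTH_STATUS_BAD),
		       100 /* ms */);
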
index 5aa42395241d7b7d1e525637dece3012914d2e94..679f08c944ef6cc9be8a56818ac73d4729437409 100644 (file)
@@ -1000,6 +1000,7 @@ static void notify_ring(struct intel_engine_cs *engine)
                return;
 
        trace_i915_gem_request_notify(engine);
+       engine->user_interrupts++;
 
        wake_up_all(&engine->irq_queue);
 }
@@ -1218,7 +1219,7 @@ static void ivybridge_parity_work(struct work_struct *work)
                i915_reg_t reg;
 
                slice--;
-               if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+               if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
                        break;
 
                dev_priv->l3_parity.which_slice &= ~(1<<slice);
@@ -1257,7 +1258,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irq(&dev_priv->irq_lock);
-       gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+       gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(&dev_priv->irq_lock);
 
        mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1323,7 +1324,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
                notify_ring(engine);
        if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
-               intel_lrc_irq_handler(engine);
+               tasklet_schedule(&engine->irq_tasklet);
 }
 
 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
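
Scheduling engine->irq_tasklet here implies the tasklet was set up once at engine init, presumably along these lines (a sketch; the real tasklet_init() call lives in intel_lrc.c, outside this hunk):

	tasklet_init(&engine->irq_tasklet,
		     intel_lrc_irq_handler,	/* now runs as a bottom half */
		     (unsigned long)engine);
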
@@ -1626,7 +1627,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
        if (INTEL_INFO(dev_priv)->gen >= 8)
                return;
 
-       if (HAS_VEBOX(dev_priv->dev)) {
+       if (HAS_VEBOX(dev_priv)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(&dev_priv->engine[VECS]);
 
@@ -1828,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);
 
-       for (;;) {
+       do {
                master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
                iir = I915_READ(VLV_IIR);
 
@@ -1856,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
                I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
                POSTING_READ(GEN8_MASTER_IRQ);
-       }
+       } while (0);
 
        enable_rpm_wakeref_asserts(dev_priv);
 
@@ -2805,8 +2806,8 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 static bool
 ring_idle(struct intel_engine_cs *engine, u32 seqno)
 {
-       return (list_empty(&engine->request_list) ||
-               i915_seqno_passed(seqno, engine->last_submitted_seqno));
+       return i915_seqno_passed(seqno,
+                                READ_ONCE(engine->last_submitted_seqno));
 }
 
 static bool
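
ring_idle() now leans entirely on i915_seqno_passed() against a READ_ONCE() snapshot of last_submitted_seqno. That helper is understood to use the standard wraparound-safe comparison, sketched here:

	static inline bool seqno_passed_sketch(u32 seq1, u32 seq2)
	{
		/* Signed subtraction tolerates u32 wraparound: seq1 = 2,
		 * seq2 = 0xfffffffe gives (s32)4 >= 0, i.e. passed. */
		return (s32)(seq1 - seq2) >= 0;
	}
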
@@ -2828,7 +2829,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
        struct drm_i915_private *dev_priv = engine->dev->dev_private;
        struct intel_engine_cs *signaller;
 
-       if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
+       if (INTEL_INFO(dev_priv)->gen >= 8) {
                for_each_engine(signaller, dev_priv) {
                        if (engine == signaller)
                                continue;
@@ -2941,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;
 
-       if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+       if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
                return 1;
 
        /* cursory check for an unkickable deadlock */
@@ -3054,6 +3055,24 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
        return HANGCHECK_HUNG;
 }
 
+static unsigned kick_waiters(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *i915 = to_i915(engine->dev);
+       unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+
+       if (engine->hangcheck.user_interrupts == user_interrupts &&
+           !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
+               if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+                                 engine->name);
+               else
+                       DRM_INFO("Fake missed irq on %s\n",
+                                engine->name);
+               wake_up_all(&engine->irq_queue);
+       }
+
+       return user_interrupts;
+}
 /*
  * This is called when the chip hasn't reported back with completed
  * batchbuffers in a long time. We keep track per ring seqno progress and
@@ -3096,29 +3115,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
        for_each_engine_id(engine, dev_priv, id) {
                u64 acthd;
                u32 seqno;
+               unsigned user_interrupts;
                bool busy = true;
 
                semaphore_clear_deadlocks(dev_priv);
 
-               seqno = engine->get_seqno(engine, false);
+               /* We don't strictly need an irq-barrier here, as we are not
+                * serving an interrupt request, but be paranoid in case the
+                * barrier has side-effects (such as preventing a broken
+                * cacheline snoop) and so be sure that we can see the seqno
+                * advance. If the seqno should stick, due to a stale
+                * cacheline, we would erroneously declare the GPU hung.
+                */
+               if (engine->irq_seqno_barrier)
+                       engine->irq_seqno_barrier(engine);
+
                acthd = intel_ring_get_active_head(engine);
+               seqno = engine->get_seqno(engine);
+
+               /* Reset stuck interrupts between batch advances */
+               user_interrupts = 0;
 
                if (engine->hangcheck.seqno == seqno) {
                        if (ring_idle(engine, seqno)) {
                                engine->hangcheck.action = HANGCHECK_IDLE;
-
                                if (waitqueue_active(&engine->irq_queue)) {
-                                       /* Issue a wake-up to catch stuck h/w. */
-                                       if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
-                                               if (!(dev_priv->gpu_error.test_irq_rings & intel_engine_flag(engine)))
-                                                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-                                                                 engine->name);
-                                               else
-                                                       DRM_INFO("Fake missed irq on %s\n",
-                                                                engine->name);
-                                               wake_up_all(&engine->irq_queue);
-                                       }
                                        /* Safeguard against driver failure */
+                                       user_interrupts = kick_waiters(engine);
                                        engine->hangcheck.score += BUSY;
                                } else
                                        busy = false;
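The hunk above samples the seqno only after the optional irq_seqno_barrier() callback, per the comment about stale cachelines. A hedged sketch of the "barrier, then fresh load" ordering, with illustrative types:

    #include <stdint.h>
    #include <stdio.h>

    struct engine {
            volatile uint32_t hws_seqno;            /* stand-in for the HWS slot */
            void (*irq_seqno_barrier)(struct engine *);
    };

    static uint32_t sample_seqno(struct engine *e)
    {
            if (e->irq_seqno_barrier)               /* flush stale cachelines */
                    e->irq_seqno_barrier(e);
            return e->hws_seqno;                    /* volatile: forced reload */
    }

    static void noop_barrier(struct engine *e) { (void)e; }

    int main(void)
    {
            struct engine e = { .hws_seqno = 42, .irq_seqno_barrier = noop_barrier };
            printf("seqno=%u\n", sample_seqno(&e));
            return 0;
    }
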
@@ -3169,7 +3192,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                                engine->hangcheck.score = 0;
 
                        /* Clear head and subunit states on seqno movement */
-                       engine->hangcheck.acthd = 0;
+                       acthd = 0;
 
                        memset(engine->hangcheck.instdone, 0,
                               sizeof(engine->hangcheck.instdone));
@@ -3177,6 +3200,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
                engine->hangcheck.seqno = seqno;
                engine->hangcheck.acthd = acthd;
+               engine->hangcheck.user_interrupts = user_interrupts;
                busy_count += busy;
        }
 
@@ -3500,6 +3524,26 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
        hotplug = I915_READ(PCH_PORT_HOTPLUG);
        hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
                PORTA_HOTPLUG_ENABLE;
+
+       DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
+                     hotplug, enabled_irqs);
+       hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
+
+       /*
+        * For BXT the invert bit has to be set based on the AOB design
+        * of the HPD detection logic, so update it from the VBT fields.
+        */
+
+       if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
+           intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
+               hotplug |= BXT_DDIA_HPD_INVERT;
+       if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
+           intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
+               hotplug |= BXT_DDIB_HPD_INVERT;
+       if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
+           intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
+               hotplug |= BXT_DDIC_HPD_INVERT;
+
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 }
 
index c839ce952a5045f776a50c9ddf3577f62edbdf30..cea5a390d8c980cd71f1a102e3153ea77b208da2 100644 (file)
@@ -165,6 +165,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN6_GRDOM_MEDIA              (1 << 2)
 #define  GEN6_GRDOM_BLT                        (1 << 3)
 #define  GEN6_GRDOM_VECS               (1 << 4)
+#define  GEN9_GRDOM_GUC                        (1 << 5)
 #define  GEN8_GRDOM_MEDIA2             (1 << 7)
 
 #define RING_PP_DIR_BASE(ring)         _MMIO((ring)->mmio_base+0x228)
@@ -627,6 +628,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   IOSF_PORT_GPIO_SC                    0x48
 #define   IOSF_PORT_GPIO_SUS                   0xa8
 #define   IOSF_PORT_CCU                                0xa9
+#define   CHV_IOSF_PORT_GPIO_N                 0x13
+#define   CHV_IOSF_PORT_GPIO_SE                        0x48
+#define   CHV_IOSF_PORT_GPIO_E                 0xa8
+#define   CHV_IOSF_PORT_GPIO_SW                        0xb2
 #define VLV_IOSF_DATA                          _MMIO(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR                          _MMIO(VLV_DISPLAY_BASE + 0x2108)
 
@@ -791,6 +796,7 @@ enum skl_disp_power_wells {
 #define  DSI_PLL_M1_DIV_SHIFT                  0
 #define  DSI_PLL_M1_DIV_MASK                   (0x1ff << 0)
 #define CCK_CZ_CLOCK_CONTROL                   0x62
+#define CCK_GPLL_CLOCK_CONTROL                 0x67
 #define CCK_DISPLAY_CLOCK_CONTROL              0x6b
 #define CCK_DISPLAY_REF_CLOCK_CONTROL          0x6c
 #define  CCK_TRUNK_FORCE_ON                    (1 << 17)
@@ -1324,6 +1330,7 @@ enum skl_disp_power_wells {
 #define _PORT_CL1CM_DW0_A              0x162000
 #define _PORT_CL1CM_DW0_BC             0x6C000
 #define   PHY_POWER_GOOD               (1 << 16)
+#define   PHY_RESERVED                 (1 << 7)
 #define BXT_PORT_CL1CM_DW0(phy)                _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
                                                        _PORT_CL1CM_DW0_A)
 
@@ -1783,6 +1790,18 @@ enum skl_disp_power_wells {
 #define   GEN9_IZ_HASHING_MASK(slice)                  (0x3 << ((slice) * 2))
 #define   GEN9_IZ_HASHING(slice, val)                  ((val) << ((slice) * 2))
 
+/* WaClearTdlStateAckDirtyBits */
+#define GEN8_STATE_ACK         _MMIO(0x20F0)
+#define GEN9_STATE_ACK_SLICE1  _MMIO(0x20F8)
+#define GEN9_STATE_ACK_SLICE2  _MMIO(0x2100)
+#define   GEN9_STATE_ACK_TDL0 (1 << 12)
+#define   GEN9_STATE_ACK_TDL1 (1 << 13)
+#define   GEN9_STATE_ACK_TDL2 (1 << 14)
+#define   GEN9_STATE_ACK_TDL3 (1 << 15)
+#define   GEN9_SUBSLICE_TDL_ACK_BITS \
+       (GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
+        GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
+
 #define GFX_MODE       _MMIO(0x2520)
 #define GFX_MODE_GEN7  _MMIO(0x229c)
 #define RING_MODE_GEN7(ring)   _MMIO((ring)->mmio_base+0x29c)
@@ -4785,6 +4804,10 @@ enum skl_disp_power_wells {
 #define  CBR_PND_DEADLINE_DISABLE      (1<<31)
 #define  CBR_PWM_CLOCK_MUX_SELECT      (1<<30)
 
+#define CBR4_VLV                       _MMIO(VLV_DISPLAY_BASE + 0x70450)
+#define  CBR_DPLLBMD_PIPE_C            (1<<29)
+#define  CBR_DPLLBMD_PIPE_B            (1<<18)
+
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE     64
 #define I915_FIFO_LINE_SIZE    64
@@ -6185,6 +6208,7 @@ enum skl_disp_power_wells {
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG               _MMIO(0xc4030)  /* SHOTPLUG_CTL */
 #define  PORTA_HOTPLUG_ENABLE          (1 << 28) /* LPT:LP+ & BXT */
+#define  BXT_DDIA_HPD_INVERT            (1 << 27)
 #define  PORTA_HOTPLUG_STATUS_MASK     (3 << 24) /* SPT+ & BXT */
 #define  PORTA_HOTPLUG_NO_DETECT       (0 << 24) /* SPT+ & BXT */
 #define  PORTA_HOTPLUG_SHORT_DETECT    (1 << 24) /* SPT+ & BXT */
@@ -6200,6 +6224,7 @@ enum skl_disp_power_wells {
 #define  PORTD_HOTPLUG_SHORT_DETECT    (1 << 16)
 #define  PORTD_HOTPLUG_LONG_DETECT     (2 << 16)
 #define  PORTC_HOTPLUG_ENABLE          (1 << 12)
+#define  BXT_DDIC_HPD_INVERT            (1 << 11)
 #define  PORTC_PULSE_DURATION_2ms      (0 << 10) /* pre-LPT */
 #define  PORTC_PULSE_DURATION_4_5ms    (1 << 10) /* pre-LPT */
 #define  PORTC_PULSE_DURATION_6ms      (2 << 10) /* pre-LPT */
@@ -6210,6 +6235,7 @@ enum skl_disp_power_wells {
 #define  PORTC_HOTPLUG_SHORT_DETECT    (1 << 8)
 #define  PORTC_HOTPLUG_LONG_DETECT     (2 << 8)
 #define  PORTB_HOTPLUG_ENABLE          (1 << 4)
+#define  BXT_DDIB_HPD_INVERT            (1 << 3)
 #define  PORTB_PULSE_DURATION_2ms      (0 << 2) /* pre-LPT */
 #define  PORTB_PULSE_DURATION_4_5ms    (1 << 2) /* pre-LPT */
 #define  PORTB_PULSE_DURATION_6ms      (2 << 2) /* pre-LPT */
@@ -6219,6 +6245,9 @@ enum skl_disp_power_wells {
 #define  PORTB_HOTPLUG_NO_DETECT       (0 << 0)
 #define  PORTB_HOTPLUG_SHORT_DETECT    (1 << 0)
 #define  PORTB_HOTPLUG_LONG_DETECT     (2 << 0)
+#define  BXT_DDI_HPD_INVERT_MASK       (BXT_DDIA_HPD_INVERT | \
+                                       BXT_DDIB_HPD_INVERT | \
+                                       BXT_DDIC_HPD_INVERT)
 
 #define PCH_PORT_HOTPLUG2              _MMIO(0xc403C)  /* SHOTPLUG_CTL2 SPT+ */
 #define  PORTE_HOTPLUG_ENABLE          (1 << 4)
index afdd8aefb5b75fe8b5d96791b6310ac3f2612e73..dc0def210097eeae4311b2777b0776473d69cfc3 100644 (file)
@@ -562,7 +562,7 @@ TRACE_EVENT(i915_gem_request_notify,
            TP_fast_assign(
                           __entry->dev = engine->dev->primary->index;
                           __entry->ring = engine->id;
-                          __entry->seqno = engine->get_seqno(engine, false);
+                          __entry->seqno = engine->get_seqno(engine);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
index 2891bcfcd71e29adeecc96c3053d9996fca13588..d02efb8cad4dccd7779e3d9897646cc0e7c5f049 100644 (file)
@@ -181,8 +181,8 @@ static int vgt_balloon_space(struct drm_mm *mm,
 int intel_vgt_balloon(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
-       unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
 
        unsigned long mappable_base, mappable_size, mappable_end;
        unsigned long unmappable_base, unmappable_size, unmappable_end;
@@ -202,19 +202,19 @@ int intel_vgt_balloon(struct drm_device *dev)
        DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
                 unmappable_base, unmappable_size / 1024);
 
-       if (mappable_base < ggtt_vm->start ||
-           mappable_end > dev_priv->ggtt.mappable_end ||
-           unmappable_base < dev_priv->ggtt.mappable_end ||
-           unmappable_end > ggtt_vm_end) {
+       if (mappable_base < ggtt->base.start ||
+           mappable_end > ggtt->mappable_end ||
+           unmappable_base < ggtt->mappable_end ||
+           unmappable_end > ggtt_end) {
                DRM_ERROR("Invalid ballooning configuration!\n");
                return -EINVAL;
        }
 
        /* Unmappable graphic memory ballooning */
-       if (unmappable_base > dev_priv->ggtt.mappable_end) {
-               ret = vgt_balloon_space(&ggtt_vm->mm,
+       if (unmappable_base > ggtt->mappable_end) {
+               ret = vgt_balloon_space(&ggtt->base.mm,
                                        &bl_info.space[2],
-                                       dev_priv->ggtt.mappable_end,
+                                       ggtt->mappable_end,
                                        unmappable_base);
 
                if (ret)
@@ -225,30 +225,30 @@ int intel_vgt_balloon(struct drm_device *dev)
         * No need to partition out the last physical page,
         * because it is reserved for the guard page.
         */
-       if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
-               ret = vgt_balloon_space(&ggtt_vm->mm,
+       if (unmappable_end < ggtt_end - PAGE_SIZE) {
+               ret = vgt_balloon_space(&ggtt->base.mm,
                                        &bl_info.space[3],
                                        unmappable_end,
-                                       ggtt_vm_end - PAGE_SIZE);
+                                       ggtt_end - PAGE_SIZE);
                if (ret)
                        goto err;
        }
 
        /* Mappable graphic memory ballooning */
-       if (mappable_base > ggtt_vm->start) {
-               ret = vgt_balloon_space(&ggtt_vm->mm,
+       if (mappable_base > ggtt->base.start) {
+               ret = vgt_balloon_space(&ggtt->base.mm,
                                        &bl_info.space[0],
-                                       ggtt_vm->start, mappable_base);
+                                       ggtt->base.start, mappable_base);
 
                if (ret)
                        goto err;
        }
 
-       if (mappable_end < dev_priv->ggtt.mappable_end) {
-               ret = vgt_balloon_space(&ggtt_vm->mm,
+       if (mappable_end < ggtt->mappable_end) {
+               ret = vgt_balloon_space(&ggtt->base.mm,
                                        &bl_info.space[1],
                                        mappable_end,
-                                       dev_priv->ggtt.mappable_end);
+                                       ggtt->mappable_end);
 
                if (ret)
                        goto err;
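The four vgt_balloon_space() calls and the range checks above imply this layout (reconstructed for illustration, not a figure from the source):

    ggtt->base.start                 ggtt->mappable_end                    ggtt_end
    |  space[0] | guest mappable | space[1] | space[2] | guest unmappable | space[3] |G|
    ^           ^mappable_base   ^mappable_end         ^unmappable_base   ^unmappable_end

    G = the final page, excluded from space[3] because it is reserved for
    the guard page.
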
index fdc8b2a1d0ae9c68fa667f691bc79d4fc767d4a3..56ba8765816ef0ff3fccd265bffd8803cbf1b4a9 100644 (file)
@@ -373,7 +373,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
        if (WARN_ON(port == PORT_A))
                return;
 
-       if (HAS_PCH_IBX(dev_priv->dev)) {
+       if (HAS_PCH_IBX(dev_priv)) {
                aud_config = IBX_AUD_CFG(pipe);
                aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
index 9c406b0f4173bd0507d9cc08be84fbc6658d8a24..eb756c41d9e1343116a285fb1bb250dab80c661e 100644 (file)
@@ -1123,7 +1123,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
        }
 
        /* Parse the I_boost config for SKL and above */
-       if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+       if (bdb->version >= 196 && child->common.iboost) {
                info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
                DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
                              port_name(port), info->dp_boost_level);
@@ -1241,6 +1241,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
                 */
                memcpy(child_dev_ptr, p_child,
                       min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
+
+               /*
+                * Copied the full block; now initialize any fields that are
+                * not available in the current VBT version.
+                */
+               if (bdb->version < 196) {
+                       /* Set default values for bits added from v196 */
+                       child_dev_ptr->common.iboost = 0;
+                       child_dev_ptr->common.hpd_invert = 0;
+               }
+
+               if (bdb->version < 192)
+                       child_dev_ptr->common.lspcon = 0;
        }
        return;
 }
@@ -1585,3 +1598,47 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
 
        return false;
 }
+
+/**
+ * intel_bios_is_port_hpd_inverted - is HPD inverted for @port
+ * @dev_priv:  i915 device instance
+ * @port:      port to check
+ *
+ * Return true if HPD should be inverted for @port.
+ */
+bool
+intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+                               enum port port)
+{
+       int i;
+
+       if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
+               return false;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
+                       continue;
+
+               switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+               case DVO_PORT_DPA:
+               case DVO_PORT_HDMIA:
+                       if (port == PORT_A)
+                               return true;
+                       break;
+               case DVO_PORT_DPB:
+               case DVO_PORT_HDMIB:
+                       if (port == PORT_B)
+                               return true;
+                       break;
+               case DVO_PORT_DPC:
+               case DVO_PORT_HDMIC:
+                       if (port == PORT_C)
+                               return true;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return false;
+}
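bxt_hpd_irq_setup() earlier in this series is the consumer of this helper. As a self-contained illustration of the scan-and-map idea only (the dvo_port encodings below are made up, not the real VBT values):

    #include <stdbool.h>
    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C };

    struct child { int dvo_port; bool hpd_invert; };

    static bool port_hpd_inverted(const struct child *devs, int n, enum port port)
    {
            for (int i = 0; i < n; i++) {
                    if (!devs[i].hpd_invert)
                            continue;
                    /* illustrative mapping: dvo_port 0/1 -> A, 2/3 -> B, 4/5 -> C */
                    if (devs[i].dvo_port / 2 == (int)port)
                            return true;
            }
            return false;
    }

    int main(void)
    {
            struct child vbt[] = { { 2, true }, { 4, false } };
            printf("PORT_B inverted: %d\n", port_hpd_inverted(vbt, 2, PORT_B));
            printf("PORT_C inverted: %d\n", port_hpd_inverted(vbt, 2, PORT_C));
            return 0;
    }
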
index aa0b20dcb834c59fa68b53a36eeb0d146ed2e814..1b3f97449395fe0a4f2c2c53f34ec1b0d54161c2 100644 (file)
@@ -92,10 +92,10 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
 }
 
 /* Set up the pipe CSC unit. */
-static void i9xx_load_csc_matrix(struct drm_crtc *crtc)
+static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
 {
+       struct drm_crtc *crtc = crtc_state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_crtc_state *crtc_state = crtc->state;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int i, pipe = intel_crtc->pipe;
@@ -203,10 +203,10 @@ static void i9xx_load_csc_matrix(struct drm_crtc *crtc)
 /*
  * Set up the pipe CSC unit on CherryView.
  */
-static void cherryview_load_csc_matrix(struct drm_crtc *crtc)
+static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
 {
+       struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_crtc_state *state = crtc->state;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = to_intel_crtc(crtc)->pipe;
        uint32_t mode;
@@ -252,13 +252,13 @@ static void cherryview_load_csc_matrix(struct drm_crtc *crtc)
        I915_WRITE(CGM_PIPE_MODE(pipe), mode);
 }
 
-void intel_color_set_csc(struct drm_crtc *crtc)
+void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc_state->crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (dev_priv->display.load_csc_matrix)
-               dev_priv->display.load_csc_matrix(crtc);
+               dev_priv->display.load_csc_matrix(crtc_state);
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
@@ -303,19 +303,20 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
        }
 }
 
-static void i9xx_load_luts(struct drm_crtc *crtc)
+static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
 {
-       i9xx_load_luts_internal(crtc, crtc->state->gamma_lut);
+       i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
-static void haswell_load_luts(struct drm_crtc *crtc)
+static void haswell_load_luts(struct drm_crtc_state *crtc_state)
 {
+       struct drm_crtc *crtc = crtc_state->crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *intel_crtc_state =
-               to_intel_crtc_state(crtc->state);
+               to_intel_crtc_state(crtc_state);
        bool reenable_ips = false;
 
        /*
@@ -331,24 +332,24 @@ static void haswell_load_luts(struct drm_crtc *crtc)
        intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
        I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
 
-       i9xx_load_luts(crtc);
+       i9xx_load_luts(crtc_state);
 
        if (reenable_ips)
                hsw_enable_ips(intel_crtc);
 }
 
 /* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(struct drm_crtc *crtc)
+static void broadwell_load_luts(struct drm_crtc_state *state)
 {
+       struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_crtc_state *state = crtc->state;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
 
        if (crtc_state_is_legacy(state)) {
-               haswell_load_luts(crtc);
+               haswell_load_luts(state);
                return;
        }
 
@@ -421,11 +422,11 @@ static void broadwell_load_luts(struct drm_crtc *crtc)
 }
 
 /* Loads the palette/gamma unit for the CRTC on CherryView. */
-static void cherryview_load_luts(struct drm_crtc *crtc)
+static void cherryview_load_luts(struct drm_crtc_state *state)
 {
+       struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc_state *state = crtc->state;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        struct drm_color_lut *lut;
        uint32_t i, lut_size;
@@ -481,16 +482,12 @@ static void cherryview_load_luts(struct drm_crtc *crtc)
        i9xx_load_luts_internal(crtc, NULL);
 }
 
-void intel_color_load_luts(struct drm_crtc *crtc)
+void intel_color_load_luts(struct drm_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = crtc_state->crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /* The clocks have to be on to load the palette. */
-       if (!crtc->state->active)
-               return;
-
-       dev_priv->display.load_luts(crtc);
+       dev_priv->display.load_luts(crtc_state);
 }
 
 int intel_color_check(struct drm_crtc *crtc,
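The point of threading an explicit drm_crtc_state through these hooks: during an atomic commit, the state being applied need not be what crtc->state currently points at, so dereferencing crtc->state inside the hook could read stale data. A hedged toy illustration of the difference:

    #include <stdio.h>

    struct crtc_state { int gamma; };
    struct crtc { struct crtc_state *state; };

    /* old shape: implicitly reads whatever crtc->state points at */
    static void load_luts_old(struct crtc *c) { printf("gamma=%d\n", c->state->gamma); }

    /* new shape: the caller says exactly which state to apply */
    static void load_luts_new(struct crtc_state *s) { printf("gamma=%d\n", s->gamma); }

    int main(void)
    {
            struct crtc_state committed = { .gamma = 1 }, pending = { .gamma = 2 };
            struct crtc c = { .state = &committed };
            load_luts_old(&c);        /* stale: prints 1 mid-commit */
            load_luts_new(&pending);  /* correct: prints the state being applied */
            return 0;
    }
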
index 1e083853c70d55e3f23a1878dcf1d90b0fda879f..921edf183d226f9b2606be88f8367542347052e0 100644 (file)
@@ -315,6 +315,9 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
                *dig_port = enc_to_mst(encoder)->primary;
                *port = (*dig_port)->port;
                break;
+       default:
+               WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
+       /* fall through and treat like the common output types below */
        case INTEL_OUTPUT_DISPLAYPORT:
        case INTEL_OUTPUT_EDP:
        case INTEL_OUTPUT_HDMI:
@@ -326,9 +329,6 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
                *dig_port = NULL;
                *port = PORT_E;
                break;
-       default:
-               WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
-               break;
        }
 }
 
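Moving the default label above the shared cases lets an unexpected encoder type warn once and then fall through to the common handling instead of leaving *dig_port and *port unset. The pattern in miniature (illustrative only):

    #include <stdio.h>

    static int classify(int type)
    {
            switch (type) {
            case 0:
                    return -1;              /* handled specially */
            default:
                    fprintf(stderr, "unknown type %d\n", type);
                    /* fallthrough: treat like the common cases below */
            case 1:
            case 2:
                    return 0;               /* shared handling */
            }
    }

    int main(void) { return classify(7); }
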
@@ -629,6 +629,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
                        break;
                }
 
+               rx_ctl_val &= ~FDI_RX_ENABLE;
+               I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+               POSTING_READ(FDI_RX_CTL(PIPE_A));
+
                temp = I915_READ(DDI_BUF_CTL(PORT_E));
                temp &= ~DDI_BUF_CTL_ENABLE;
                I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
@@ -643,10 +647,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 
                intel_wait_ddi_buf_idle(dev_priv, PORT_E);
 
-               rx_ctl_val &= ~FDI_RX_ENABLE;
-               I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
-               POSTING_READ(FDI_RX_CTL(PIPE_A));
-
                /* Reset FDI_RX_MISC pwrdn lanes */
                temp = I915_READ(FDI_RX_MISC(PIPE_A));
                temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -1726,18 +1726,31 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
                             enum dpio_phy phy)
 {
        enum port port;
-       uint32_t val;
+       u32 ports, val;
 
        val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
        val |= GT_DISPLAY_POWER_ON(phy);
        I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
 
-       /* Considering 10ms timeout until BSpec is updated */
-       if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10))
+       /*
+        * The PHY registers start out inaccessible and respond to reads with
+        * all 1s.  Eventually they become accessible as they power up, then
+        * the reserved bit will give the default 0.  Poll on the reserved bit
+        * becoming 0 to find when the PHY is accessible.
+        * The HW team confirmed that the time to reach PHY power good status
+        * is anywhere between 50 us and 100 us.
+        */
+       if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+               (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
                DRM_ERROR("timeout during PHY%d power on\n", phy);
+       }
+
+       if (phy == DPIO_PHY0)
+               ports = BIT(PORT_B) | BIT(PORT_C);
+       else
+               ports = BIT(PORT_A);
 
-       for (port =  (phy == DPIO_PHY0 ? PORT_B : PORT_A);
-            port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
+       for_each_port_masked(port, ports) {
                int lane;
 
                for (lane = 0; lane < 4; lane++) {
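for_each_port_masked() presumably expands to a walk over just the set bits of the port mask, replacing the old phy-dependent begin/end arithmetic. A userspace analogue of such a set-bit walk (the __builtin_ctz() used here is a GCC/Clang builtin):

    #include <stdio.h>

    #define BIT(n) (1u << (n))
    enum port { PORT_A, PORT_B, PORT_C, PORT_D };

    int main(void)
    {
            unsigned int ports = BIT(PORT_B) | BIT(PORT_C);   /* PHY0 in the hunk */

            for (unsigned int m = ports; m; m &= m - 1) {     /* clear lowest bit */
                    int port = __builtin_ctz(m);              /* index of that bit */
                    printf("program PORT_%c\n", 'A' + port);
            }
            return 0;
    }
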
@@ -1898,12 +1911,18 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        uint32_t val;
 
-       intel_ddi_post_disable(intel_encoder);
-
+       /*
+        * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+        * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+        * step 13 is the correct place for it. Step 18 is where it was
+        * originally before the BUN.
+        */
        val = I915_READ(FDI_RX_CTL(PIPE_A));
        val &= ~FDI_RX_ENABLE;
        I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 
+       intel_ddi_post_disable(intel_encoder);
+
        val = I915_READ(FDI_RX_MISC(PIPE_A));
        val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
        val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
index 29aa64be1f03b385e13ca2f4d5a65d803d9f9eaf..551541b3038c27c0287e0a7c8c28f6a69a746a73 100644 (file)
@@ -147,15 +147,12 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv)
        return vco_freq[hpll_freq] * 1000;
 }
 
-static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
-                                 const char *name, u32 reg)
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+                     const char *name, u32 reg, int ref_freq)
 {
        u32 val;
        int divider;
 
-       if (dev_priv->hpll_freq == 0)
-               dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
-
        mutex_lock(&dev_priv->sb_lock);
        val = vlv_cck_read(dev_priv, reg);
        mutex_unlock(&dev_priv->sb_lock);
@@ -166,7 +163,17 @@ static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
             (divider << CCK_FREQUENCY_STATUS_SHIFT),
             "%s change in progress\n", name);
 
-       return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
+       return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
+}
+
+static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+                                 const char *name, u32 reg)
+{
+       if (dev_priv->hpll_freq == 0)
+               dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
+
+       return vlv_get_cck_clock(dev_priv, name, reg,
+                                dev_priv->hpll_freq);
 }
 
 static int
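Worked numbers for the refactored return expression above, DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1), using made-up values (the real ref_freq comes from the HPLL VCO):

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            int ref_freq = 200000;          /* kHz, illustrative HPLL VCO */
            int divider = 7;                /* as decoded from the CCK register */

            /* (ref_freq * 2) / (divider + 1) = 400000 / 8 = 50000 kHz */
            printf("%d kHz\n", DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1));
            return 0;
    }
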
@@ -1160,7 +1167,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
 
-       if (HAS_DDI(dev_priv->dev)) {
+       if (HAS_DDI(dev_priv)) {
                /* DDI does not have a specific FDI_TX register */
                u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
                cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1196,11 +1203,11 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
        u32 val;
 
        /* ILK FDI PLL is always enabled */
-       if (INTEL_INFO(dev_priv->dev)->gen == 5)
+       if (INTEL_INFO(dev_priv)->gen == 5)
                return;
 
        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
-       if (HAS_DDI(dev_priv->dev))
+       if (HAS_DDI(dev_priv))
                return;
 
        val = I915_READ(FDI_TX_CTL(pipe));
@@ -1408,11 +1415,11 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
        if ((val & DP_PORT_EN) == 0)
                return false;
 
-       if (HAS_PCH_CPT(dev_priv->dev)) {
+       if (HAS_PCH_CPT(dev_priv)) {
                u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
                if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
                        return false;
-       } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+       } else if (IS_CHERRYVIEW(dev_priv)) {
                if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
                        return false;
        } else {
@@ -1428,10 +1435,10 @@ static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
        if ((val & SDVO_ENABLE) == 0)
                return false;
 
-       if (HAS_PCH_CPT(dev_priv->dev)) {
+       if (HAS_PCH_CPT(dev_priv)) {
                if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
                        return false;
-       } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+       } else if (IS_CHERRYVIEW(dev_priv)) {
                if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
                        return false;
        } else {
@@ -1447,7 +1454,7 @@ static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
        if ((val & LVDS_PORT_EN) == 0)
                return false;
 
-       if (HAS_PCH_CPT(dev_priv->dev)) {
+       if (HAS_PCH_CPT(dev_priv)) {
                if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
                        return false;
        } else {
@@ -1462,7 +1469,7 @@ static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
 {
        if ((val & ADPA_DAC_ENABLE) == 0)
                return false;
-       if (HAS_PCH_CPT(dev_priv->dev)) {
+       if (HAS_PCH_CPT(dev_priv)) {
                if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
                        return false;
        } else {
@@ -1481,7 +1488,7 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
             i915_mmio_reg_offset(reg), pipe_name(pipe));
 
-       I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
             && (val & DP_PIPEB_SELECT),
             "IBX PCH dp port still using transcoder B\n");
 }
@@ -1494,7 +1501,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             i915_mmio_reg_offset(reg), pipe_name(pipe));
 
-       I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
             && (val & SDVO_PIPE_B_SELECT),
             "IBX PCH hdmi port still using transcoder B\n");
 }
@@ -1528,35 +1535,24 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       i915_reg_t reg = DPLL(crtc->pipe);
+       enum pipe pipe = crtc->pipe;
+       i915_reg_t reg = DPLL(pipe);
        u32 dpll = pipe_config->dpll_hw_state.dpll;
 
-       assert_pipe_disabled(dev_priv, crtc->pipe);
+       assert_pipe_disabled(dev_priv, pipe);
 
        /* PLL is protected by panel, make sure we can write it */
-       if (IS_MOBILE(dev_priv->dev))
-               assert_panel_unlocked(dev_priv, crtc->pipe);
+       assert_panel_unlocked(dev_priv, pipe);
 
        I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150);
 
        if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
-               DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
-
-       I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
-       POSTING_READ(DPLL_MD(crtc->pipe));
+               DRM_ERROR("DPLL %d failed to lock\n", pipe);
 
-       /* We do this three times for luck */
-       I915_WRITE(reg, dpll);
-       POSTING_READ(reg);
-       udelay(150); /* wait for warmup */
-       I915_WRITE(reg, dpll);
-       POSTING_READ(reg);
-       udelay(150); /* wait for warmup */
-       I915_WRITE(reg, dpll);
-       POSTING_READ(reg);
-       udelay(150); /* wait for warmup */
+       I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+       POSTING_READ(DPLL_MD(pipe));
 }
 
 static void chv_enable_pll(struct intel_crtc *crtc,
@@ -1564,11 +1560,14 @@ static void chv_enable_pll(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe = crtc->pipe;
+       enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;
 
-       assert_pipe_disabled(dev_priv, crtc->pipe);
+       assert_pipe_disabled(dev_priv, pipe);
+
+       /* PLL is protected by panel, make sure we can write it */
+       assert_panel_unlocked(dev_priv, pipe);
 
        mutex_lock(&dev_priv->sb_lock);
 
@@ -1591,9 +1590,27 @@ static void chv_enable_pll(struct intel_crtc *crtc,
        if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
 
-       /* not sure when this should be written */
-       I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
-       POSTING_READ(DPLL_MD(pipe));
+       if (pipe != PIPE_A) {
+               /*
+                * WaPixelRepeatModeFixForC0:chv
+                *
+                * DPLLCMD is AWOL. Use chicken bits to propagate
+                * the value from DPLLBMD to either pipe B or C.
+                */
+               I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
+               I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
+               I915_WRITE(CBR4_VLV, 0);
+               dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
+
+               /*
+                * DPLLB VGA mode also seems to cause problems.
+                * We should always have it disabled.
+                */
+               WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+       } else {
+               I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+               POSTING_READ(DPLL_MD(pipe));
+       }
 }
 
 static int intel_num_dvo_pipes(struct drm_device *dev)
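Because DPLL MD cannot be read back on CHV pipes B and C, the value written through the CBR4 chicken bits is cached in dev_priv->chv_dpll_md[]; the i9xx_get_pipe_config() hunk further down reads the cache instead of the register. The shadow-register pattern in miniature (names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t shadow_md[3];                   /* chv_dpll_md[] analogue */

    static void write_dpll_md(int pipe, uint32_t val)
    {
            /* ...program the hardware (not readable back on pipes B/C)... */
            shadow_md[pipe] = val;                  /* remember what we wrote */
    }

    static uint32_t read_dpll_md(int pipe)
    {
            if (pipe != 0)                          /* pipes B/C: use the shadow */
                    return shadow_md[pipe];
            return 0;   /* pipe A: would read the real register here */
    }

    int main(void)
    {
            write_dpll_md(1, 0x00010000);
            printf("%#x\n", read_dpll_md(1));
            return 0;
    }
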
@@ -1617,9 +1634,6 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
 
        assert_pipe_disabled(dev_priv, crtc->pipe);
 
-       /* No really, not for ILK+ */
-       BUG_ON(INTEL_INFO(dev)->gen >= 5);
-
        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev) && !IS_I830(dev))
                assert_panel_unlocked(dev_priv, crtc->pipe);
@@ -1718,16 +1732,13 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);
 
-       /*
-        * Leave integrated clock source and reference clock enabled for pipe B.
-        * The latter is needed for VGA hotplug / manual detection.
-        */
-       val = DPLL_VGA_MODE_DIS;
-       if (pipe == PIPE_B)
-               val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
+       val = DPLL_INTEGRATED_REF_CLK_VLV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+       if (pipe != PIPE_A)
+               val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));
-
 }
 
 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1738,11 +1749,11 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);
 
-       /* Set PLL en = 0 */
        val = DPLL_SSC_REF_CLK_CHV |
                DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
        if (pipe != PIPE_A)
                val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
        I915_WRITE(DPLL(pipe), val);
        POSTING_READ(DPLL(pipe));
 
@@ -1795,9 +1806,6 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
        i915_reg_t reg;
        uint32_t val, pipeconf_val;
 
-       /* PCH only available on ILK+ */
-       BUG_ON(!HAS_PCH_SPLIT(dev));
-
        /* Make sure PCH DPLL is enabled */
        assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
 
@@ -1818,7 +1826,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
        val = I915_READ(reg);
        pipeconf_val = I915_READ(PIPECONF(pipe));
 
-       if (HAS_PCH_IBX(dev_priv->dev)) {
+       if (HAS_PCH_IBX(dev_priv)) {
                /*
                 * Make the BPC in transcoder be consistent with
                 * that in pipeconf reg. For HDMI we must use 8bpc
@@ -1833,7 +1841,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 
        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
-               if (HAS_PCH_IBX(dev_priv->dev) &&
+               if (HAS_PCH_IBX(dev_priv) &&
                    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
@@ -1851,9 +1859,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 {
        u32 val, pipeconf_val;
 
-       /* PCH only available on ILK+ */
-       BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
-
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
        assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
@@ -1948,7 +1953,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
        assert_cursor_disabled(dev_priv, pipe);
        assert_sprites_disabled(dev_priv, pipe);
 
-       if (HAS_PCH_LPT(dev_priv->dev))
+       if (HAS_PCH_LPT(dev_priv))
                pch_transcoder = TRANSCODER_A;
        else
                pch_transcoder = pipe;
@@ -1958,7 +1963,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
-       if (HAS_GMCH_DISPLAY(dev_priv->dev))
+       if (HAS_GMCH_DISPLAY(dev_priv))
                if (crtc->config->has_dsi_encoder)
                        assert_dsi_pll_enabled(dev_priv);
                else
@@ -2444,6 +2449,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2459,7 +2465,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
-       if (size_aligned * 2 > dev_priv->ggtt.stolen_usable_size)
+       if (size_aligned * 2 > ggtt->stolen_usable_size)
                return false;
 
        mutex_lock(&dev->struct_mutex);
@@ -3222,9 +3228,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
                      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
                      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
 
-       if (HAS_DDI(dev))
-               intel_color_set_csc(&crtc->base);
-
        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
         * that in compute_mode_changes we check the native mode (not the pfit
@@ -4723,6 +4726,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
+       struct intel_crtc_state *pipe_config =
+               to_intel_crtc_state(crtc->state);
 
        if (WARN_ON(intel_crtc->active))
                return;
@@ -4770,7 +4775,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
-       intel_color_load_luts(crtc);
+       intel_color_load_luts(&pipe_config->base);
 
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(intel_crtc->config);
@@ -4845,7 +4850,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
        haswell_set_pipemisc(crtc);
 
-       intel_color_set_csc(crtc);
+       intel_color_set_csc(&pipe_config->base);
 
        intel_crtc->active = true;
 
@@ -4874,7 +4879,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
-       intel_color_load_luts(crtc);
+       intel_color_load_luts(&pipe_config->base);
 
        intel_ddi_set_pipe_settings(crtc);
        if (!intel_crtc->config->has_dsi_encoder)
@@ -5263,6 +5268,8 @@ static void intel_update_max_cdclk(struct drm_device *dev)
                        dev_priv->max_cdclk_freq = 450000;
                else
                        dev_priv->max_cdclk_freq = 337500;
+       } else if (IS_BROXTON(dev)) {
+               dev_priv->max_cdclk_freq = 624000;
        } else if (IS_BROADWELL(dev))  {
                /*
                 * FIXME with extra cooling we can allow
@@ -6035,6 +6042,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
+       struct intel_crtc_state *pipe_config =
+               to_intel_crtc_state(crtc->state);
        int pipe = intel_crtc->pipe;
 
        if (WARN_ON(intel_crtc->active))
@@ -6079,7 +6088,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        i9xx_pfit_enable(intel_crtc);
 
-       intel_color_load_luts(crtc);
+       intel_color_load_luts(&pipe_config->base);
 
        intel_update_watermarks(crtc);
        intel_enable_pipe(intel_crtc);
@@ -6106,6 +6115,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
+       struct intel_crtc_state *pipe_config =
+               to_intel_crtc_state(crtc->state);
        int pipe = intel_crtc->pipe;
 
        if (WARN_ON(intel_crtc->active))
@@ -6134,7 +6145,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
        i9xx_pfit_enable(intel_crtc);
 
-       intel_color_load_luts(crtc);
+       intel_color_load_luts(&pipe_config->base);
 
        intel_update_watermarks(crtc);
        intel_enable_pipe(intel_crtc);
@@ -6284,7 +6295,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
 
 /* Cross check the actual hw state with our own modeset state tracking (and it's
  * internal consistency). */
-static void intel_connector_check_state(struct intel_connector *connector)
+static void intel_connector_verify_state(struct intel_connector *connector)
 {
        struct drm_crtc *crtc = connector->base.state->crtc;
 
@@ -6490,7 +6501,7 @@ static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
                return false;
 
        /* HSW can handle pixel rate up to cdclk? */
-       if (IS_HASWELL(dev_priv->dev))
+       if (IS_HASWELL(dev_priv))
                return true;
 
        /*
@@ -7161,24 +7172,27 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
 static void vlv_compute_dpll(struct intel_crtc *crtc,
                             struct intel_crtc_state *pipe_config)
 {
-       u32 dpll, dpll_md;
+       pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+               DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
+       if (crtc->pipe != PIPE_A)
+               pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
-       /*
-        * Enable DPIO clock input. We should never disable the reference
-        * clock for pipe B, since VGA hotplug / manual detection depends
-        * on it.
-        */
-       dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
-               DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
-       /* We should never disable this, set it here for state tracking */
-       if (crtc->pipe == PIPE_B)
-               dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-       dpll |= DPLL_VCO_ENABLE;
-       pipe_config->dpll_hw_state.dpll = dpll;
+       pipe_config->dpll_hw_state.dpll_md =
+               (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void chv_compute_dpll(struct intel_crtc *crtc,
+                            struct intel_crtc_state *pipe_config)
+{
+       pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+               DPLL_VCO_ENABLE;
+       if (crtc->pipe != PIPE_A)
+               pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
-       dpll_md = (pipe_config->pixel_multiplier - 1)
-               << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-       pipe_config->dpll_hw_state.dpll_md = dpll_md;
+       pipe_config->dpll_hw_state.dpll_md =
+               (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 }
 
 static void vlv_prepare_pll(struct intel_crtc *crtc,
@@ -7272,19 +7286,6 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
        mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_compute_dpll(struct intel_crtc *crtc,
-                            struct intel_crtc_state *pipe_config)
-{
-       pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
-               DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
-               DPLL_VCO_ENABLE;
-       if (crtc->pipe != PIPE_A)
-               pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-       pipe_config->dpll_hw_state.dpll_md =
-               (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
 static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
 {
@@ -8174,7 +8175,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        i9xx_get_pfit_config(crtc, pipe_config);
 
        if (INTEL_INFO(dev)->gen >= 4) {
-               tmp = I915_READ(DPLL_MD(crtc->pipe));
+               /* No way to read it out on pipes B and C */
+               if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+                       tmp = dev_priv->chv_dpll_md[crtc->pipe];
+               else
+                       tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
@@ -9260,7 +9265,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 
                ironlake_get_fdi_m_n_config(crtc, pipe_config);
 
-               if (HAS_PCH_IBX(dev_priv->dev)) {
+               if (HAS_PCH_IBX(dev_priv)) {
                        pll_id = (enum intel_dpll_id) crtc->pipe;
                } else {
                        tmp = I915_READ(PCH_DPLL_SEL);
@@ -12569,6 +12574,11 @@ intel_pipe_config_compare(struct drm_device *dev,
                ret = false; \
        }
 
+/* This is required for BDW+ where there is only one set of registers for
+ * switching between the high and low refresh rates (RR).
+ * This macro can be used whenever a comparison has to be made between one
+ * hw state and multiple sw state variables.
+ */
 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name, adjust) && \
@@ -12596,22 +12606,6 @@ intel_pipe_config_compare(struct drm_device *dev,
                ret = false; \
        }
 
-/* This is required for BDW+ where there is only one set of registers for
- * switching between high and low RR.
- * This macro can be used whenever a comparison has to be made between one
- * hw state and multiple sw state variables.
- */
-#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
-       if ((current_config->name != pipe_config->name) && \
-               (current_config->alt_name != pipe_config->name)) { \
-                       INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
-                                 "(expected %i or %i, found %i)\n", \
-                                 current_config->name, \
-                                 current_config->alt_name, \
-                                 pipe_config->name); \
-                       ret = false; \
-       }
-
 #define PIPE_CONF_CHECK_FLAGS(name, mask)      \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
                INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
@@ -12736,7 +12730,6 @@ intel_pipe_config_compare(struct drm_device *dev,
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_P
-#undef PIPE_CONF_CHECK_I_ALT
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
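The semantics the relocated comment describes: the _ALT variant accepts the hw M/N value if it matches either of two sw candidates, and only flags a mismatch when it matches neither. A hedged standalone rendering of that either/or check:

    #include <stdbool.h>
    #include <stdio.h>

    struct m_n { int m, n; };

    static bool m_n_equal(const struct m_n *a, const struct m_n *b)
    {
            return a->m == b->m && a->n == b->n;
    }

    static bool check_m_n_alt(const struct m_n *hw,
                              const struct m_n *sw, const struct m_n *sw_alt)
    {
            if (m_n_equal(hw, sw) || m_n_equal(hw, sw_alt))
                    return true;
            fprintf(stderr, "mismatch: hw %d/%d vs sw %d/%d or %d/%d\n",
                    hw->m, hw->n, sw->m, sw->n, sw_alt->m, sw_alt->n);
            return false;
    }

    int main(void)
    {
            struct m_n hw = { 2, 5 }, sw = { 1, 5 }, sw_alt = { 2, 5 };
            return check_m_n_alt(&hw, &sw, &sw_alt) ? 0 : 1;
    }
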
@@ -12763,48 +12756,43 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
        }
 }
 
-static void check_wm_state(struct drm_device *dev)
+static void verify_wm_state(struct drm_crtc *crtc,
+                           struct drm_crtc_state *new_state)
 {
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct skl_ddb_allocation hw_ddb, *sw_ddb;
-       struct intel_crtc *intel_crtc;
+       struct skl_ddb_entry *hw_entry, *sw_entry;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       const enum pipe pipe = intel_crtc->pipe;
        int plane;
 
-       if (INTEL_INFO(dev)->gen < 9)
+       if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
                return;
 
        skl_ddb_get_hw_state(dev_priv, &hw_ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
-       for_each_intel_crtc(dev, intel_crtc) {
-               struct skl_ddb_entry *hw_entry, *sw_entry;
-               const enum pipe pipe = intel_crtc->pipe;
+       /* planes */
+       for_each_plane(dev_priv, pipe, plane) {
+               hw_entry = &hw_ddb.plane[pipe][plane];
+               sw_entry = &sw_ddb->plane[pipe][plane];
 
-               if (!intel_crtc->active)
+               if (skl_ddb_entry_equal(hw_entry, sw_entry))
                        continue;
 
-               /* planes */
-               for_each_plane(dev_priv, pipe, plane) {
-                       hw_entry = &hw_ddb.plane[pipe][plane];
-                       sw_entry = &sw_ddb->plane[pipe][plane];
-
-                       if (skl_ddb_entry_equal(hw_entry, sw_entry))
-                               continue;
-
-                       DRM_ERROR("mismatch in DDB state pipe %c plane %d "
-                                 "(expected (%u,%u), found (%u,%u))\n",
-                                 pipe_name(pipe), plane + 1,
-                                 sw_entry->start, sw_entry->end,
-                                 hw_entry->start, hw_entry->end);
-               }
-
-               /* cursor */
-               hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-               sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+               DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+                         "(expected (%u,%u), found (%u,%u))\n",
+                         pipe_name(pipe), plane + 1,
+                         sw_entry->start, sw_entry->end,
+                         hw_entry->start, hw_entry->end);
+       }
 
-               if (skl_ddb_entry_equal(hw_entry, sw_entry))
-                       continue;
+       /* cursor */
+       hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+       sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
 
+       if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
                DRM_ERROR("mismatch in DDB state pipe %c cursor "
                          "(expected (%u,%u), found (%u,%u))\n",
                          pipe_name(pipe),
@@ -12814,20 +12802,18 @@ static void check_wm_state(struct drm_device *dev)
 }
 
 static void
-check_connector_state(struct drm_device *dev,
-                     struct drm_atomic_state *old_state)
+verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
 {
-       struct drm_connector_state *old_conn_state;
        struct drm_connector *connector;
-       int i;
 
-       for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+       drm_for_each_connector(connector, dev) {
                struct drm_encoder *encoder = connector->encoder;
                struct drm_connector_state *state = connector->state;
 
-               /* This also checks the encoder/connector hw state with the
-                * ->get_hw_state callbacks. */
-               intel_connector_check_state(to_intel_connector(connector));
+               if (state->crtc != crtc)
+                       continue;
+
+               intel_connector_verify_state(to_intel_connector(connector));
 
                I915_STATE_WARN(state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
@@ -12835,7 +12821,7 @@ check_connector_state(struct drm_device *dev,
 }
 
 static void
-check_encoder_state(struct drm_device *dev)
+verify_encoder_state(struct drm_device *dev)
 {
        struct intel_encoder *encoder;
        struct intel_connector *connector;
@@ -12875,144 +12861,186 @@ check_encoder_state(struct drm_device *dev)
 }
 
 static void
-check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
+verify_crtc_state(struct drm_crtc *crtc,
+                 struct drm_crtc_state *old_crtc_state,
+                 struct drm_crtc_state *new_crtc_state)
 {
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
-       struct drm_crtc_state *old_crtc_state;
-       struct drm_crtc *crtc;
-       int i;
-
-       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               struct intel_crtc_state *pipe_config, *sw_config;
-               bool active;
-
-               if (!needs_modeset(crtc->state) &&
-                   !to_intel_crtc_state(crtc->state)->update_pipe)
-                       continue;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *pipe_config, *sw_config;
+       struct drm_atomic_state *old_state;
+       bool active;
 
-               __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
-               pipe_config = to_intel_crtc_state(old_crtc_state);
-               memset(pipe_config, 0, sizeof(*pipe_config));
-               pipe_config->base.crtc = crtc;
-               pipe_config->base.state = old_state;
+       old_state = old_crtc_state->state;
+       __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
+       pipe_config = to_intel_crtc_state(old_crtc_state);
+       memset(pipe_config, 0, sizeof(*pipe_config));
+       pipe_config->base.crtc = crtc;
+       pipe_config->base.state = old_state;
 
-               DRM_DEBUG_KMS("[CRTC:%d]\n",
-                             crtc->base.id);
+       DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
-               active = dev_priv->display.get_pipe_config(intel_crtc,
-                                                          pipe_config);
+       active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
 
-               /* hw state is inconsistent with the pipe quirk */
-               if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
-                   (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
-                       active = crtc->state->active;
+       /* hw state is inconsistent with the pipe quirk */
+       if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+           (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+               active = new_crtc_state->active;
 
-               I915_STATE_WARN(crtc->state->active != active,
-                    "crtc active state doesn't match with hw state "
-                    "(expected %i, found %i)\n", crtc->state->active, active);
+       I915_STATE_WARN(new_crtc_state->active != active,
+            "crtc active state doesn't match with hw state "
+            "(expected %i, found %i)\n", new_crtc_state->active, active);
 
-               I915_STATE_WARN(intel_crtc->active != crtc->state->active,
-                    "transitional active state does not match atomic hw state "
-                    "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
+       I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
+            "transitional active state does not match atomic hw state "
+            "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
 
-               for_each_encoder_on_crtc(dev, crtc, encoder) {
-                       enum pipe pipe;
+       for_each_encoder_on_crtc(dev, crtc, encoder) {
+               enum pipe pipe;
 
-                       active = encoder->get_hw_state(encoder, &pipe);
-                       I915_STATE_WARN(active != crtc->state->active,
-                               "[ENCODER:%i] active %i with crtc active %i\n",
-                               encoder->base.base.id, active, crtc->state->active);
+               active = encoder->get_hw_state(encoder, &pipe);
+               I915_STATE_WARN(active != new_crtc_state->active,
+                       "[ENCODER:%i] active %i with crtc active %i\n",
+                       encoder->base.base.id, active, new_crtc_state->active);
 
-                       I915_STATE_WARN(active && intel_crtc->pipe != pipe,
-                                       "Encoder connected to wrong pipe %c\n",
-                                       pipe_name(pipe));
+               I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+                               "Encoder connected to wrong pipe %c\n",
+                               pipe_name(pipe));
 
-                       if (active)
-                               encoder->get_config(encoder, pipe_config);
-               }
+               if (active)
+                       encoder->get_config(encoder, pipe_config);
+       }
 
-               if (!crtc->state->active)
-                       continue;
+       if (!new_crtc_state->active)
+               return;
 
-               intel_pipe_config_sanity_check(dev_priv, pipe_config);
+       intel_pipe_config_sanity_check(dev_priv, pipe_config);
 
-               sw_config = to_intel_crtc_state(crtc->state);
-               if (!intel_pipe_config_compare(dev, sw_config,
-                                              pipe_config, false)) {
-                       I915_STATE_WARN(1, "pipe state doesn't match!\n");
-                       intel_dump_pipe_config(intel_crtc, pipe_config,
-                                              "[hw state]");
-                       intel_dump_pipe_config(intel_crtc, sw_config,
-                                              "[sw state]");
-               }
+       sw_config = to_intel_crtc_state(crtc->state);
+       if (!intel_pipe_config_compare(dev, sw_config,
+                                      pipe_config, false)) {
+               I915_STATE_WARN(1, "pipe state doesn't match!\n");
+               intel_dump_pipe_config(intel_crtc, pipe_config,
+                                      "[hw state]");
+               intel_dump_pipe_config(intel_crtc, sw_config,
+                                      "[sw state]");
        }
 }
 
 static void
-check_shared_dpll_state(struct drm_device *dev)
+verify_single_dpll_state(struct drm_i915_private *dev_priv,
+                        struct intel_shared_dpll *pll,
+                        struct drm_crtc *crtc,
+                        struct drm_crtc_state *new_state)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *crtc;
        struct intel_dpll_hw_state dpll_hw_state;
-       int i;
+       unsigned crtc_mask;
+       bool active;
 
-       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-               struct intel_shared_dpll *pll =
-                       intel_get_shared_dpll_by_id(dev_priv, i);
-               unsigned enabled_crtcs = 0, active_crtcs = 0;
-               bool active;
+       memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
 
-               memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+       DRM_DEBUG_KMS("%s\n", pll->name);
 
-               DRM_DEBUG_KMS("%s\n", pll->name);
+       active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
 
-               active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
+       if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
+               I915_STATE_WARN(!pll->on && pll->active_mask,
+                    "pll in active use but not on in sw tracking\n");
+               I915_STATE_WARN(pll->on && !pll->active_mask,
+                    "pll is on but not used by any active crtc\n");
+               I915_STATE_WARN(pll->on != active,
+                    "pll on state mismatch (expected %i, found %i)\n",
+                    pll->on, active);
+       }
 
+       if (!crtc) {
                I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
-                    "more active pll users than references: %x vs %x\n",
-                    pll->active_mask, pll->config.crtc_mask);
+                               "more active pll users than references: %x vs %x\n",
+                               pll->active_mask, pll->config.crtc_mask);
 
-               if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
-                       I915_STATE_WARN(!pll->on && pll->active_mask,
-                            "pll in active use but not on in sw tracking\n");
-                       I915_STATE_WARN(pll->on && !pll->active_mask,
-                            "pll is on but not used by any active crtc\n");
-                       I915_STATE_WARN(pll->on != active,
-                            "pll on state mismatch (expected %i, found %i)\n",
-                            pll->on, active);
-               }
+               return;
+       }
 
-               for_each_intel_crtc(dev, crtc) {
-                       if (crtc->base.state->enable && crtc->config->shared_dpll == pll)
-                               enabled_crtcs |= 1 << drm_crtc_index(&crtc->base);
-                       if (crtc->base.state->active && crtc->config->shared_dpll == pll)
-                               active_crtcs |= 1 << drm_crtc_index(&crtc->base);
-               }
+       crtc_mask = 1 << drm_crtc_index(crtc);
 
-               I915_STATE_WARN(pll->active_mask != active_crtcs,
-                    "pll active crtcs mismatch (expected %x, found %x)\n",
-                    pll->active_mask, active_crtcs);
-               I915_STATE_WARN(pll->config.crtc_mask != enabled_crtcs,
-                    "pll enabled crtcs mismatch (expected %x, found %x)\n",
-                    pll->config.crtc_mask, enabled_crtcs);
+       if (new_state->active)
+               I915_STATE_WARN(!(pll->active_mask & crtc_mask),
+                               "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
+                               pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+       else
+               I915_STATE_WARN(pll->active_mask & crtc_mask,
+                               "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
+                               pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+
+       I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
+                       "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
+                       crtc_mask, pll->config.crtc_mask);
+
+       I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
+                                         &dpll_hw_state,
+                                         sizeof(dpll_hw_state)),
+                       "pll hw state mismatch\n");
+}
+
+static void
+verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
+                        struct drm_crtc_state *old_crtc_state,
+                        struct drm_crtc_state *new_crtc_state)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
+       struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
 
-               I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
-                                      sizeof(dpll_hw_state)),
-                    "pll hw state mismatch\n");
+       if (new_state->shared_dpll)
+               verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+
+       if (old_state->shared_dpll &&
+           old_state->shared_dpll != new_state->shared_dpll) {
+               unsigned crtc_mask = 1 << drm_crtc_index(crtc);
+               struct intel_shared_dpll *pll = old_state->shared_dpll;
+
+               I915_STATE_WARN(pll->active_mask & crtc_mask,
+                               "pll active mismatch (didn't expect pipe %c in active mask)\n",
+                               pipe_name(drm_crtc_index(crtc)));
+               I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
+                               "pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
+                               pipe_name(drm_crtc_index(crtc)));
        }
 }
 
 static void
-intel_modeset_check_state(struct drm_device *dev,
-                         struct drm_atomic_state *old_state)
+intel_modeset_verify_crtc(struct drm_crtc *crtc,
+                        struct drm_crtc_state *old_state,
+                        struct drm_crtc_state *new_state)
+{
+       if (!needs_modeset(new_state) &&
+           !to_intel_crtc_state(new_state)->update_pipe)
+               return;
+
+       verify_wm_state(crtc, new_state);
+       verify_connector_state(crtc->dev, crtc);
+       verify_crtc_state(crtc, old_state, new_state);
+       verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
+}
+
+static void
+verify_disabled_dpll_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < dev_priv->num_shared_dpll; i++)
+               verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+}
+
+static void
+intel_modeset_verify_disabled(struct drm_device *dev)
 {
-       check_wm_state(dev);
-       check_connector_state(dev, old_state);
-       check_encoder_state(dev);
-       check_crtc_state(dev, old_state);
-       check_shared_dpll_state(dev);
+       verify_encoder_state(dev);
+       verify_connector_state(dev, NULL);
+       verify_disabled_dpll_state(dev);
 }
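
The per-crtc verifier above reduces to bitmask invariants over pll->active_mask (crtcs currently driven by the PLL) and pll->config.crtc_mask (crtcs holding a reference). A standalone model of those invariants, with hypothetical mask values:

/* Userspace model of the mask invariants asserted by
 * verify_single_dpll_state(); all values are hypothetical. */
#include <assert.h>
#include <stdio.h>

struct pll_state {
	unsigned active_mask;	/* crtcs actively using the pll */
	unsigned crtc_mask;	/* crtcs holding a reference */
};

static void verify(const struct pll_state *pll, int crtc_index, int active)
{
	unsigned crtc_mask = 1u << crtc_index;

	/* every active user must also hold a reference */
	assert((pll->active_mask & ~pll->crtc_mask) == 0);

	/* an active crtc must be in active_mask, an idle one must not */
	if (active)
		assert(pll->active_mask & crtc_mask);
	else
		assert(!(pll->active_mask & crtc_mask));

	/* the crtc we were handed must hold a reference */
	assert(pll->crtc_mask & crtc_mask);
}

int main(void)
{
	struct pll_state pll = { .active_mask = 0x1, .crtc_mask = 0x3 };

	verify(&pll, 0, 1);	/* pipe A: active and referenced - ok */
	verify(&pll, 1, 0);	/* pipe B: referenced but idle - ok */
	printf("invariants hold\n");
	return 0;
}
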
 
 static void update_scanline_offset(struct intel_crtc *crtc)
@@ -13582,6 +13610,8 @@ static int intel_atomic_commit(struct drm_device *dev,
                if (dev_priv->display.modeset_commit_cdclk &&
                    intel_state->dev_cdclk != dev_priv->cdclk_freq)
                        dev_priv->display.modeset_commit_cdclk(state);
+
+               intel_modeset_verify_disabled(dev);
        }
 
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -13597,18 +13627,6 @@ static int intel_atomic_commit(struct drm_device *dev,
                        dev_priv->display.crtc_enable(crtc);
                }
 
-               if (!modeset &&
-                   crtc->state->active &&
-                   crtc->state->color_mgmt_changed) {
-                       /*
-                        * Only update color management when not doing
-                        * a modeset as this will be done by
-                        * crtc_enable already.
-                        */
-                       intel_color_set_csc(crtc);
-                       intel_color_load_luts(crtc);
-               }
-
                if (!modeset)
                        intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
 
@@ -13648,6 +13666,8 @@ static int intel_atomic_commit(struct drm_device *dev,
 
                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);
+
+               intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
        }
 
        if (intel_state->modeset)
@@ -13657,9 +13677,6 @@ static int intel_atomic_commit(struct drm_device *dev,
        drm_atomic_helper_cleanup_planes(dev, state);
        mutex_unlock(&dev->struct_mutex);
 
-       if (hw_check)
-               intel_modeset_check_state(dev, state);
-
        drm_atomic_state_free(state);
 
        /* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13927,6 +13944,11 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
        if (modeset)
                return;
 
+       if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
+               intel_color_set_csc(crtc->state);
+               intel_color_load_luts(crtc->state);
+       }
+
        if (to_intel_crtc_state(crtc->state)->update_pipe)
                intel_update_pipe_config(intel_crtc, old_intel_state);
        else if (INTEL_INFO(dev)->gen >= 9)
@@ -13970,20 +13992,19 @@ const struct drm_plane_funcs intel_plane_funcs = {
 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
                                                    int pipe)
 {
-       struct intel_plane *primary;
-       struct intel_plane_state *state;
+       struct intel_plane *primary = NULL;
+       struct intel_plane_state *state = NULL;
        const uint32_t *intel_primary_formats;
        unsigned int num_formats;
+       int ret;
 
        primary = kzalloc(sizeof(*primary), GFP_KERNEL);
-       if (primary == NULL)
-               return NULL;
+       if (!primary)
+               goto fail;
 
        state = intel_create_plane_state(&primary->base);
-       if (!state) {
-               kfree(primary);
-               return NULL;
-       }
+       if (!state)
+               goto fail;
        primary->base.state = &state->base;
 
        primary->can_scale = false;
@@ -14025,10 +14046,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
                primary->disable_plane = i9xx_disable_primary_plane;
        }
 
-       drm_universal_plane_init(dev, &primary->base, 0,
-                                &intel_plane_funcs,
-                                intel_primary_formats, num_formats,
-                                DRM_PLANE_TYPE_PRIMARY, NULL);
+       ret = drm_universal_plane_init(dev, &primary->base, 0,
+                                      &intel_plane_funcs,
+                                      intel_primary_formats, num_formats,
+                                      DRM_PLANE_TYPE_PRIMARY, NULL);
+       if (ret)
+               goto fail;
 
        if (INTEL_INFO(dev)->gen >= 4)
                intel_create_rotation_property(dev, primary);
@@ -14036,6 +14059,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
        drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
 
        return &primary->base;
+
+fail:
+       kfree(state);
+       kfree(primary);
+
+       return NULL;
 }
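
Both plane constructors now route every error through a single fail: label; since kfree(NULL) is a no-op, initialising both pointers to NULL lets one label clean up whatever was actually allocated. A minimal sketch of the idiom (struct thing and thing_register() are hypothetical):

/* Sketch of the single-exit cleanup idiom used above; kfree(NULL)
 * is a no-op, so one label frees exactly what was allocated. */
struct thing *thing_create(void)
{
	struct thing *t = NULL;
	struct thing_state *s = NULL;
	int ret;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		goto fail;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		goto fail;

	ret = thing_register(t, s);	/* hypothetical helper */
	if (ret)
		goto fail;

	return t;

fail:
	kfree(s);
	kfree(t);
	return NULL;
}
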
 
 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
@@ -14152,18 +14181,17 @@ intel_update_cursor_plane(struct drm_plane *plane,
 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
                                                   int pipe)
 {
-       struct intel_plane *cursor;
-       struct intel_plane_state *state;
+       struct intel_plane *cursor = NULL;
+       struct intel_plane_state *state = NULL;
+       int ret;
 
        cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
-       if (cursor == NULL)
-               return NULL;
+       if (!cursor)
+               goto fail;
 
        state = intel_create_plane_state(&cursor->base);
-       if (!state) {
-               kfree(cursor);
-               return NULL;
-       }
+       if (!state)
+               goto fail;
        cursor->base.state = &state->base;
 
        cursor->can_scale = false;
@@ -14175,11 +14203,13 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
        cursor->update_plane = intel_update_cursor_plane;
        cursor->disable_plane = intel_disable_cursor_plane;
 
-       drm_universal_plane_init(dev, &cursor->base, 0,
-                                &intel_plane_funcs,
-                                intel_cursor_formats,
-                                ARRAY_SIZE(intel_cursor_formats),
-                                DRM_PLANE_TYPE_CURSOR, NULL);
+       ret = drm_universal_plane_init(dev, &cursor->base, 0,
+                                      &intel_plane_funcs,
+                                      intel_cursor_formats,
+                                      ARRAY_SIZE(intel_cursor_formats),
+                                      DRM_PLANE_TYPE_CURSOR, NULL);
+       if (ret)
+               goto fail;
 
        if (INTEL_INFO(dev)->gen >= 4) {
                if (!dev->mode_config.rotation_property)
@@ -14199,6 +14229,12 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
        drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
 
        return &cursor->base;
+
+fail:
+       kfree(state);
+       kfree(cursor);
+
+       return NULL;
 }
 
 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
@@ -15298,7 +15334,8 @@ fail:
 
 void intel_modeset_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int sprite, ret;
        enum pipe pipe;
        struct intel_crtc *crtc;
@@ -15362,7 +15399,7 @@ void intel_modeset_init(struct drm_device *dev)
                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
        }
 
-       dev->mode_config.fb_base = dev_priv->ggtt.mappable_base;
+       dev->mode_config.fb_base = ggtt->mappable_base;
 
        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev)->num_pipes,
@@ -16173,7 +16210,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 
        /* Note: this does not include DSI transcoders. */
        error->num_transcoders = INTEL_INFO(dev)->num_pipes;
-       if (HAS_DDI(dev_priv->dev))
+       if (HAS_DDI(dev_priv))
                error->num_transcoders++; /* Account for eDP. */
 
        for (i = 0; i < error->num_transcoders; i++) {
index 3bdd8ba59b8c8644d8e4df57d6316c89d5a20c23..da0c3d29fda8e44a27c39871a2752b9402dd1b74 100644 (file)
@@ -129,6 +129,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
 static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe);
+static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 
 static unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
@@ -3787,6 +3788,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */
 
+       if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
+                                   &intel_dp->sink_count, 1) < 0)
+               return false;
+
+       /*
+        * The sink count can change between short pulse HPD interrupts,
+        * hence a member variable in intel_dp tracks any changes
+        * between them.
+        */
+       intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+
+       /*
+        * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+        * a dongle is present but no display. Unless we need to know
+        * whether a dongle is present, the downstream port information
+        * does not need updating, so an early return here skips work
+        * that is not required.
+        */
+       if (!intel_dp->sink_count)
+               return false;
+
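
For reference, DP_GET_SINK_COUNT() (from drm_dp_helper.h) reassembles the split field in DPCD register 0x200: bits 5:0 are the low bits of the count and bit 7 carries count bit 6, while bit 6 of the register is CP_READY. A runnable sketch with a hypothetical readback value:

/* Decoding DPCD SINK_COUNT; macro as defined in drm_dp_helper.h. */
#include <stdint.h>
#include <stdio.h>

#define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))

int main(void)
{
	uint8_t raw = 0x81;	/* hypothetical readback: bit 7 set, low bits = 1 */

	printf("sink_count = %u\n", DP_GET_SINK_COUNT(raw));	/* prints 65 */
	return 0;
}
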
        /* Check if the panel supports PSR */
        memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
        if (is_edp(intel_dp)) {
@@ -4214,6 +4236,36 @@ go_again:
        return -EINVAL;
 }
 
+static void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+       struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       u8 link_status[DP_LINK_STATUS_SIZE];
+
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+       if (!intel_dp_get_link_status(intel_dp, link_status)) {
+               DRM_ERROR("Failed to get link status\n");
+               return;
+       }
+
+       if (!intel_encoder->base.crtc)
+               return;
+
+       if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+               return;
+
+       /* if link training is requested we should always perform it */
+       if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
+           (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
+               DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+                             intel_encoder->base.name);
+               intel_dp_start_link_train(intel_dp);
+               intel_dp_stop_link_train(intel_dp);
+       }
+}
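
The retrain condition above is a two-clause predicate: compliance testing forces a retrain, otherwise we retrain only when drm_dp_channel_eq_ok() reports that channel equalization has been lost. A condensed sketch of just that decision (surrounding locking and crtc checks elided):

/* Sketch of the retrain decision; drm_dp_channel_eq_ok() is the
 * real drm_dp_helper function, the rest of the context is elided. */
static bool needs_retrain(struct intel_dp *intel_dp,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	/* compliance testing may force a retrain regardless of status */
	if (intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING)
		return true;

	/* otherwise retrain only if channel EQ is no longer good */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
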
+
 /*
  * According to DP spec
  * 5.1.2:
@@ -4221,16 +4273,19 @@ go_again:
  *  2. Configure link according to Receiver Capabilities
  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
  *  4. Check link status on receipt of hot-plug interrupt
+ *
+ * intel_dp_short_pulse - handles short pulse interrupts
+ * when full detection is not required.
+ * Returns %true if the short pulse was handled and full detection
+ * is not required, %false otherwise.
  */
-static void
-intel_dp_check_link_status(struct intel_dp *intel_dp)
+static bool
+intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        u8 sink_irq_vector;
-       u8 link_status[DP_LINK_STATUS_SIZE];
-
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       u8 old_sink_count = intel_dp->sink_count;
+       bool ret;
 
        /*
         * Clearing compliance test variables to allow capturing
@@ -4240,20 +4295,17 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
        intel_dp->compliance_test_type = 0;
        intel_dp->compliance_test_data = 0;
 
-       if (!intel_encoder->base.crtc)
-               return;
-
-       if (!to_intel_crtc(intel_encoder->base.crtc)->active)
-               return;
-
-       /* Try to read receiver status if the link appears to be up */
-       if (!intel_dp_get_link_status(intel_dp, link_status)) {
-               return;
-       }
+       /*
+        * Now read the DPCD to see if it's actually running.
+        * If the current sink count doesn't match the value stored
+        * earlier, or the DPCD read failed, full detection is needed.
+        */
+       ret = intel_dp_get_dpcd(intel_dp);
 
-       /* Now read the DPCD to see if it's actually running */
-       if (!intel_dp_get_dpcd(intel_dp)) {
-               return;
+       if ((old_sink_count != intel_dp->sink_count) || !ret) {
+               /* No need to proceed if we are going to do full detect */
+               return false;
        }
 
        /* Try to read the source of the interrupt */
@@ -4270,14 +4322,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }
 
-       /* if link training is requested we should perform it always */
-       if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
-               (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
-               DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
-                             intel_encoder->base.name);
-               intel_dp_start_link_train(intel_dp);
-               intel_dp_stop_link_train(intel_dp);
-       }
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       intel_dp_check_link_status(intel_dp);
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+       return true;
 }
 
 /* XXX this is probably wrong for multiple downstream ports */
@@ -4297,14 +4346,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
        /* If we're HPD-aware, SINK_COUNT changes dynamically */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
-               uint8_t reg;
-
-               if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
-                                           &reg, 1) < 0)
-                       return connector_status_unknown;
 
-               return DP_GET_SINK_COUNT(reg) ? connector_status_connected
-                                             : connector_status_disconnected;
+               return intel_dp->sink_count ?
+                       connector_status_connected : connector_status_disconnected;
        }
 
        /* If no HPD, poke DDC gently */
@@ -4513,6 +4557,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct edid *edid;
 
+       intel_dp_unset_edid(intel_dp);
        edid = intel_dp_get_edid(intel_dp);
        intel_connector->detect_edid = edid;
 
@@ -4533,9 +4578,10 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
        intel_dp->has_audio = false;
 }
 
-static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector, bool force)
+static void
+intel_dp_long_pulse(struct intel_connector *intel_connector)
 {
+       struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -4545,17 +4591,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        bool ret;
        u8 sink_irq_vector;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
-       intel_dp_unset_edid(intel_dp);
-
-       if (intel_dp->is_mst) {
-               /* MST devices are disconnected from a monitor POV */
-               if (intel_encoder->type != INTEL_OUTPUT_EDP)
-                       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-               return connector_status_disconnected;
-       }
-
        power_domain = intel_display_port_aux_power_domain(intel_encoder);
        intel_display_power_get(to_i915(dev), power_domain);
 
@@ -4576,16 +4611,30 @@ intel_dp_detect(struct drm_connector *connector, bool force)
                goto out;
        }
 
+       if (intel_encoder->type != INTEL_OUTPUT_EDP)
+               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+
        intel_dp_probe_oui(intel_dp);
 
        ret = intel_dp_probe_mst(intel_dp);
        if (ret) {
-               /* if we are in MST mode then this connector
-                  won't appear connected or have anything with EDID on it */
-               if (intel_encoder->type != INTEL_OUTPUT_EDP)
-                       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+               /*
+                * If we are in MST mode then this connector
+                * won't appear connected or have anything
+                * with EDID on it
+                */
                status = connector_status_disconnected;
                goto out;
+       } else if (connector->status == connector_status_connected) {
+               /*
+                * If the display was already connected and still is,
+                * check the link status; there have been known issues
+                * of link loss triggering long pulses.
+                */
+               drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+               intel_dp_check_link_status(intel_dp);
+               drm_modeset_unlock(&dev->mode_config.connection_mutex);
+               goto out;
        }
 
        /*
@@ -4598,9 +4647,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
        intel_dp_set_edid(intel_dp);
 
-       if (intel_encoder->type != INTEL_OUTPUT_EDP)
-               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        status = connector_status_connected;
+       intel_dp->detect_done = true;
 
        /* Try to read the source of the interrupt */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
@@ -4617,8 +4665,54 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        }
 
 out:
+       if (status != connector_status_connected) {
+               intel_dp_unset_edid(intel_dp);
+               /*
+                * If we were in MST mode and the device is gone,
+                * get out of MST mode
+                */
+               if (intel_dp->is_mst) {
+                       DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+                                     intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+                       intel_dp->is_mst = false;
+                       drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+                                                       intel_dp->is_mst);
+               }
+       }
+
        intel_display_power_put(to_i915(dev), power_domain);
-       return status;
+       return;
+}
+
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, connector->name);
+
+       if (intel_dp->is_mst) {
+               /* MST devices are disconnected from a monitor POV */
+               intel_dp_unset_edid(intel_dp);
+               if (intel_encoder->type != INTEL_OUTPUT_EDP)
+                       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+               return connector_status_disconnected;
+       }
+
+       /* If a full detect has not been performed yet, do one now */
+       if (!intel_dp->detect_done)
+               intel_dp_long_pulse(intel_dp->attached_connector);
+
+       intel_dp->detect_done = false;
+
+       if (intel_connector->detect_edid)
+               return connector_status_connected;
+       else
+               return connector_status_disconnected;
 }
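
detect_done acts as a one-shot cache: the long-pulse handler performs the expensive detection and sets the flag, and the next ->detect() call consumes it instead of repeating the work. A small userspace model of that flow (all names local to the sketch):

/* Userspace model of the detect_done one-shot cache. */
#include <stdbool.h>
#include <stdio.h>

struct dp {
	bool detect_done;	/* set by the long-pulse path */
	bool have_edid;		/* stands in for detect_edid */
};

static void long_pulse(struct dp *dp)
{
	dp->have_edid = true;	/* pretend a panel answered */
	dp->detect_done = true;
}

static const char *detect(struct dp *dp)
{
	if (!dp->detect_done)
		long_pulse(dp);		/* no cached result: full detect */
	dp->detect_done = false;	/* cache is valid for one call only */
	return dp->have_edid ? "connected" : "disconnected";
}

int main(void)
{
	struct dp dp = { 0 };

	long_pulse(&dp);		/* hotplug irq did the work... */
	printf("%s\n", detect(&dp));	/* ...so detect() reuses it */
	return 0;
}
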
 
 static void
@@ -4945,44 +5039,37 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                /* indicate that we need to restart link training */
                intel_dp->train_set_valid = false;
 
-               if (!intel_digital_port_connected(dev_priv, intel_dig_port))
-                       goto mst_fail;
+               intel_dp_long_pulse(intel_dp->attached_connector);
+               if (intel_dp->is_mst)
+                       ret = IRQ_HANDLED;
+               goto put_power;
 
-               if (!intel_dp_get_dpcd(intel_dp)) {
-                       goto mst_fail;
-               }
-
-               intel_dp_probe_oui(intel_dp);
-
-               if (!intel_dp_probe_mst(intel_dp)) {
-                       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-                       intel_dp_check_link_status(intel_dp);
-                       drm_modeset_unlock(&dev->mode_config.connection_mutex);
-                       goto mst_fail;
-               }
        } else {
                if (intel_dp->is_mst) {
-                       if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
-                               goto mst_fail;
+                       if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+                               /*
+                                * If we were in MST mode and the device
+                                * is gone, get out of MST mode
+                                */
+                               DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+                                             intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+                               intel_dp->is_mst = false;
+                               drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+                                                               intel_dp->is_mst);
+                               goto put_power;
+                       }
                }
 
                if (!intel_dp->is_mst) {
-                       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-                       intel_dp_check_link_status(intel_dp);
-                       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+                       if (!intel_dp_short_pulse(intel_dp)) {
+                               intel_dp_long_pulse(intel_dp->attached_connector);
+                               goto put_power;
+                       }
                }
        }
 
        ret = IRQ_HANDLED;
 
-       goto put_power;
-mst_fail:
-       /* if we were in MST mode, and device is not there get out of MST mode */
-       if (intel_dp->is_mst) {
-               DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
-               intel_dp->is_mst = false;
-               drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
-       }
 put_power:
        intel_display_power_put(dev_priv, power_domain);
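
The reworked handler's decision tree: a long pulse always runs the full intel_dp_long_pulse(), while a short pulse first tries intel_dp_short_pulse() and falls back to the long-pulse path only when it returns false (sink count changed or the DPCD read failed). A compact sketch of just that dispatch, with power handling and MST status checks elided:

/* Approximate sketch of the hpd dispatch after this rework. */
static irqreturn_t hpd_pulse(struct intel_dp *intel_dp, bool long_hpd)
{
	if (long_hpd) {
		intel_dp_long_pulse(intel_dp->attached_connector);
		return IRQ_HANDLED;
	}

	/* short pulse: cheap path first, full detect only on demand */
	if (!intel_dp->is_mst && !intel_dp_short_pulse(intel_dp))
		intel_dp_long_pulse(intel_dp->attached_connector);

	return IRQ_HANDLED;
}
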
 
index 19bfe6743ef23f8577a44670e1bfe86218942689..0bde6a4259fd4526cba5bd8642fb590ced3ab482 100644 (file)
@@ -89,14 +89,16 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
        if (WARN_ON(pll == NULL))
                return;
 
+       mutex_lock(&dev_priv->dpll_lock);
        WARN_ON(!pll->config.crtc_mask);
-       if (pll->active_mask == 0) {
+       if (!pll->active_mask) {
                DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
                WARN_ON(pll->on);
                assert_shared_dpll_disabled(dev_priv, pll);
 
                pll->funcs.mode_set(dev_priv, pll);
        }
+       mutex_unlock(&dev_priv->dpll_lock);
 }
 
 /**
@@ -113,14 +115,17 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
        unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
-       unsigned old_mask = pll->active_mask;
+       unsigned old_mask;
 
        if (WARN_ON(pll == NULL))
                return;
 
+       mutex_lock(&dev_priv->dpll_lock);
+       old_mask = pll->active_mask;
+
        if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
            WARN_ON(pll->active_mask & crtc_mask))
-               return;
+               goto out;
 
        pll->active_mask |= crtc_mask;
 
@@ -131,13 +136,16 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
        if (old_mask) {
                WARN_ON(!pll->on);
                assert_shared_dpll_enabled(dev_priv, pll);
-               return;
+               goto out;
        }
        WARN_ON(pll->on);
 
        DRM_DEBUG_KMS("enabling %s\n", pll->name);
        pll->funcs.enable(dev_priv, pll);
        pll->on = true;
+
+out:
+       mutex_unlock(&dev_priv->dpll_lock);
 }
 
 void intel_disable_shared_dpll(struct intel_crtc *crtc)
@@ -154,8 +162,9 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
        if (pll == NULL)
                return;
 
+       mutex_lock(&dev_priv->dpll_lock);
        if (WARN_ON(!(pll->active_mask & crtc_mask)))
-               return;
+               goto out;
 
        DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
                      pll->name, pll->active_mask, pll->on,
@@ -166,11 +175,14 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
 
        pll->active_mask &= ~crtc_mask;
        if (pll->active_mask)
-               return;
+               goto out;
 
        DRM_DEBUG_KMS("disabling %s\n", pll->name);
        pll->funcs.disable(dev_priv, pll);
        pll->on = false;
+
+out:
+       mutex_unlock(&dev_priv->dpll_lock);
 }
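
The locking change follows one pattern in all three functions: take dpll_lock before touching the masks, convert every early return into goto out, and unlock at the single exit so no path skips it. A sketch of the shape (struct shared_res and hw_enable() are hypothetical):

/* Sketch of the lock-all-exits pattern introduced above. */
static void shared_res_get(struct shared_res *res, unsigned mask)
{
	mutex_lock(&res->lock);

	if (WARN_ON(res->active_mask & mask))
		goto out;		/* early exit still unlocks */

	res->active_mask |= mask;
	if (res->active_mask == mask)	/* first user powers it on */
		hw_enable(res);		/* hypothetical helper */

out:
	mutex_unlock(&res->lock);
}
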
 
 static struct intel_shared_dpll *
@@ -286,7 +298,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
        u32 val;
        bool enabled;
 
-       I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
+       I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
 
        val = I915_READ(PCH_DREF_CONTROL);
        enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
@@ -1284,7 +1296,15 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
        enum port port = (enum port)pll->id;    /* 1:1 port->PLL mapping */
 
        temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
-       temp &= ~PORT_PLL_REF_SEL;
+       /*
+        * The polarity of this bit was reversed after the
+        * A1 stepping
+        */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+               temp &= ~PORT_PLL_REF_SEL;
+       else
+               temp |= PORT_PLL_REF_SEL;
+
        /* Non-SSC reference */
        I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
 
@@ -1750,6 +1770,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
 
        dev_priv->dpll_mgr = dpll_mgr;
        dev_priv->num_shared_dpll = i;
+       mutex_init(&dev_priv->dpll_lock);
 
        BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
 
index c87b4503435d7d0635f70c6a073e6c2a4269fdf2..e0fcfa1683cc5359d7d71d351fca82fff4108fa1 100644 (file)
@@ -796,7 +796,9 @@ struct intel_dp {
        uint32_t DP;
        int link_rate;
        uint8_t lane_count;
+       uint8_t sink_count;
        bool has_audio;
+       bool detect_done;
        enum hdmi_force_audio force_audio;
        bool limited_color_range;
        bool color_range_auto;
@@ -1102,6 +1104,8 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
 void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
 
 /* intel_display.c */
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+                     const char *name, u32 reg, int ref_freq);
 extern const struct drm_plane_funcs intel_plane_funcs;
 void intel_init_display_hooks(struct drm_i915_private *dev_priv);
 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
@@ -1669,7 +1673,7 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
 /* intel_color.c */
 void intel_color_init(struct drm_crtc *crtc);
 int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void intel_color_set_csc(struct drm_crtc *crtc);
-void intel_color_load_luts(struct drm_crtc *crtc);
+void intel_color_set_csc(struct drm_crtc_state *crtc_state);
+void intel_color_load_luts(struct drm_crtc_state *crtc_state);
 
 #endif /* __INTEL_DRV_H__ */
index 0de74e1b7ab393ce143b247c5f775d401f577c38..a1e0547484aefc72c5963a2ad3b8c070c03bf200 100644 (file)
@@ -46,6 +46,24 @@ static const struct {
        },
 };
 
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+{
+       /* It just so happens the VBT matches register contents. */
+       switch (fmt) {
+       case VID_MODE_FORMAT_RGB888:
+               return MIPI_DSI_FMT_RGB888;
+       case VID_MODE_FORMAT_RGB666:
+               return MIPI_DSI_FMT_RGB666;
+       case VID_MODE_FORMAT_RGB666_PACKED:
+               return MIPI_DSI_FMT_RGB666_PACKED;
+       case VID_MODE_FORMAT_RGB565:
+               return MIPI_DSI_FMT_RGB565;
+       default:
+               MISSING_CASE(fmt);
+               return MIPI_DSI_FMT_RGB666;
+       }
+}
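
With the helper exported, the VBT parser and this readback path derive bpp identically: mipi_dsi_pixel_format_to_bpp() maps RGB888 and loosely packed RGB666 to 24, RGB666_PACKED to 18 and RGB565 to 16. A usage sketch mirroring both call sites:

/* Usage sketch: register field -> pixel format -> bpp. */
u32 fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
int bpp = mipi_dsi_pixel_format_to_bpp(pixel_format_from_register_bits(fmt));
/* RGB888 -> 24, RGB666 -> 24 (loose), RGB666_PACKED -> 18, RGB565 -> 16 */
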
+
 static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 {
        struct drm_encoder *encoder = &intel_dsi->base.base;
@@ -740,14 +758,74 @@ out_put_power:
        return active;
 }
 
+static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
+                                struct intel_crtc_state *pipe_config)
+{
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_display_mode *adjusted_mode =
+                                       &pipe_config->base.adjusted_mode;
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       unsigned int bpp, fmt;
+       enum port port;
+       u16 vfp, vsync, vbp;
+
+       /*
+        * At least one port is active, as encoder->get_config is called
+        * only if encoder->get_hw_state() returns true.
+        */
+       for_each_dsi_port(port, intel_dsi->ports) {
+               if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+                       break;
+       }
+
+       fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+       pipe_config->pipe_bpp =
+                       mipi_dsi_pixel_format_to_bpp(
+                               pixel_format_from_register_bits(fmt));
+       bpp = pipe_config->pipe_bpp;
+
+       /* In terms of pixels */
+       adjusted_mode->crtc_hdisplay =
+                               I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
+       adjusted_mode->crtc_vdisplay =
+                               I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
+       adjusted_mode->crtc_vtotal =
+                               I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+
+       /*
+        * TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and
+        * calculate hsync_start, hsync_end, htotal and hblank_end
+        */
+
+       /* vertical values are in terms of lines */
+       vfp = I915_READ(MIPI_VFP_COUNT(port));
+       vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
+       vbp = I915_READ(MIPI_VBP_COUNT(port));
+
+       adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+
+       adjusted_mode->crtc_vsync_start =
+                               vfp + adjusted_mode->crtc_vdisplay;
+       adjusted_mode->crtc_vsync_end =
+                               vsync + adjusted_mode->crtc_vsync_start;
+       adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
+       adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
+}
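
The vertical timings are rebuilt from the porch counters: vsync starts vfp lines after the active area and lasts vsync lines, while vblank spans from the end of the active area to vtotal. A worked example with standard 1080p counter values:

/* Worked example of the vertical timing reconstruction above. */
#include <stdio.h>

int main(void)
{
	int vdisplay = 1080, vfp = 4, vsync = 5, vbp = 36;
	int vtotal = vdisplay + vfp + vsync + vbp;	/* 1125, as read back */

	int vsync_start = vdisplay + vfp;		/* 1084 */
	int vsync_end = vsync_start + vsync;		/* 1089 */
	int vblank_start = vdisplay;			/* 1080 */
	int vblank_end = vtotal;			/* 1125 */

	printf("vsync %d-%d, vblank %d-%d, total %d\n",
	       vsync_start, vsync_end, vblank_start, vblank_end, vtotal);
	return 0;
}
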
+
+
 static void intel_dsi_get_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
 {
+       struct drm_device *dev = encoder->base.dev;
        u32 pclk;
        DRM_DEBUG_KMS("\n");
 
        pipe_config->has_dsi_encoder = true;
 
+       if (IS_BROXTON(dev))
+               bxt_dsi_get_pipe_config(encoder, pipe_config);
+
        /*
         * DPLL_MD is not used in case of DSI, reading will get some default value
         * set dpll_md = 0
index ec58ead9ccd1a956bb5c3614635954d9452f3c92..dabde19ee8aad0a4c0cb0044670e83d172a51bbe 100644 (file)
@@ -134,5 +134,6 @@ extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
                                                        enum port port);
 
 struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
 
 #endif /* _INTEL_DSI_H */
index 8302a972d2d446abbea54866885549ba7022eeba..e498f1c3221e368a5d2d53b20c609b7633baa744 100644 (file)
@@ -58,50 +58,41 @@ static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
 
 #define NS_KHZ_RATIO 1000000
 
-#define GPI0_NC_0_HV_DDI0_HPD           0x4130
-#define GPIO_NC_0_HV_DDI0_PAD           0x4138
-#define GPIO_NC_1_HV_DDI0_DDC_SDA       0x4120
-#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD   0x4128
-#define GPIO_NC_2_HV_DDI0_DDC_SCL       0x4110
-#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD   0x4118
-#define GPIO_NC_3_PANEL0_VDDEN          0x4140
-#define GPIO_NC_3_PANEL0_VDDEN_PAD      0x4148
-#define GPIO_NC_4_PANEL0_BLKEN          0x4150
-#define GPIO_NC_4_PANEL0_BLKEN_PAD      0x4158
-#define GPIO_NC_5_PANEL0_BLKCTL         0x4160
-#define GPIO_NC_5_PANEL0_BLKCTL_PAD     0x4168
-#define GPIO_NC_6_PCONF0                0x4180
-#define GPIO_NC_6_PAD                   0x4188
-#define GPIO_NC_7_PCONF0                0x4190
-#define GPIO_NC_7_PAD                   0x4198
-#define GPIO_NC_8_PCONF0                0x4170
-#define GPIO_NC_8_PAD                   0x4178
-#define GPIO_NC_9_PCONF0                0x4100
-#define GPIO_NC_9_PAD                   0x4108
-#define GPIO_NC_10_PCONF0               0x40E0
-#define GPIO_NC_10_PAD                  0x40E8
-#define GPIO_NC_11_PCONF0               0x40F0
-#define GPIO_NC_11_PAD                  0x40F8
-
-struct gpio_table {
-       u16 function_reg;
-       u16 pad_reg;
-       u8 init;
+/* base offsets for gpio pads */
+#define VLV_GPIO_NC_0_HV_DDI0_HPD      0x4130
+#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA  0x4120
+#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL  0x4110
+#define VLV_GPIO_NC_3_PANEL0_VDDEN     0x4140
+#define VLV_GPIO_NC_4_PANEL0_BKLTEN    0x4150
+#define VLV_GPIO_NC_5_PANEL0_BKLTCTL   0x4160
+#define VLV_GPIO_NC_6_HV_DDI1_HPD      0x4180
+#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA  0x4190
+#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL  0x4170
+#define VLV_GPIO_NC_9_PANEL1_VDDEN     0x4100
+#define VLV_GPIO_NC_10_PANEL1_BKLTEN   0x40E0
+#define VLV_GPIO_NC_11_PANEL1_BKLTCTL  0x40F0
+
+#define VLV_GPIO_PCONF0(base_offset)   (base_offset)
+#define VLV_GPIO_PAD_VAL(base_offset)  ((base_offset) + 8)
+
+struct gpio_map {
+       u16 base_offset;
+       bool init;
 };
 
-static struct gpio_table gtable[] = {
-       { GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
-       { GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
-       { GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
-       { GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
-       { GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
-       { GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
-       { GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
-       { GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
-       { GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
-       { GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
-       { GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
-       { GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
+static struct gpio_map vlv_gpio_table[] = {
+       { VLV_GPIO_NC_0_HV_DDI0_HPD },
+       { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
+       { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
+       { VLV_GPIO_NC_3_PANEL0_VDDEN },
+       { VLV_GPIO_NC_4_PANEL0_BKLTEN },
+       { VLV_GPIO_NC_5_PANEL0_BKLTCTL },
+       { VLV_GPIO_NC_6_HV_DDI1_HPD },
+       { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
+       { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
+       { VLV_GPIO_NC_9_PANEL1_VDDEN },
+       { VLV_GPIO_NC_10_PANEL1_BKLTEN },
+       { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
 };
 
 static inline enum port intel_dsi_seq_port_to_port(u8 port)
@@ -196,56 +187,76 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
        return data;
 }
 
-static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+                         u8 gpio_source, u8 gpio_index, bool value)
 {
-       u8 gpio, action;
-       u16 function, pad;
-       u32 val;
-       struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (dev_priv->vbt.dsi.seq_version >= 3)
-               data++;
+       struct gpio_map *map;
+       u16 pconf0, padval;
+       u32 tmp;
+       u8 port;
 
-       gpio = *data++;
-
-       /* pull up/down */
-       action = *data++ & 1;
-
-       if (gpio >= ARRAY_SIZE(gtable)) {
-               DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
-               goto out;
+       if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
+               DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+               return;
        }
 
-       if (!IS_VALLEYVIEW(dev_priv)) {
-               DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
-               goto out;
-       }
+       map = &vlv_gpio_table[gpio_index];
 
        if (dev_priv->vbt.dsi.seq_version >= 3) {
                DRM_DEBUG_KMS("GPIO element v3 not supported\n");
-               goto out;
+               return;
+       } else {
+               if (gpio_source == 0) {
+                       port = IOSF_PORT_GPIO_NC;
+               } else if (gpio_source == 1) {
+                       port = IOSF_PORT_GPIO_SC;
+               } else {
+                       DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+                       return;
+               }
        }
 
-       function = gtable[gpio].function_reg;
-       pad = gtable[gpio].pad_reg;
+       pconf0 = VLV_GPIO_PCONF0(map->base_offset);
+       padval = VLV_GPIO_PAD_VAL(map->base_offset);
 
        mutex_lock(&dev_priv->sb_lock);
-       if (!gtable[gpio].init) {
-               /* program the function */
+       if (!map->init) {
                /* FIXME: remove constant below */
-               vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
-                                 0x2000CC00);
-               gtable[gpio].init = 1;
+               vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
+               map->init = true;
        }
 
-       val = 0x4 | action;
+       tmp = 0x4 | value;
+       vlv_iosf_sb_write(dev_priv, port, padval, tmp);
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+{
+       struct drm_device *dev = intel_dsi->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u8 gpio_source, gpio_index;
+       bool value;
+
+       if (dev_priv->vbt.dsi.seq_version >= 3)
+               data++;
+
+       gpio_index = *data++;
+
+       /* gpio source in sequence v2 only */
+       if (dev_priv->vbt.dsi.seq_version == 2)
+               gpio_source = (*data >> 1) & 3;
+       else
+               gpio_source = 0;
 
        /* pull up/down */
-       vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
-       mutex_unlock(&dev_priv->sb_lock);
+       value = *data++ & 1;
+
+       if (IS_VALLEYVIEW(dev_priv))
+               vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+       else
+               DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
 
-out:
        return data;
 }
 
@@ -412,25 +423,6 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
        .get_modes = vbt_panel_get_modes,
 };
 
-/* XXX: This should be done when parsing the VBT in intel_bios.c */
-static enum mipi_dsi_pixel_format pixel_format_from_vbt(u32 fmt)
-{
-       /* It just so happens the VBT matches register contents. */
-       switch (fmt) {
-       case VID_MODE_FORMAT_RGB888:
-               return MIPI_DSI_FMT_RGB888;
-       case VID_MODE_FORMAT_RGB666:
-               return MIPI_DSI_FMT_RGB666;
-       case VID_MODE_FORMAT_RGB666_PACKED:
-               return MIPI_DSI_FMT_RGB666_PACKED;
-       case VID_MODE_FORMAT_RGB565:
-               return MIPI_DSI_FMT_RGB565;
-       default:
-               MISSING_CASE(fmt);
-               return MIPI_DSI_FMT_RGB666;
-       }
-}
-
 struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
 {
        struct drm_device *dev = intel_dsi->base.base.dev;
@@ -455,7 +447,9 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
        intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
        intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
        intel_dsi->lane_count = mipi_config->lane_cnt + 1;
-       intel_dsi->pixel_format = pixel_format_from_vbt(mipi_config->videomode_color_format << 7);
+       intel_dsi->pixel_format =
+                       pixel_format_from_register_bits(
+                               mipi_config->videomode_color_format << 7);
        bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 
        intel_dsi->dual_link = mipi_config->dual_link;
index 2e571f5f3b229e224e2cfea5f955478b6f211eac..d5a7cfec589b6f0c2e5b30f744eb7809ea2604a8 100644 (file)
@@ -506,6 +506,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
                                      int size,
                                      int fb_cpp)
 {
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int compression_threshold = 1;
        int ret;
        u64 end;
@@ -516,9 +517,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
         * underruns, even if that range is not reserved by the BIOS. */
        if (IS_BROADWELL(dev_priv) ||
            IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-               end = dev_priv->ggtt.stolen_size - 8 * 1024 * 1024;
+               end = ggtt->stolen_size - 8 * 1024 * 1024;
        else
-               end = dev_priv->ggtt.stolen_usable_size;
+               end = ggtt->stolen_usable_size;
 
        /* HACK: This code depends on what we will do in *_enable_fbc. If that
         * code changes, this code needs to change as well.
index 153ea7a3fcf696a1182b7b9459c831a58ce3c085..79ac202f38701d266d4fd15826bfa4ac61cfdf8c 100644 (file)
@@ -122,6 +122,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
        struct drm_framebuffer *fb;
        struct drm_device *dev = helper->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_mode_fb_cmd2 mode_cmd = {};
        struct drm_i915_gem_object *obj = NULL;
        int size, ret;
@@ -146,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
-       if (size * 2 < dev_priv->ggtt.stolen_usable_size)
+       if (size * 2 < ggtt->stolen_usable_size)
                obj = i915_gem_object_create_stolen(dev, size);
        if (obj == NULL)
                obj = i915_gem_alloc_object(dev, size);
@@ -181,7 +182,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
                container_of(helper, struct intel_fbdev, helper);
        struct intel_framebuffer *intel_fb = ifbdev->fb;
        struct drm_device *dev = helper->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct drm_i915_gem_object *obj;
@@ -244,13 +246,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
-       info->apertures->ranges[0].size = dev_priv->ggtt.mappable_end;
+       info->apertures->ranges[0].size = ggtt->mappable_end;
 
        info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
        info->fix.smem_len = size;
 
        info->screen_base =
-               ioremap_wc(dev_priv->ggtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+               ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
                           size);
        if (!info->screen_base) {
                DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -808,8 +810,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-
-       async_synchronize_full();
        if (dev_priv->fbdev)
                drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
 }
@@ -821,7 +821,6 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
        struct intel_fbdev *ifbdev = dev_priv->fbdev;
        struct drm_fb_helper *fb_helper;
 
-       async_synchronize_full();
        if (!ifbdev)
                return;
 
index 19e50fdf9a910b429349363254273ae5506dd292..9be839a242f942752ea44c1278a036c0f94f9443 100644 (file)
@@ -333,7 +333,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
        old = !intel_crtc->pch_fifo_underrun_disabled;
        intel_crtc->pch_fifo_underrun_disabled = !enable;
 
-       if (HAS_PCH_IBX(dev_priv->dev))
+       if (HAS_PCH_IBX(dev_priv))
                ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
                                                enable);
        else
@@ -363,7 +363,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
                return;
 
        /* GMCH can't disable fifo underruns, filter them. */
-       if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+       if (HAS_GMCH_DISPLAY(dev_priv) &&
            to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
                return;
 
index b4976f985369156043f87a3a0408ec2ef0e1d457..876e5da44c4e01ec1cfe225f1b3bc8fc844a939e 100644 (file)
@@ -353,6 +353,24 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
        return ret;
 }
 
+static int i915_reset_guc(struct drm_i915_private *dev_priv)
+{
+       int ret;
+       u32 guc_status;
+
+       ret = intel_guc_reset(dev_priv);
+       if (ret) {
+               DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+               return ret;
+       }
+
+       guc_status = I915_READ(GUC_STATUS);
+       WARN(!(guc_status & GS_MIA_IN_RESET),
+            "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
+
+       return ret;
+}
+
 /**
  * intel_guc_ucode_load() - load GuC uCode into the device
  * @dev:       drm device
@@ -369,7 +387,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-       int err = 0;
+       int retries, err = 0;
 
        if (!i915.enable_guc_submission)
                return 0;
@@ -417,9 +435,33 @@ int intel_guc_ucode_load(struct drm_device *dev)
        if (err)
                goto fail;
 
-       err = guc_ucode_xfer(dev_priv);
-       if (err)
-               goto fail;
+       /*
+        * WaEnableuKernelHeaderValidFix:skl,bxt
+        * For BXT, this WA applies only up to B0, but the WA below is
+        * required for later steppings as well, so it is extended too.
+        */
+       /* WaEnableGuCBootHashCheckNotSet:skl,bxt */
+       for (retries = 3; ; ) {
+               /*
+                * Always reset the GuC just before (re)loading, so
+                * that the state and timing are fairly predictable
+                */
+               err = i915_reset_guc(dev_priv);
+               if (err) {
+                       DRM_ERROR("GuC reset failed, err %d\n", err);
+                       goto fail;
+               }
+
+               err = guc_ucode_xfer(dev_priv);
+               if (!err)
+                       break;
+
+               if (--retries == 0)
+                       goto fail;
+
+               DRM_INFO("GuC fw load failed, err %d; will reset and "
+                       "retry %d more time(s)\n", err, retries);
+       }
 
        guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
 
@@ -440,6 +482,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
        return 0;
 
 fail:
+       DRM_ERROR("GuC firmware load failed, err %d\n", err);
        if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
                guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
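
A minimal sketch of the reset-then-retry pattern the new loader uses: bring the hardware to a known state before every attempt, retry a bounded number of times, and report the final error. reset_hw()/load_fw() are userspace stubs standing in for i915_reset_guc()/guc_ucode_xfer(); the failure count is made up for illustration:

    #include <stdio.h>

    static int fails_left = 2;                /* simulate two flaky loads */
    static int reset_hw(void) { return 0; }
    static int load_fw(void)  { return fails_left-- > 0 ? -5 /* -EIO */ : 0; }

    static int load_fw_with_retries(int retries)
    {
            int err = -5;

            while (retries--) {
                    err = reset_hw();         /* known state before every (re)load */
                    if (err)
                            return err;       /* a failed reset is fatal */
                    err = load_fw();
                    if (!err)
                            return 0;
                    printf("load failed (%d), %d tries left\n", err, retries);
            }
            return err;                       /* out of retries */
    }

    int main(void) { return load_fw_with_retries(3) ? 1 : 0; }

Resetting before the first attempt too, not just between retries, is what makes the timing of each load predictable.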
 
index e2dab4828508d7778a82e5e86bc6ca6e6fb09f9c..b199ede08f72922f5ed640d4d00aa74b1898696a 100644 (file)
@@ -638,7 +638,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
                reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
-       else if (HAS_PCH_SPLIT(dev_priv->dev))
+       else if (HAS_PCH_SPLIT(dev_priv))
                reg = TVIDEO_DIP_GCP(crtc->pipe);
        else
                return false;
index 52fbe530fc9eac207093271a05d44dbf3515e11c..6dbe73ecb41ad0b16ff9472370a9250841955c2e 100644 (file)
@@ -124,7 +124,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
        u32 val;
 
        /* When using bit bashing for I2C, this bit needs to be set to 1 */
-       if (!IS_PINEVIEW(dev_priv->dev))
+       if (!IS_PINEVIEW(dev_priv))
                return;
 
        val = I915_READ(DSPCLK_GATE_D);
@@ -264,7 +264,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
        u32 gmbus2 = 0;
        DEFINE_WAIT(wait);
 
-       if (!HAS_GMBUS_IRQ(dev_priv->dev))
+       if (!HAS_GMBUS_IRQ(dev_priv))
                gmbus4_irq_en = 0;
 
        /* Important: The hw handles only the first bit, so set only one! Since
@@ -300,7 +300,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
 
 #define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
 
-       if (!HAS_GMBUS_IRQ(dev_priv->dev))
+       if (!HAS_GMBUS_IRQ(dev_priv))
                return wait_for(C, 10);
 
        /* Important: The hw handles only the first bit, so set only one! */
index 5d4ca3b11ae2b10d2a07305755bb6ff94fdb04bb..0d6dc5ec4a46332259a79f8ae5ab658eb5808241 100644 (file)
  * preemption, but just sampling the new tail pointer).
  *
  */
+#include <linux/interrupt.h>
 
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
@@ -418,20 +419,18 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
 {
        struct drm_i915_private *dev_priv = rq0->i915;
 
-       /* BUG_ON(!irqs_disabled());  */
-
        execlists_update_context(rq0);
 
        if (rq1)
                execlists_update_context(rq1);
 
-       spin_lock(&dev_priv->uncore.lock);
+       spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 
        execlists_elsp_write(rq0, rq1);
 
        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
-       spin_unlock(&dev_priv->uncore.lock);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
 static void execlists_context_unqueue(struct intel_engine_cs *engine)
@@ -538,13 +537,14 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
 
 /**
  * intel_lrc_irq_handler() - handle Context Switch interrupts
- * @ring: Engine Command Streamer to handle.
+ * @engine: Engine Command Streamer to handle.
  *
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
-void intel_lrc_irq_handler(struct intel_engine_cs *engine)
+static void intel_lrc_irq_handler(unsigned long data)
 {
+       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct drm_i915_private *dev_priv = engine->dev->dev_private;
        u32 status_pointer;
        unsigned int read_pointer, write_pointer;
@@ -552,8 +552,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *engine)
        unsigned int csb_read = 0, i;
        unsigned int submit_contexts = 0;
 
-       spin_lock(&dev_priv->uncore.lock);
-       intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
 
@@ -578,8 +577,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *engine)
                      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
                                    engine->next_context_status_buffer << 8));
 
-       intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
-       spin_unlock(&dev_priv->uncore.lock);
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
        spin_lock(&engine->execlist_lock);
 
@@ -621,7 +619,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 
        i915_gem_request_reference(request);
 
-       spin_lock_irq(&engine->execlist_lock);
+       spin_lock_bh(&engine->execlist_lock);
 
        list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
                if (++num_elements > 2)
@@ -646,7 +644,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
        if (num_elements == 0)
                execlists_context_unqueue(engine);
 
-       spin_unlock_irq(&engine->execlist_lock);
+       spin_unlock_bh(&engine->execlist_lock);
 }
 
 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
@@ -856,11 +854,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So we don't need an immediate
+                        * wrap, and only need to wait for the reserved size,
+                        * measured from the start of the ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
@@ -1033,9 +1031,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
                return;
 
        INIT_LIST_HEAD(&retired_list);
-       spin_lock_irq(&engine->execlist_lock);
+       spin_lock_bh(&engine->execlist_lock);
        list_replace_init(&engine->execlist_retired_req_list, &retired_list);
-       spin_unlock_irq(&engine->execlist_lock);
+       spin_unlock_bh(&engine->execlist_lock);
 
        list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
                struct intel_context *ctx = req->ctx;
@@ -1450,6 +1448,25 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
                wa_ctx_emit(batch, index, MI_NOOP);
        }
 
+       /* WaClearTdlStateAckDirtyBits:bxt */
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+               wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
+
+               wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
+               wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+               wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
+               wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+               wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
+               wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+               wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
+               /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
+               wa_ctx_emit(batch, index, 0x0);
+               wa_ctx_emit(batch, index, MI_NOOP);
+       }
+
        /* WaDisableCtxRestoreArbitration:skl,bxt */
        if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
            IS_BXT_REVID(dev, 0, BXT_REVID_A1))
@@ -1854,7 +1871,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
+static u32 gen8_get_seqno(struct intel_engine_cs *engine)
 {
        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
@@ -1864,10 +1881,8 @@ static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
        intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 }
 
-static u32 bxt_a_get_seqno(struct intel_engine_cs *engine,
-                          bool lazy_coherency)
+static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 {
-
        /*
         * On BXT A steppings there is a HW coherency issue whereby the
         * MI_STORE_DATA_IMM storing the completed request's seqno
@@ -1878,11 +1893,7 @@ static u32 bxt_a_get_seqno(struct intel_engine_cs *engine,
         * bxt_a_set_seqno(), where we also do a clflush after the write. So
         * this clflush in practice becomes an invalidate operation.
         */
-
-       if (!lazy_coherency)
-               intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
-
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+       intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
 static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
@@ -2016,6 +2027,13 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        if (!intel_engine_initialized(engine))
                return;
 
+       /*
+        * The tasklet cannot be active at this point due to
+        * intel_mark_active/idle, so this is just for documentation.
+        */
+       if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
+               tasklet_kill(&engine->irq_tasklet);
+
        dev_priv = engine->dev->dev_private;
 
        if (engine->buffer) {
@@ -2053,12 +2071,11 @@ logical_ring_default_vfuncs(struct drm_device *dev,
        engine->irq_get = gen8_logical_ring_get_irq;
        engine->irq_put = gen8_logical_ring_put_irq;
        engine->emit_bb_start = gen8_emit_bb_start;
+       engine->get_seqno = gen8_get_seqno;
+       engine->set_seqno = gen8_set_seqno;
        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               engine->get_seqno = bxt_a_get_seqno;
+               engine->irq_seqno_barrier = bxt_a_seqno_barrier;
                engine->set_seqno = bxt_a_set_seqno;
-       } else {
-               engine->get_seqno = gen8_get_seqno;
-               engine->set_seqno = gen8_set_seqno;
        }
 }
 
@@ -2089,6 +2106,9 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
        INIT_LIST_HEAD(&engine->execlist_retired_req_list);
        spin_lock_init(&engine->execlist_lock);
 
+       tasklet_init(&engine->irq_tasklet,
+                    intel_lrc_irq_handler, (unsigned long)engine);
+
        logical_ring_init_platform_invariants(engine);
 
        ret = i915_cmd_parser_init_ring(engine);
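
The context-switch interrupt work above now runs in a tasklet: the hard irq only schedules it, and process-context paths sharing execlist_lock move from spin_lock_irq() to spin_lock_bh() so they cannot be interrupted by the bottom half while holding the lock. A minimal sketch of the split, assuming the standard tasklet API; my_engine and the handlers are illustrative names:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct my_engine {
            struct tasklet_struct irq_tasklet;
            spinlock_t lock;                /* shared with the tasklet: use _bh */
    };

    static void my_bottom_half(unsigned long data)
    {
            struct my_engine *e = (struct my_engine *)data;

            spin_lock(&e->lock);            /* already in BH context here */
            /* ... drain status buffers, submit the next contexts ... */
            spin_unlock(&e->lock);
    }

    static void my_hard_irq(struct my_engine *e)
    {
            tasklet_schedule(&e->irq_tasklet);  /* keep the hard irq short */
    }

    static void my_engine_init(struct my_engine *e)
    {
            spin_lock_init(&e->lock);
            tasklet_init(&e->irq_tasklet, my_bottom_half, (unsigned long)e);
    }

    static void my_engine_fini(struct my_engine *e)
    {
            tasklet_kill(&e->irq_tasklet);  /* waits out a running instance */
    }
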
index a17cb12221bad0af70ff1474701d8e99e0001b46..0b0853eee91e1325d2f86167a17b4ab63217a09f 100644 (file)
@@ -118,7 +118,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas);
 
-void intel_lrc_irq_handler(struct intel_engine_cs *engine);
 void intel_execlists_retire_requests(struct intel_engine_cs *engine);
 
 #endif /* _INTEL_LRC_H_ */
index 5d2b2575de335eeee64560e64da48c869481aaaf..66e832beeb37e570f505b4549bc9e9b01234dd35 100644 (file)
@@ -472,11 +472,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
         * and as part of the cleanup in the hw state restore we also redisable
         * the vga plane.
         */
-       if (!HAS_PCH_SPLIT(dev)) {
-               drm_modeset_lock_all(dev);
+       if (!HAS_PCH_SPLIT(dev))
                intel_display_resume(dev);
-               drm_modeset_unlock_all(dev);
-       }
 
        dev_priv->modeset_restore = MODESET_DONE;
 
index e1acb41f187a15870e6b2af306d98b745d859997..6694e9230cd5bea860740bf1d5934d0a8db1ede1 100644 (file)
@@ -190,13 +190,14 @@ struct intel_overlay {
 static struct overlay_registers __iomem *
 intel_overlay_map_regs(struct intel_overlay *overlay)
 {
-       struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct overlay_registers __iomem *regs;
 
        if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+               regs = io_mapping_map_wc(ggtt->mappable,
                                         i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
        return regs;
@@ -1481,7 +1482,8 @@ struct intel_overlay_error_state {
 static struct overlay_registers __iomem *
 intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 {
-       struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct overlay_registers __iomem *regs;
 
        if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -1490,7 +1492,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                regs = (struct overlay_registers __iomem *)
                        overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+               regs = io_mapping_map_atomic_wc(ggtt->mappable,
                                                i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
        return regs;
index 6a04761ddc0f5268c4edab9961cf70acd73f7824..43b24a1f5ee612fe2f8d8fd6a2da1500444accda 100644 (file)
@@ -2937,25 +2937,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
                             const struct drm_plane_state *pstate,
                             int y)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+       struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
        struct drm_framebuffer *fb = pstate->fb;
+       uint32_t width = 0, height = 0;
+
+       width = drm_rect_width(&intel_pstate->src) >> 16;
+       height = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(pstate->rotation))
+               swap(width, height);
 
        /* for planar format */
        if (fb->pixel_format == DRM_FORMAT_NV12) {
                if (y)  /* y-plane data rate */
-                       return intel_crtc->config->pipe_src_w *
-                               intel_crtc->config->pipe_src_h *
+                       return width * height *
                                drm_format_plane_cpp(fb->pixel_format, 0);
                else    /* uv-plane data rate */
-                       return (intel_crtc->config->pipe_src_w/2) *
-                               (intel_crtc->config->pipe_src_h/2) *
+                       return (width / 2) * (height / 2) *
                                drm_format_plane_cpp(fb->pixel_format, 1);
        }
 
        /* for packed formats */
-       return intel_crtc->config->pipe_src_w *
-               intel_crtc->config->pipe_src_h *
-               drm_format_plane_cpp(fb->pixel_format, 0);
+       return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
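
The data-rate helper now works from the plane's clipped source rectangle (stored as 16.16 fixed point, hence the >> 16) and swaps width and height for 90/270 rotation, instead of always using the pipe source size. A standalone sketch of that arithmetic; the function and parameter names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int
    plane_data_rate(unsigned int src_w, unsigned int src_h, bool rot_90_or_270,
                    bool nv12, bool y_plane, unsigned int cpp)
    {
            if (rot_90_or_270) {            /* fb is scanned out sideways */
                    unsigned int tmp = src_w;
                    src_w = src_h;
                    src_h = tmp;
            }

            if (nv12 && !y_plane)           /* uv plane is subsampled 2x2 */
                    return (src_w / 2) * (src_h / 2) * cpp;

            return src_w * src_h * cpp;
    }

    int main(void)
    {
            /* a 1920x1080 source, rotated 90 degrees, packed 4 bytes/pixel */
            printf("%u bytes per frame\n",
                   plane_data_rate(1920, 1080, true, false, false, 4));
            return 0;
    }
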
 
 /*
@@ -3034,8 +3037,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                struct drm_framebuffer *fb = plane->state->fb;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (fb == NULL)
+               if (!to_intel_plane_state(plane->state)->visible)
                        continue;
+
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
 
@@ -3061,7 +3065,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                uint16_t plane_blocks, y_plane_blocks = 0;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (pstate->fb == NULL)
+               if (!to_intel_plane_state(pstate)->visible)
                        continue;
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
@@ -3184,26 +3188,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 {
        struct drm_plane *plane = &intel_plane->base;
        struct drm_framebuffer *fb = plane->state->fb;
+       struct intel_plane_state *intel_pstate =
+                                       to_intel_plane_state(plane->state);
        uint32_t latency = dev_priv->wm.skl_latency[level];
        uint32_t method1, method2;
        uint32_t plane_bytes_per_line, plane_blocks_per_line;
        uint32_t res_blocks, res_lines;
        uint32_t selected_result;
        uint8_t cpp;
+       uint32_t width = 0, height = 0;
 
-       if (latency == 0 || !cstate->base.active || !fb)
+       if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
                return false;
 
+       width = drm_rect_width(&intel_pstate->src) >> 16;
+       height = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(plane->state->rotation))
+               swap(width, height);
+
        cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
                                 cpp, latency);
        method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
-                                cstate->pipe_src_w,
-                                cpp, fb->modifier[0],
+                                width,
+                                cpp,
+                                fb->modifier[0],
                                 latency);
 
-       plane_bytes_per_line = cstate->pipe_src_w * cpp;
+       plane_bytes_per_line = width * cpp;
        plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
        if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -4288,7 +4302,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
         * the hw runs at the minimal clock before selecting the desired
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt. */
-       if (IS_GEN9(dev_priv->dev)) {
+       if (IS_GEN9(dev_priv)) {
                limits = (dev_priv->rps.max_freq_softlimit) << 23;
                if (val <= dev_priv->rps.min_freq_softlimit)
                        limits |= (dev_priv->rps.min_freq_softlimit) << 14;
@@ -4630,7 +4644,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 
 static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool enable_rc6 = true;
        unsigned long rc6_ctx_base;
 
@@ -4644,9 +4659,9 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
         * for this check.
         */
        rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
-       if (!((rc6_ctx_base >= dev_priv->ggtt.stolen_reserved_base) &&
-             (rc6_ctx_base + PAGE_SIZE <= dev_priv->ggtt.stolen_reserved_base +
-                                       dev_priv->ggtt.stolen_reserved_size))) {
+       if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
+             (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
+                                       ggtt->stolen_reserved_size))) {
                DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
                enable_rc6 = false;
        }
@@ -4807,7 +4822,7 @@ static void gen9_enable_rps(struct drm_device *dev)
         * Up/Down EI & threshold registers, as well as the RP_CONTROL,
         * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
        dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+       gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5287,9 +5302,9 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
 
 static void cherryview_setup_pctx(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long pctx_paddr, paddr;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       unsigned long pctx_paddr, paddr;
        u32 pcbr;
        int pctx_size = 32*1024;
 
@@ -5365,6 +5380,17 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
        dev_priv->vlv_pctx = NULL;
 }
 
+static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
+{
+       dev_priv->rps.gpll_ref_freq =
+               vlv_get_cck_clock(dev_priv, "GPLL ref",
+                                 CCK_GPLL_CLOCK_CONTROL,
+                                 dev_priv->czclk_freq);
+
+       DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
+                        dev_priv->rps.gpll_ref_freq);
+}
+
 static void valleyview_init_gt_powersave(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5372,6 +5398,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
 
        valleyview_setup_pctx(dev);
 
+       vlv_init_gpll_ref_freq(dev_priv);
+
        mutex_lock(&dev_priv->rps.hw_lock);
 
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -5429,6 +5457,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
 
        cherryview_setup_pctx(dev);
 
+       vlv_init_gpll_ref_freq(dev_priv);
+
        mutex_lock(&dev_priv->rps.hw_lock);
 
        mutex_lock(&dev_priv->sb_lock);
@@ -5579,10 +5609,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
                         dev_priv->rps.cur_freq);
 
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+                        intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+                        dev_priv->rps.idle_freq);
 
-       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5668,10 +5698,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
                         dev_priv->rps.cur_freq);
 
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-                        dev_priv->rps.efficient_freq);
+                        intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+                        dev_priv->rps.idle_freq);
 
-       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -7279,78 +7309,43 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val
        return 0;
 }
 
-static int vlv_gpu_freq_div(unsigned int czclk_freq)
-{
-       switch (czclk_freq) {
-       case 200:
-               return 10;
-       case 267:
-               return 12;
-       case 320:
-       case 333:
-               return 16;
-       case 400:
-               return 20;
-       default:
-               return -1;
-       }
-}
-
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-       int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
-       div = vlv_gpu_freq_div(czclk_freq);
-       if (div < 0)
-               return div;
-
-       return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
+       /*
+        * N = val - 0xb7
+        * Slow = Fast = GPLL ref * N
+        */
+       return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
 }
 
 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-       int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
-       mul = vlv_gpu_freq_div(czclk_freq);
-       if (mul < 0)
-               return mul;
-
-       return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
+       return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
 }
 
 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-       int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
-       div = vlv_gpu_freq_div(czclk_freq);
-       if (div < 0)
-               return div;
-       div /= 2;
-
-       return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
+       /*
+        * N = val / 2
+        * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+        */
+       return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
 }
 
 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-       int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
-       mul = vlv_gpu_freq_div(czclk_freq);
-       if (mul < 0)
-               return mul;
-       mul /= 2;
-
        /* CHV needs even values */
-       return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
+       return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
 }
 
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-       if (IS_GEN9(dev_priv->dev))
+       if (IS_GEN9(dev_priv))
                return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
                                         GEN9_FREQ_SCALER);
-       else if (IS_CHERRYVIEW(dev_priv->dev))
+       else if (IS_CHERRYVIEW(dev_priv))
                return chv_gpu_freq(dev_priv, val);
-       else if (IS_VALLEYVIEW(dev_priv->dev))
+       else if (IS_VALLEYVIEW(dev_priv))
                return byt_gpu_freq(dev_priv, val);
        else
                return val * GT_FREQUENCY_MULTIPLIER;
@@ -7358,12 +7353,12 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-       if (IS_GEN9(dev_priv->dev))
+       if (IS_GEN9(dev_priv))
                return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
                                         GT_FREQUENCY_MULTIPLIER);
-       else if (IS_CHERRYVIEW(dev_priv->dev))
+       else if (IS_CHERRYVIEW(dev_priv))
                return chv_freq_opcode(dev_priv, val);
-       else if (IS_VALLEYVIEW(dev_priv->dev))
+       else if (IS_VALLEYVIEW(dev_priv))
                return byt_freq_opcode(dev_priv, val);
        else
                return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
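
The czclk lookup table is gone: both conversions are now plain arithmetic on the GPLL reference frequency measured at init (on VLV the ratio is N = val - 0xb7, so frequency = GPLL ref * N). A standalone sketch of the round trip; the 25000 kHz reference is an assumed value for illustration, the real one comes from vlv_get_cck_clock():

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    static const unsigned int gpll_ref_khz = 25000;    /* assumed */

    static int byt_gpu_freq(int val)        /* opcode -> MHz */
    {
            return DIV_ROUND_CLOSEST(gpll_ref_khz * (val - 0xb7), 1000);
    }

    static int byt_freq_opcode(int mhz)     /* MHz -> opcode */
    {
            return DIV_ROUND_CLOSEST(1000u * mhz, gpll_ref_khz) + 0xb7;
    }

    int main(void)
    {
            int op = 0xc8;                  /* arbitrary example opcode */
            int mhz = byt_gpu_freq(op);     /* N = 17, so 425 MHz here */

            printf("0x%x -> %d MHz -> 0x%x\n", op, mhz, byt_freq_opcode(mhz));
            return 0;
    }
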
index 38e95185d9c6a995387c2fee23bb4873af282f3d..c3abae4bc59644f2952f030a839c40e635ef11de 100644 (file)
@@ -563,7 +563,7 @@ static void intel_psr_work(struct work_struct *work)
         * PSR might take some time to get fully disabled
         * and be ready for re-enable.
         */
-       if (HAS_DDI(dev_priv->dev)) {
+       if (HAS_DDI(dev_priv)) {
                if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
                              EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
index a492bcabd30dc1a93f6513b5323fa426b00112af..41b604e69db79daf63e263fc035cc6a7b547d02f 100644 (file)
@@ -1568,22 +1568,27 @@ pc_render_add_request(struct drm_i915_gem_request *req)
        return 0;
 }
 
-static u32
-gen6_ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
+static void
+gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
-        * ACTHD) before reading the status page. */
-       if (!lazy_coherency) {
-               struct drm_i915_private *dev_priv = engine->dev->dev_private;
-               POSTING_READ(RING_ACTHD(engine->mmio_base));
-       }
-
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+        * ACTHD) before reading the status page.
+        *
+        * Note that this effectively stalls the read by the time it takes to
+        * do a memory transaction, which more or less ensures that the write
+        * from the GPU has sufficient time to invalidate the CPU cacheline.
+        * Alternatively we could delay the interrupt from the CS ring to give
+        * the write time to land, but that would incur a delay after every
+        * batch i.e. much more frequent than a delay when waiting for the
+        * interrupt (with the same net latency).
+        */
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
 }
 
 static u32
-ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *engine)
 {
        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
@@ -1595,7 +1600,7 @@ ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static u32
-pc_render_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *engine)
 {
        return engine->scratch.cpu_page[0];
 }
@@ -2078,39 +2083,18 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
        if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-               vunmap(ringbuf->virtual_start);
+               i915_gem_object_unpin_map(ringbuf->obj);
        else
                iounmap(ringbuf->virtual_start);
-       ringbuf->virtual_start = NULL;
        ringbuf->vma = NULL;
        i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
-       struct sg_page_iter sg_iter;
-       struct page **pages;
-       void *addr;
-       int i;
-
-       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-       if (pages == NULL)
-               return NULL;
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-               pages[i++] = sg_page_iter_page(&sg_iter);
-
-       addr = vmap(pages, i, 0, PAGE_KERNEL);
-       drm_free_large(pages);
-
-       return addr;
-}
-
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj = ringbuf->obj;
        int ret;
 
@@ -2120,15 +2104,13 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                        return ret;
 
                ret = i915_gem_object_set_to_cpu_domain(obj, true);
-               if (ret) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return ret;
-               }
+               if (ret)
+                       goto err_unpin;
 
-               ringbuf->virtual_start = vmap_obj(obj);
+               ringbuf->virtual_start = i915_gem_object_pin_map(obj);
                if (ringbuf->virtual_start == NULL) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto err_unpin;
                }
        } else {
                ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
@@ -2136,25 +2118,26 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                        return ret;
 
                ret = i915_gem_object_set_to_gtt_domain(obj, true);
-               if (ret) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return ret;
-               }
+               if (ret)
+                       goto err_unpin;
 
                /* Access through the GTT requires the device to be awake. */
                assert_rpm_wakelock_held(dev_priv);
 
-               ringbuf->virtual_start = ioremap_wc(dev_priv->ggtt.mappable_base +
+               ringbuf->virtual_start = ioremap_wc(ggtt->mappable_base +
                                                    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
                if (ringbuf->virtual_start == NULL) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return -EINVAL;
+                       ret = -ENOMEM;
+                       goto err_unpin;
                }
        }
 
        ringbuf->vma = i915_gem_obj_to_ggtt(obj);
-
        return 0;
+
+err_unpin:
+       i915_gem_object_ggtt_unpin(obj);
+       return ret;
 }
 
 static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
@@ -2477,11 +2460,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So we don't need an immediate
+                        * wrap, and only need to wait for the reserved size,
+                        * measured from the start of the ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
@@ -2549,17 +2532,36 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine->dev);
 
-       if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
+       /* Our semaphore implementation is strictly monotonic (i.e. we proceed
+        * so long as the semaphore value in the register/page is greater
+        * than the sync value), so whenever we reset the seqno, resetting
+        * the tracking semaphore value to 0 guarantees that it will always
+        * be before the next request's seqno. If we don't reset
+        * the semaphore value, then when the seqno moves backwards all
+        * future waits will complete instantly (causing rendering corruption).
+        */
+       if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
                I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-               if (HAS_VEBOX(dev))
+               if (HAS_VEBOX(dev_priv))
                        I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
        }
+       if (dev_priv->semaphore_obj) {
+               struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
+               struct page *page = i915_gem_object_get_dirty_page(obj, 0);
+               void *semaphores = kmap(page);
+               memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+                      0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+               kunmap(page);
+       }
+       memset(engine->semaphore.sync_seqno, 0,
+              sizeof(engine->semaphore.sync_seqno));
 
        engine->set_seqno(engine, seqno);
+       engine->last_submitted_seqno = seqno;
+
        engine->hangcheck.seqno = seqno;
 }
 
@@ -2799,7 +2801,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                engine->irq_get = gen8_ring_get_irq;
                engine->irq_put = gen8_ring_put_irq;
                engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               engine->get_seqno = gen6_ring_get_seqno;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+               engine->get_seqno = ring_get_seqno;
                engine->set_seqno = ring_set_seqno;
                if (i915_semaphore_is_enabled(dev)) {
                        WARN_ON(!dev_priv->semaphore_obj);
@@ -2816,7 +2819,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                engine->irq_get = gen6_ring_get_irq;
                engine->irq_put = gen6_ring_put_irq;
                engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               engine->get_seqno = gen6_ring_get_seqno;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+               engine->get_seqno = ring_get_seqno;
                engine->set_seqno = ring_set_seqno;
                if (i915_semaphore_is_enabled(dev)) {
                        engine->semaphore.sync_to = gen6_ring_sync;
@@ -2931,7 +2935,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
                        engine->write_tail = gen6_bsd_ring_write_tail;
                engine->flush = gen6_bsd_ring_flush;
                engine->add_request = gen6_add_request;
-               engine->get_seqno = gen6_ring_get_seqno;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+               engine->get_seqno = ring_get_seqno;
                engine->set_seqno = ring_set_seqno;
                if (INTEL_INFO(dev)->gen >= 8) {
                        engine->irq_enable_mask =
@@ -3004,7 +3009,8 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
        engine->mmio_base = GEN8_BSD2_RING_BASE;
        engine->flush = gen6_bsd_ring_flush;
        engine->add_request = gen6_add_request;
-       engine->get_seqno = gen6_ring_get_seqno;
+       engine->irq_seqno_barrier = gen6_seqno_barrier;
+       engine->get_seqno = ring_get_seqno;
        engine->set_seqno = ring_set_seqno;
        engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
@@ -3035,7 +3041,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
        engine->write_tail = ring_write_tail;
        engine->flush = gen6_ring_flush;
        engine->add_request = gen6_add_request;
-       engine->get_seqno = gen6_ring_get_seqno;
+       engine->irq_seqno_barrier = gen6_seqno_barrier;
+       engine->get_seqno = ring_get_seqno;
        engine->set_seqno = ring_set_seqno;
        if (INTEL_INFO(dev)->gen >= 8) {
                engine->irq_enable_mask =
@@ -3093,7 +3100,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
        engine->write_tail = ring_write_tail;
        engine->flush = gen6_ring_flush;
        engine->add_request = gen6_add_request;
-       engine->get_seqno = gen6_ring_get_seqno;
+       engine->irq_seqno_barrier = gen6_seqno_barrier;
+       engine->get_seqno = ring_get_seqno;
        engine->set_seqno = ring_set_seqno;
 
        if (INTEL_INFO(dev)->gen >= 8) {
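
With get_seqno() split from the new irq_seqno_barrier() hook, the seqno read itself stays cheap and the ordering cost is only paid where coherency with the interrupt actually matters. A sketch of how a waiter might combine the two vfuncs, assuming the declarations from intel_ringbuffer.h; the helper and its placement are illustrative, not the driver's actual call site:

    static u32 read_seqno_coherent(struct intel_engine_cs *engine)
    {
            /* flush any seqno write the HW may still have in flight ... */
            if (engine->irq_seqno_barrier)
                    engine->irq_seqno_barrier(engine);

            /* ... then a plain status-page read is good enough */
            return engine->get_seqno(engine);
    }
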
index 221a94627aab5d09b42a8a990d016f08d42d8e3b..78dc46864a1021d95567a496b45f5dfd5a0a6fd6 100644 (file)
@@ -52,16 +52,15 @@ struct  intel_hw_status_page {
 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
  */
-#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define gen8_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SEMAPHORE_OFFSET(__from, __to)                         \
+       (((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
 #define GEN8_SIGNAL_OFFSET(__ring, to)                      \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
-       ((__ring)->id * I915_NUM_ENGINES * i915_semaphore_seqno_size) + \
-       (i915_semaphore_seqno_size * (to)))
-
+        GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
 #define GEN8_WAIT_OFFSET(__ring, from)                      \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
-       ((from) * I915_NUM_ENGINES * i915_semaphore_seqno_size) + \
-       (i915_semaphore_seqno_size * (__ring)->id))
+        GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
 #define GEN8_RING_SEMAPHORE_INIT(e) do { \
        if (!dev_priv->semaphore_obj) { \
@@ -88,6 +87,7 @@ enum intel_ring_hangcheck_action {
 struct intel_ring_hangcheck {
        u64 acthd;
        u32 seqno;
+       unsigned user_interrupts;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
@@ -194,8 +194,8 @@ struct  intel_engine_cs {
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
-       u32             (*get_seqno)(struct intel_engine_cs *ring,
-                                    bool lazy_coherency);
+       void            (*irq_seqno_barrier)(struct intel_engine_cs *ring);
+       u32             (*get_seqno)(struct intel_engine_cs *ring);
        void            (*set_seqno)(struct intel_engine_cs *ring,
                                     u32 seqno);
        int             (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
@@ -266,7 +266,8 @@ struct  intel_engine_cs {
        } semaphore;
 
        /* Execlists */
-       spinlock_t execlist_lock;
+       struct tasklet_struct irq_tasklet;
+       spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        unsigned int next_context_status_buffer;
@@ -305,6 +306,7 @@ struct  intel_engine_cs {
         * inspecting request list.
         */
        u32 last_submitted_seqno;
+       unsigned user_interrupts;
 
        bool gpu_caches_dirty;
 
@@ -383,17 +385,16 @@ intel_ring_sync_index(struct intel_engine_cs *engine,
 static inline void
 intel_flush_status_page(struct intel_engine_cs *engine, int reg)
 {
-       drm_clflush_virt_range(&engine->status_page.page_addr[reg],
-                              sizeof(uint32_t));
+       mb();
+       clflush(&engine->status_page.page_addr[reg]);
+       mb();
 }
 
 static inline u32
-intel_read_status_page(struct intel_engine_cs *engine,
-                      int reg)
+intel_read_status_page(struct intel_engine_cs *engine, int reg)
 {
        /* Ensure that the compiler doesn't optimize away the load. */
-       barrier();
-       return engine->status_page.page_addr[reg];
+       return READ_ONCE(engine->status_page.page_addr[reg]);
 }
 
 static inline void
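
GEN8_SEMAPHORE_OFFSET() flattens the (from, to) engine pair into a row-major table of qword slots inside the shared semaphore object, which is what lets intel_ring_init_seqno() above memset one signaller's entire row. A standalone sketch of the mapping; NUM_ENGINES = 5 mirrors I915_NUM_ENGINES at this point, and the engine ids are illustrative:

    #include <stdio.h>

    #define NUM_ENGINES 5
    #define SEM_SIZE    8u                  /* one qword per slot */

    static unsigned int sem_offset(unsigned int from, unsigned int to)
    {
            return (from * NUM_ENGINES + to) * SEM_SIZE;
    }

    int main(void)
    {
            printf("offset(0, 4) = %u\n", sem_offset(0, 4));   /* 32 */
            printf("offset(4, 0) = %u\n", sem_offset(4, 0));   /* 160 */
            return 0;
    }
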
index d189a0012277ec510d2372b7a2405967cd4f3e6c..80e8bd4b43b52115f056dbf091ec5ed8cf2a05cf 100644 (file)
@@ -2039,17 +2039,17 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
         * The enabling order will be from lower to higher indexed wells,
         * the disabling order is reversed.
         */
-       if (IS_HASWELL(dev_priv->dev)) {
+       if (IS_HASWELL(dev_priv)) {
                set_power_wells(power_domains, hsw_power_wells);
-       } else if (IS_BROADWELL(dev_priv->dev)) {
+       } else if (IS_BROADWELL(dev_priv)) {
                set_power_wells(power_domains, bdw_power_wells);
-       } else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
+       } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                set_power_wells(power_domains, skl_power_wells);
-       } else if (IS_BROXTON(dev_priv->dev)) {
+       } else if (IS_BROXTON(dev_priv)) {
                set_power_wells(power_domains, bxt_power_wells);
-       } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+       } else if (IS_CHERRYVIEW(dev_priv)) {
                set_power_wells(power_domains, chv_power_wells);
-       } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+       } else if (IS_VALLEYVIEW(dev_priv)) {
                set_power_wells(power_domains, vlv_power_wells);
        } else {
                set_power_wells(power_domains, i9xx_always_on_power_well);
index 8821533561b125412326196a269829e05e5d3a13..0f3e2303e0e9b08229d29a7cc47172bf0efbeef5 100644 (file)
@@ -1025,8 +1025,8 @@ static uint32_t skl_plane_formats[] = {
 int
 intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 {
-       struct intel_plane *intel_plane;
-       struct intel_plane_state *state;
+       struct intel_plane *intel_plane = NULL;
+       struct intel_plane_state *state = NULL;
        unsigned long possible_crtcs;
        const uint32_t *plane_formats;
        int num_plane_formats;
@@ -1036,13 +1036,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
                return -ENODEV;
 
        intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
-       if (!intel_plane)
-               return -ENOMEM;
+       if (!intel_plane) {
+               ret = -ENOMEM;
+               goto fail;
+       }
 
        state = intel_create_plane_state(&intel_plane->base);
        if (!state) {
-               kfree(intel_plane);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto fail;
        }
        intel_plane->base.state = &state->base;
 
@@ -1097,28 +1099,34 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
                num_plane_formats = ARRAY_SIZE(skl_plane_formats);
                break;
        default:
-               kfree(intel_plane);
-               return -ENODEV;
+               MISSING_CASE(INTEL_INFO(dev)->gen);
+               ret = -ENODEV;
+               goto fail;
        }
 
        intel_plane->pipe = pipe;
        intel_plane->plane = plane;
        intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
        intel_plane->check_plane = intel_check_sprite_plane;
+
        possible_crtcs = (1 << pipe);
+
        ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
                                       &intel_plane_funcs,
                                       plane_formats, num_plane_formats,
                                       DRM_PLANE_TYPE_OVERLAY, NULL);
-       if (ret) {
-               kfree(intel_plane);
-               goto out;
-       }
+       if (ret)
+               goto fail;
 
        intel_create_rotation_property(dev, intel_plane);
 
        drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 
-out:
+       return 0;
+
+fail:
+       kfree(state);
+       kfree(intel_plane);
+
        return ret;
 }
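
The rewritten error handling relies on intel_plane and state both starting out NULL, so a single fail: label can free unconditionally; kfree(NULL) is defined to be a no-op. A tiny userspace sketch of the same single-exit idiom (free() gives the same NULL guarantee):

    #include <stdlib.h>

    struct a { int x; };
    struct b { int y; };

    static int create_pair(struct a **pa, struct b **pb)
    {
            struct a *a = NULL;
            struct b *b = NULL;
            int ret;

            a = calloc(1, sizeof(*a));
            if (!a) {
                    ret = -1;               /* think -ENOMEM */
                    goto fail;
            }

            b = calloc(1, sizeof(*b));
            if (!b) {
                    ret = -1;
                    goto fail;
            }

            *pa = a;
            *pb = b;
            return 0;

    fail:
            free(b);                        /* free(NULL) is harmless */
            free(a);
            return ret;
    }

    int main(void)
    {
            struct a *a;
            struct b *b;

            return create_pair(&a, &b) ? 1 : 0;
    }
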
index 512b7faedefd9552d04ca943b07698b05b5da8e9..ac2ac07b505b069a318ecb4bf5c8a8f13b836cba 100644 (file)
@@ -204,7 +204,7 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 
        /* On VLV, FIFO will be shared by both SW and HW.
         * So, we need to read the FREE_ENTRIES every time */
-       if (IS_VALLEYVIEW(dev_priv->dev))
+       if (IS_VALLEYVIEW(dev_priv))
                dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
 
        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
@@ -716,8 +716,8 @@ __gen2_read(64)
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val
 
-static inline void __force_wake_get(struct drm_i915_private *dev_priv,
-                                   enum forcewake_domains fw_domains)
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+                                    enum forcewake_domains fw_domains)
 {
        struct intel_uncore_forcewake_domain *domain;
        enum forcewake_domain_id id;
@@ -745,7 +745,7 @@ static u##x \
 gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
        if (NEEDS_FORCE_WAKE(offset)) \
-               __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+               __force_wake_auto(dev_priv, FORCEWAKE_RENDER); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
 }
@@ -762,7 +762,7 @@ vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
                fw_engine = FORCEWAKE_MEDIA; \
        if (fw_engine) \
-               __force_wake_get(dev_priv, fw_engine); \
+               __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
 }
@@ -781,7 +781,7 @@ chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
                fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
        if (fw_engine) \
-               __force_wake_get(dev_priv, fw_engine); \
+               __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
 }
@@ -805,7 +805,7 @@ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        else \
                fw_engine = FORCEWAKE_BLITTER; \
        if (fw_engine) \
-               __force_wake_get(dev_priv, fw_engine); \
+               __force_wake_auto(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
        GEN6_READ_FOOTER; \
 }
@@ -969,7 +969,7 @@ static void \
 gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        GEN6_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
-               __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+               __force_wake_auto(dev_priv, FORCEWAKE_RENDER); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
 }
@@ -989,7 +989,7 @@ chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
        else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
                fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
        if (fw_engine) \
-               __force_wake_get(dev_priv, fw_engine); \
+               __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
 }
@@ -1036,7 +1036,7 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
        else \
                fw_engine = FORCEWAKE_BLITTER; \
        if (fw_engine) \
-               __force_wake_get(dev_priv, fw_engine); \
+               __force_wake_auto(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
        GEN6_WRITE_FOOTER; \
 }
@@ -1161,7 +1161,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+       if (INTEL_INFO(dev_priv)->gen <= 5)
                return;
 
        if (IS_GEN9(dev)) {
@@ -1673,6 +1673,25 @@ bool intel_has_gpu_reset(struct drm_device *dev)
        return intel_get_gpu_reset(dev) != NULL;
 }
 
+int intel_guc_reset(struct drm_i915_private *dev_priv)
+{
+       int ret;
+       unsigned long irqflags;
+
+       if (!i915.enable_guc_submission)
+               return -EINVAL;
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+       return ret;
+}
+
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
 {
        return check_for_unclaimed_mmio(dev_priv);
index 749dceab7c0250db49eb07b2f95f75e02d566162..9ff1e960d61774cb356f4875d522e75b9c6f11ba 100644 (file)
@@ -261,9 +261,6 @@ struct old_child_dev_config {
  * versions. Notice that the meaning of the contents may still change,
  * but at least the offsets are consistent. */
 
-/* Definitions for flags_1 */
-#define IBOOST_ENABLE (1<<3)
-
 struct common_child_dev_config {
        u16 handle;
        u16 device_type;
@@ -272,9 +269,18 @@ struct common_child_dev_config {
        u8 not_common2[2];
        u8 ddc_pin;
        u16 edid_ptr;
-       u8 obsolete;
-       u8 flags_1;
-       u8 not_common3[13];
+       u8 dvo_cfg; /* See DEVICE_CFG_* above */
+       u8 efp_routed:1;
+       u8 lane_reversal:1;
+       u8 lspcon:1;
+       u8 iboost:1;
+       u8 hpd_invert:1;
+       u8 flag_reserved:3;
+       u8 hdmi_support:1;
+       u8 dp_support:1;
+       u8 tmds_support:1;
+       u8 support_reserved:5;
+       u8 not_common3[12];
        u8 iboost_level;
 } __packed;
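
With the opaque flags_1 byte split into named bitfields, VBT parsing can test feature bits directly instead of masking the old IBOOST_ENABLE by hand. A sketch of a consumer; the helper is hypothetical, but the fields are the ones defined above:

    static void parse_child_flags(const struct common_child_dev_config *child)
    {
            if (child->iboost)
                    DRM_DEBUG_KMS("I_boost level %u\n", child->iboost_level);
            if (child->hpd_invert)
                    DRM_DEBUG_KMS("HPD line inverted\n");
            if (child->lspcon)
                    DRM_DEBUG_KMS("LSPCON dongle present\n");
    }
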
 
index e42495ad813632002d5d313da5351e615240f274..70d4e221a3adb485ff7927c33737db23a127b577 100644 (file)
@@ -54,6 +54,25 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
                         GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 
+static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
+{
+       if (size != 0 && nmemb > SIZE_MAX / size)
+               return NULL;
+
+       if (size * nmemb <= PAGE_SIZE)
+               return kmalloc(nmemb * size, gfp);
+
+       if (gfp & __GFP_RECLAIMABLE) {
+               void *ptr = kmalloc(nmemb * size,
+                                   gfp | __GFP_NOWARN | __GFP_NORETRY);
+               if (ptr)
+                       return ptr;
+       }
+
+       return __vmalloc(size * nmemb,
+                        gfp | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
 static __inline void drm_free_large(void *ptr)
 {
        kvfree(ptr);
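
drm_malloc_gfp() guards against multiplication overflow, always kmallocs when the total fits in a page, opportunistically tries kmalloc (without warning or retrying) for larger reclaimable allocations, and otherwise falls back to __vmalloc, so small arrays stay cheap while large ones still succeed under fragmentation. A sketch of the intended usage for a page-pointer array; the helper names are illustrative:

    #include <drm/drm_mem_util.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page **alloc_page_array(unsigned long num_pages)
    {
            /* GFP_TEMPORARY carries __GFP_RECLAIMABLE: kmalloc first,
             * vmalloc fallback; freed with drm_free_large() below */
            return drm_malloc_gfp(num_pages, sizeof(struct page *),
                                  GFP_TEMPORARY);
    }

    static void free_page_array(struct page **pages)
    {
            drm_free_large(pages);          /* kvfree: handles either backing */
    }
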
index d1f1d338af2051a07d6df62e5408056ede5acf05..8b51df3ab3347001a535f7444345b5887859691a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/rbtree.h>
 
 struct vm_area_struct;         /* vma defining user mapping in mm_types.h */
+struct notifier_block;         /* in notifier.h */
 
 /* bits in flags of vmalloc's vm_struct below */
 #define VM_IOREMAP             0x00000001      /* ioremap() and friends */
@@ -187,4 +188,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #define VMALLOC_TOTAL 0UL
 #endif
 
+int register_vmap_purge_notifier(struct notifier_block *nb);
+int unregister_vmap_purge_notifier(struct notifier_block *nb);
+
 #endif /* _LINUX_VMALLOC_H */
index ae7d20b447ffbd74da85e85201ed668d758d9e1b..293889d7f482995daf8d39de6174698fbeea20be 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/list.h>
+#include <linux/notifier.h>
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
@@ -344,6 +345,8 @@ static void __insert_vmap_area(struct vmap_area *va)
 
 static void purge_vmap_area_lazy(void);
 
+static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -363,6 +366,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        BUG_ON(offset_in_page(size));
        BUG_ON(!is_power_of_2(align));
 
+       might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+
        va = kmalloc_node(sizeof(struct vmap_area),
                        gfp_mask & GFP_RECLAIM_MASK, node);
        if (unlikely(!va))
@@ -468,6 +473,16 @@ overflow:
                purged = 1;
                goto retry;
        }
+
+       if (gfpflags_allow_blocking(gfp_mask)) {
+               unsigned long freed = 0;
+               blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
+               if (freed > 0) {
+                       purged = 0;
+                       goto retry;
+               }
+       }
+
        if (printk_ratelimit())
                pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
                        size);
@@ -475,6 +490,18 @@ overflow:
        return ERR_PTR(-EBUSY);
 }
 
+int register_vmap_purge_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(&vmap_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
+
+int unregister_vmap_purge_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
+
 static void __free_vmap_area(struct vmap_area *va)
 {
        BUG_ON(RB_EMPTY_NODE(&va->rb_node));
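
A client of the new hook registers a notifier_block whose callback releases any driver-held vmaps and adds the amount freed to the counter passed in; if the chain reports progress, alloc_vmap_area() retries instead of failing. A sketch under that assumption; my_drop_vmaps() is a hypothetical driver callback, only the notifier API itself is real:

    #include <linux/notifier.h>
    #include <linux/vmalloc.h>

    unsigned long my_drop_vmaps(void);  /* hypothetical: unmap vmaps, return amount freed */

    static int my_vmap_purge(struct notifier_block *nb, unsigned long event,
                             void *ptr)
    {
            unsigned long *freed = ptr;

            *freed += my_drop_vmaps();
            return NOTIFY_DONE;
    }

    static struct notifier_block my_vmap_nb = {
            .notifier_call = my_vmap_purge,
    };

    /* register_vmap_purge_notifier(&my_vmap_nb) at init,
     * unregister_vmap_purge_notifier(&my_vmap_nb) on teardown */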