Backmerge tag 'v4.16-rc7' into drm-next
author: Dave Airlie <airlied@redhat.com>
Wed, 28 Mar 2018 04:30:41 +0000 (14:30 +1000)
committer: Dave Airlie <airlied@redhat.com>
Wed, 28 Mar 2018 04:30:41 +0000 (14:30 +1000)
Linux 4.16-rc7

This was requested by Daniel, and things were getting
a bit hard to reconcile; most of the conflicts were
trivial, though.

36 files changed:
1  2 
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/trace.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/sun4i/sun4i_crtc.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tcon.h
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/plane.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/pci/quirks.c
sound/pci/hda/hda_intel.c

diff --cc MAINTAINERS
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index a55b4975c154b7c65cab88957dbe6a85d53dcb66,d74d6f05c62c4e60d4b1604707cd79ffa992d041..638abe84857c75dd34b8b73d9848b35c8e4e5a83
@@@ -52,29 -52,54 +52,77 @@@ static void set_context_pdp_root_pointe
                pdp_pair[i].val = pdp[7 - i];
  }
  
 +static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 +{
 +      struct intel_vgpu *vgpu = workload->vgpu;
 +      int ring_id = workload->ring_id;
 +      struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 +      struct drm_i915_gem_object *ctx_obj =
 +              shadow_ctx->engine[ring_id].state->obj;
 +      struct execlist_ring_context *shadow_ring_context;
 +      struct page *page;
 +
 +      if (WARN_ON(!workload->shadow_mm))
 +              return;
 +
 +      if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
 +              return;
 +
 +      page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 +      shadow_ring_context = kmap(page);
 +      set_context_pdp_root_pointer(shadow_ring_context,
 +                      (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
 +      kunmap(page);
 +}
 +
+ /*
+  * when populating shadow ctx from guest, we should not overrride oa related
+  * registers, so that they will not be overlapped by guest oa configs. Thus
+  * made it possible to capture oa data from host for both host and guests.
+  */
+ static void sr_oa_regs(struct intel_vgpu_workload *workload,
+               u32 *reg_state, bool save)
+ {
+       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+       u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+       int i = 0;
+       u32 flex_mmio[] = {
+               i915_mmio_reg_offset(EU_PERF_CNTL0),
+               i915_mmio_reg_offset(EU_PERF_CNTL1),
+               i915_mmio_reg_offset(EU_PERF_CNTL2),
+               i915_mmio_reg_offset(EU_PERF_CNTL3),
+               i915_mmio_reg_offset(EU_PERF_CNTL4),
+               i915_mmio_reg_offset(EU_PERF_CNTL5),
+               i915_mmio_reg_offset(EU_PERF_CNTL6),
+       };
+       if (!workload || !reg_state || workload->ring_id != RCS)
+               return;
+       if (save) {
+               workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+               for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+                       u32 state_offset = ctx_flexeu0 + i * 2;
+                       workload->flex_mmio[i] = reg_state[state_offset + 1];
+               }
+       } else {
+               reg_state[ctx_oactxctrl] =
+                       i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+               reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+               for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+                       u32 state_offset = ctx_flexeu0 + i * 2;
+                       u32 mmio = flex_mmio[i];
+                       reg_state[state_offset] = mmio;
+                       reg_state[state_offset + 1] = workload->flex_mmio[i];
+               }
+       }
+ }
  static int populate_shadow_context(struct intel_vgpu_workload *workload)
  {
        struct intel_vgpu *vgpu = workload->vgpu;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 1a114e380f13ee8e13d970d18d57c8f4f930e093,a818ca4916051ade239efa0f4789d5c3cab36165..c3d92d537240b6cad39ddb38a3fb5cc063de8eda
@@@ -1195,11 -1144,7 +1205,12 @@@ static const struct sun4i_tcon_quirks s
  };
  
  static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
+       .supports_lvds          = true,
 +      .has_channel_0          = true,
 +};
 +
 +static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
 +      .has_channel_1          = true,
  };
  
  static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
index d3a945b7bb604330945cc4f07084b9d0dfc9dd34,278700c7bf9f6f71ec3c85686c81919362aa40fa..161e09427124f29030ff7c7fa9b7db9c21652521
@@@ -176,7 -175,7 +176,8 @@@ struct sun4i_tcon_quirks 
        bool    has_channel_1;  /* a33 does not have channel 1 */
        bool    has_lvds_alt;   /* Does the LVDS clock have a parent other than the TCON clock? */
        bool    needs_de_be_mux; /* sun6i needs mux to select backend */
 +      bool    needs_edp_reset; /* a80 edp reset needed for tcon0 access */
+       bool    supports_lvds;   /* Does the TCON support an LVDS output? */
  
        /* callback to handle tcon muxing options */
        int     (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
Simple merge
Simple merge
Simple merge
Simple merge
index 9e60de95b863d73da96550c1c4d6d0350261f81f,9116fe8baebcab24575a0b6dc329578ac4db0b40..f34f368c1a2ebc1713f35a7aab0f94c83061cbfa
@@@ -947,8 -938,7 +947,9 @@@ int vmw_kms_present(struct vmw_private 
  int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
  void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
 +int vmw_kms_suspend(struct drm_device *dev);
 +int vmw_kms_resume(struct drm_device *dev);
+ void vmw_kms_lost_device(struct drm_device *dev);
  
  int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
index 3628a9fe705fc4ac416c2baa58587e87f5eff9c0,3c824fd7cbf36d64e72e758bdb7c5b5e3b270dd6..f11601b6fd747cf37db69c841e3450615af406d8
@@@ -2551,10 -2561,11 +2557,12 @@@ int vmw_kms_helper_resource_prepare(str
        if (res->backup) {
                ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
                                                    interruptible,
 -                                                  res->dev_priv->has_mob);
 +                                                  res->dev_priv->has_mob,
 +                                                  false);
                if (ret)
                        goto out_unreserve;
+               ctx->buf = vmw_dmabuf_reference(res->backup);
        }
        ret = vmw_resource_validate(res);
        if (ret)
@@@ -2850,49 -2863,12 +2860,59 @@@ int vmw_kms_set_config(struct drm_mode_
  }
  
  
 +/**
 + * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 + *
 + * @dev: Pointer to the drm device
 + * Return: 0 on success. Negative error code on failure.
 + */
 +int vmw_kms_suspend(struct drm_device *dev)
 +{
 +      struct vmw_private *dev_priv = vmw_priv(dev);
 +
 +      dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
 +      if (IS_ERR(dev_priv->suspend_state)) {
 +              int ret = PTR_ERR(dev_priv->suspend_state);
 +
 +              DRM_ERROR("Failed kms suspend: %d\n", ret);
 +              dev_priv->suspend_state = NULL;
 +
 +              return ret;
 +      }
 +
 +      return 0;
 +}
 +
 +
 +/**
 + * vmw_kms_resume - Re-enable modesetting and restore state
 + *
 + * @dev: Pointer to the drm device
 + * Return: 0 on success. Negative error code on failure.
 + *
 + * State is resumed from a previous vmw_kms_suspend(). It's illegal
 + * to call this function without a previous vmw_kms_suspend().
 + */
 +int vmw_kms_resume(struct drm_device *dev)
 +{
 +      struct vmw_private *dev_priv = vmw_priv(dev);
 +      int ret;
 +
 +      if (WARN_ON(!dev_priv->suspend_state))
 +              return 0;
 +
 +      ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
 +      dev_priv->suspend_state = NULL;
 +
 +      return ret;
 +}
++
+ /**
+  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
+  *
+  * @dev: Pointer to the drm device
+  */
+ void vmw_kms_lost_device(struct drm_device *dev)
+ {
+       drm_atomic_helper_shutdown(dev);
+ }
Simple merge
Simple merge
Simple merge
Simple merge
index ec4e6b829ee279b9926ed6aa731ac67a7a7d03ca,c507c69029e31f9fe8715c384dab8f98114db7bb..e2f649fffd56484f793269582639257036e979cc
@@@ -2201,8 -2224,8 +2228,9 @@@ static int azx_probe_continue(struct az
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
        struct hdac_bus *bus = azx_bus(chip);
        struct pci_dev *pci = chip->pci;
 +      struct hda_codec *codec;
        int dev = chip->dev_index;
+       int val;
        int err;
  
        hda->probe_continued = 1;
        chip->running = 1;
        azx_add_card_list(chip);
  
-       snd_hda_set_power_save(&chip->bus, power_save * 1000);
+       val = power_save;
+ #ifdef CONFIG_PM
+       if (pm_blacklist) {
+               const struct snd_pci_quirk *q;
+               q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+               if (q && val) {
+                       dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+                                q->subvendor, q->subdevice);
+                       val = 0;
+               }
+       }
+ #endif /* CONFIG_PM */
 +      /*
 +       * The discrete GPU cannot power down unless the HDA controller runtime
 +       * suspends, so activate runtime PM on codecs even if power_save == 0.
 +       */
 +      if (use_vga_switcheroo(hda))
 +              list_for_each_codec(codec, &chip->bus)
 +                      codec->auto_runtime_pm = 1;
 +
 -      if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
+       snd_hda_set_power_save(&chip->bus, val * 1000);
 +      if (azx_has_pm_runtime(chip))
                pm_runtime_put_autosuspend(&pci->dev);
  
  out_free: