Merge airlied/drm-next into drm-intel-next-queued
author Rodrigo Vivi <rodrigo.vivi@intel.com>
Fri, 8 Dec 2017 18:15:30 +0000 (10:15 -0800)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Fri, 8 Dec 2017 18:15:30 +0000 (10:15 -0800)
Chris requested this backmerge for a reconciliation on
drm_print.h between drm-misc-next and drm-intel-next-queued

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
56 files changed:
Documentation/gpu/i915.rst
drivers/gpu/drm/i915/gvt/Makefile
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/display.h
drivers/gpu/drm/i915/gvt/dmabuf.c [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/dmabuf.h [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/fb_decoder.c [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/fb_decoder.h [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/hypercall.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mpt.h
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/sched_policy.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_guc.c
drivers/gpu/drm/i915/intel_guc_fw.c
drivers/gpu/drm/i915/intel_guc_fw.h
drivers/gpu/drm/i915/intel_guc_log.c
drivers/gpu/drm/i915/intel_gvt.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_huc.c
drivers/gpu/drm/i915/intel_huc.h
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_uc.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/i915/intel_uc_fw.c
drivers/gpu/drm/i915/intel_uc_fw.h
drivers/gpu/drm/i915/selftests/intel_guc.c
include/uapi/linux/vfio.h

diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 21577eabaf78586fb35b3408d2a34cfa820c700f..84021142a8f3cee8e5bb0d3f2da36541686f1280 100644
@@ -341,10 +341,10 @@ GuC
 GuC-specific firmware loader
 ----------------------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c
    :doc: GuC-specific firmware loader
 
-.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c
    :internal:
 
 GuC-based command submission
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 18e1c172e792365b03d4a3666a351a635a960ed7..883189694eb64e5d8bb501a33aaa9c29ca8ab1cf 100644
@@ -2,7 +2,8 @@
 GVT_DIR := gvt
 GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
        interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
-       execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o
+       execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o \
+       fb_decoder.o dmabuf.o
 
 ccflags-y                              += -I$(src) -I$(src)/$(GVT_DIR)
 i915-y                                 += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4ce2e6bd06803138a114ffde419411cd27ca8158..97bfc00d2a8204e46244d477edc57c1483e3f767 100644
@@ -335,7 +335,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        case INTEL_GVT_PCI_OPREGION:
                if (WARN_ON(!IS_ALIGNED(offset, 4)))
                        return -EINVAL;
-               ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+               ret = intel_vgpu_opregion_base_write_handler(vgpu,
+                                                  *(u32 *)p_data);
                if (ret)
                        return ret;
 
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 355120865efd14873726e8eae2e1ec6d6fb31b9f..09185036bac83b236adfe071c0c643205b5530a1 100644
@@ -67,7 +67,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
        return 1;
 }
 
-static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index d73de22102e2b77f1c4c166ee0688b86d2e29391..b46b86892d58f1a39199369ee044d603e3d77eb7 100644
@@ -179,4 +179,6 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
 void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
 
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
new file mode 100644
index 0000000..9c40a67
--- /dev/null
@@ -0,0 +1,538 @@
+/*
+ * Copyright 2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Xiaoguang Chen
+ *    Tina Zhang <tina.zhang@intel.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <linux/vfio.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
+
+static int vgpu_gem_get_pages(
+               struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct sg_table *st;
+       struct scatterlist *sg;
+       int i, ret;
+       gen8_pte_t __iomem *gtt_entries;
+       struct intel_vgpu_fb_info *fb_info;
+
+       fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
+       if (WARN_ON(!fb_info))
+               return -ENODEV;
+
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (unlikely(!st))
+               return -ENOMEM;
+
+       ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+       if (ret) {
+               kfree(st);
+               return ret;
+       }
+       gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+               (fb_info->start >> PAGE_SHIFT);
+       for_each_sg(st->sgl, sg, fb_info->size, i) {
+               sg->offset = 0;
+               sg->length = PAGE_SIZE;
+               sg_dma_address(sg) =
+                       GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+               sg_dma_len(sg) = PAGE_SIZE;
+       }
+
+       __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+
+       return 0;
+}
+
+static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
+               struct sg_table *pages)
+{
+       sg_free_table(pages);
+       kfree(pages);
+}
+
+static void dmabuf_gem_object_free(struct kref *kref)
+{
+       struct intel_vgpu_dmabuf_obj *obj =
+               container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
+       struct intel_vgpu *vgpu = obj->vgpu;
+       struct list_head *pos;
+       struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+
+       if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+               list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+                       dmabuf_obj = container_of(pos,
+                                       struct intel_vgpu_dmabuf_obj, list);
+                       if (dmabuf_obj == obj) {
+                               intel_gvt_hypervisor_put_vfio_device(vgpu);
+                               idr_remove(&vgpu->object_idr,
+                                          dmabuf_obj->dmabuf_id);
+                               kfree(dmabuf_obj->info);
+                               kfree(dmabuf_obj);
+                               list_del(pos);
+                               break;
+                       }
+               }
+       } else {
+               /* Free the orphan dmabuf_objs here */
+               kfree(obj->info);
+               kfree(obj);
+       }
+}
+
+
+static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
+{
+       kref_get(&obj->kref);
+}
+
+static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
+{
+       kref_put(&obj->kref, dmabuf_gem_object_free);
+}
+
+static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
+{
+
+       struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
+       struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+       struct intel_vgpu *vgpu = obj->vgpu;
+
+       if (vgpu) {
+               mutex_lock(&vgpu->dmabuf_lock);
+               gem_obj->base.dma_buf = NULL;
+               dmabuf_obj_put(obj);
+               mutex_unlock(&vgpu->dmabuf_lock);
+       } else {
+               /* vgpu is NULL, as it has been removed already */
+               gem_obj->base.dma_buf = NULL;
+               dmabuf_obj_put(obj);
+       }
+}
+
+static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
+       .flags = I915_GEM_OBJECT_IS_PROXY,
+       .get_pages = vgpu_gem_get_pages,
+       .put_pages = vgpu_gem_put_pages,
+       .release = vgpu_gem_release,
+};
+
+static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
+               struct intel_vgpu_fb_info *info)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_gem_object *obj;
+
+       obj = i915_gem_object_alloc(dev_priv);
+       if (obj == NULL)
+               return NULL;
+
+       drm_gem_private_object_init(dev, &obj->base,
+               info->size << PAGE_SHIFT);
+       i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+
+       obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+       obj->base.write_domain = 0;
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               unsigned int tiling_mode = 0;
+               unsigned int stride = 0;
+
+               switch (info->drm_format_mod << 10) {
+               case PLANE_CTL_TILED_LINEAR:
+                       tiling_mode = I915_TILING_NONE;
+                       break;
+               case PLANE_CTL_TILED_X:
+                       tiling_mode = I915_TILING_X;
+                       stride = info->stride;
+                       break;
+               case PLANE_CTL_TILED_Y:
+                       tiling_mode = I915_TILING_Y;
+                       stride = info->stride;
+                       break;
+               default:
+                       gvt_dbg_core("not supported tiling mode\n");
+               }
+               obj->tiling_and_stride = tiling_mode | stride;
+       } else {
+               obj->tiling_and_stride = info->drm_format_mod ?
+                                       I915_TILING_X : 0;
+       }
+
+       return obj;
+}
+
+static int vgpu_get_plane_info(struct drm_device *dev,
+               struct intel_vgpu *vgpu,
+               struct intel_vgpu_fb_info *info,
+               int plane_id)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_vgpu_primary_plane_format p;
+       struct intel_vgpu_cursor_plane_format c;
+       int ret;
+
+       if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
+               ret = intel_vgpu_decode_primary_plane(vgpu, &p);
+               if (ret)
+                       return ret;
+               info->start = p.base;
+               info->start_gpa = p.base_gpa;
+               info->width = p.width;
+               info->height = p.height;
+               info->stride = p.stride;
+               info->drm_format = p.drm_format;
+               info->drm_format_mod = p.tiled;
+               info->size = (((p.stride * p.height * p.bpp) / 8) +
+                               (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+       } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
+               ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
+               if (ret)
+                       return ret;
+               info->start = c.base;
+               info->start_gpa = c.base_gpa;
+               info->width = c.width;
+               info->height = c.height;
+               info->stride = c.width * (c.bpp / 8);
+               info->drm_format = c.drm_format;
+               info->drm_format_mod = 0;
+               info->x_pos = c.x_pos;
+               info->y_pos = c.y_pos;
+
+               /* The invalid cursor hotspot value is delivered to host
+                * until we find a way to get the cursor hotspot info of
+                * guest OS.
+                */
+               info->x_hot = UINT_MAX;
+               info->y_hot = UINT_MAX;
+               info->size = (((info->stride * c.height * c.bpp) / 8)
+                               + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+       } else {
+               gvt_vgpu_err("invalid plane id:%d\n", plane_id);
+               return -EINVAL;
+       }
+
+       if (info->size == 0) {
+               gvt_vgpu_err("fb size is zero\n");
+               return -EINVAL;
+       }
+
+       if (info->start & (PAGE_SIZE - 1)) {
+               gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
+               return -EFAULT;
+       }
+       if (((info->start >> PAGE_SHIFT) + info->size) >
+               ggtt_total_entries(&dev_priv->ggtt)) {
+               gvt_vgpu_err("Invalid GTT offset or size\n");
+               return -EFAULT;
+       }
+
+       if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
+               gvt_vgpu_err("invalid gma addr\n");
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+static struct intel_vgpu_dmabuf_obj *
+pick_dmabuf_by_info(struct intel_vgpu *vgpu,
+                   struct intel_vgpu_fb_info *latest_info)
+{
+       struct list_head *pos;
+       struct intel_vgpu_fb_info *fb_info;
+       struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
+       struct intel_vgpu_dmabuf_obj *ret = NULL;
+
+       list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+               dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+                                               list);
+               if ((dmabuf_obj == NULL) ||
+                   (dmabuf_obj->info == NULL))
+                       continue;
+
+               fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
+               if ((fb_info->start == latest_info->start) &&
+                   (fb_info->start_gpa == latest_info->start_gpa) &&
+                   (fb_info->size == latest_info->size) &&
+                   (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
+                   (fb_info->drm_format == latest_info->drm_format) &&
+                   (fb_info->width == latest_info->width) &&
+                   (fb_info->height == latest_info->height)) {
+                       ret = dmabuf_obj;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static struct intel_vgpu_dmabuf_obj *
+pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
+{
+       struct list_head *pos;
+       struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
+       struct intel_vgpu_dmabuf_obj *ret = NULL;
+
+       list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+               dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+                                               list);
+               if (!dmabuf_obj)
+                       continue;
+
+               if (dmabuf_obj->dmabuf_id == id) {
+                       ret = dmabuf_obj;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
+                     struct intel_vgpu_fb_info *fb_info)
+{
+       gvt_dmabuf->drm_format = fb_info->drm_format;
+       gvt_dmabuf->width = fb_info->width;
+       gvt_dmabuf->height = fb_info->height;
+       gvt_dmabuf->stride = fb_info->stride;
+       gvt_dmabuf->size = fb_info->size;
+       gvt_dmabuf->x_pos = fb_info->x_pos;
+       gvt_dmabuf->y_pos = fb_info->y_pos;
+       gvt_dmabuf->x_hot = fb_info->x_hot;
+       gvt_dmabuf->y_hot = fb_info->y_hot;
+}
+
+int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
+{
+       struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+       struct vfio_device_gfx_plane_info *gfx_plane_info = args;
+       struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+       struct intel_vgpu_fb_info fb_info;
+       int ret = 0;
+
+       if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
+                                      VFIO_GFX_PLANE_TYPE_PROBE))
+               return ret;
+       else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
+                       (!gfx_plane_info->flags))
+               return -EINVAL;
+
+       ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
+                                       gfx_plane_info->drm_plane_type);
+       if (ret != 0)
+               goto out;
+
+       mutex_lock(&vgpu->dmabuf_lock);
+       /* If exists, pick up the exposed dmabuf_obj */
+       dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
+       if (dmabuf_obj) {
+               update_fb_info(gfx_plane_info, &fb_info);
+               gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;
+
+               /* This buffer may be released between query_plane ioctl and
+                * get_dmabuf ioctl. Add the refcount to make sure it won't
+                * be released between the two ioctls.
+                */
+               if (!dmabuf_obj->initref) {
+                       dmabuf_obj->initref = true;
+                       dmabuf_obj_get(dmabuf_obj);
+               }
+               ret = 0;
+               gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
+                           vgpu->id, kref_read(&dmabuf_obj->kref),
+                           gfx_plane_info->dmabuf_id);
+               mutex_unlock(&vgpu->dmabuf_lock);
+               goto out;
+       }
+
+       mutex_unlock(&vgpu->dmabuf_lock);
+
+       /* Need to allocate a new one*/
+       dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
+       if (unlikely(!dmabuf_obj)) {
+               gvt_vgpu_err("alloc dmabuf_obj failed\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
+                                  GFP_KERNEL);
+       if (unlikely(!dmabuf_obj->info)) {
+               gvt_vgpu_err("allocate intel vgpu fb info failed\n");
+               ret = -ENOMEM;
+               goto out_free_dmabuf;
+       }
+       memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));
+
+       ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;
+
+       dmabuf_obj->vgpu = vgpu;
+
+       ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
+       if (ret < 0)
+               goto out_free_info;
+       gfx_plane_info->dmabuf_id = ret;
+       dmabuf_obj->dmabuf_id = ret;
+
+       dmabuf_obj->initref = true;
+
+       kref_init(&dmabuf_obj->kref);
+
+       mutex_lock(&vgpu->dmabuf_lock);
+       if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
+               gvt_vgpu_err("get vfio device failed\n");
+               mutex_unlock(&vgpu->dmabuf_lock);
+               goto out_free_info;
+       }
+       mutex_unlock(&vgpu->dmabuf_lock);
+
+       update_fb_info(gfx_plane_info, &fb_info);
+
+       INIT_LIST_HEAD(&dmabuf_obj->list);
+       mutex_lock(&vgpu->dmabuf_lock);
+       list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
+       mutex_unlock(&vgpu->dmabuf_lock);
+
+       gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
+                   __func__, kref_read(&dmabuf_obj->kref), ret);
+
+       return 0;
+
+out_free_info:
+       kfree(dmabuf_obj->info);
+out_free_dmabuf:
+       kfree(dmabuf_obj);
+out:
+       /* ENODEV means plane isn't ready, which might be a normal case. */
+       return (ret == -ENODEV) ? 0 : ret;
+}
+
+/* To associate an exposed dmabuf with the dmabuf_obj */
+int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
+{
+       struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+       struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+       struct drm_i915_gem_object *obj;
+       struct dma_buf *dmabuf;
+       int dmabuf_fd;
+       int ret = 0;
+
+       mutex_lock(&vgpu->dmabuf_lock);
+
+       dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
+       if (dmabuf_obj == NULL) {
+               gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       obj = vgpu_create_gem(dev, dmabuf_obj->info);
+       if (obj == NULL) {
+               gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       obj->gvt_info = dmabuf_obj->info;
+
+       dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+       if (IS_ERR(dmabuf)) {
+               gvt_vgpu_err("export dma-buf failed\n");
+               ret = PTR_ERR(dmabuf);
+               goto out_free_gem;
+       }
+       obj->base.dma_buf = dmabuf;
+
+       i915_gem_object_put(obj);
+
+       ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
+       if (ret < 0) {
+               gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
+               goto out_free_dmabuf;
+       }
+       dmabuf_fd = ret;
+
+       dmabuf_obj_get(dmabuf_obj);
+
+       if (dmabuf_obj->initref) {
+               dmabuf_obj->initref = false;
+               dmabuf_obj_put(dmabuf_obj);
+       }
+
+       mutex_unlock(&vgpu->dmabuf_lock);
+
+       gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
+                   "        file count: %ld, GEM ref: %d\n",
+                   vgpu->id, dmabuf_obj->dmabuf_id,
+                   kref_read(&dmabuf_obj->kref),
+                   dmabuf_fd,
+                   file_count(dmabuf->file),
+                   kref_read(&obj->base.refcount));
+
+       return dmabuf_fd;
+
+out_free_dmabuf:
+       dma_buf_put(dmabuf);
+out_free_gem:
+       i915_gem_object_put(obj);
+out:
+       mutex_unlock(&vgpu->dmabuf_lock);
+       return ret;
+}
+
+void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
+{
+       struct list_head *pos, *n;
+       struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+
+       mutex_lock(&vgpu->dmabuf_lock);
+       list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
+               dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+                                               list);
+               if (dmabuf_obj->initref) {
+                       dmabuf_obj->initref = false;
+                       dmabuf_obj_put(dmabuf_obj);
+               }
+
+               idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
+
+               if (dmabuf_obj->vgpu)
+                       intel_gvt_hypervisor_put_vfio_device(vgpu);
+
+               list_del(pos);
+               dmabuf_obj->vgpu = NULL;
+
+       }
+       mutex_unlock(&vgpu->dmabuf_lock);
+}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.h b/drivers/gpu/drm/i915/gvt/dmabuf.h
new file mode 100644
index 0000000..5f8f03f
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ *    Xiaoguang Chen
+ *    Tina Zhang <tina.zhang@intel.com>
+ */
+
+#ifndef _GVT_DMABUF_H_
+#define _GVT_DMABUF_H_
+#include <linux/vfio.h>
+
+struct intel_vgpu_fb_info {
+       __u64 start;
+       __u64 start_gpa;
+       __u64 drm_format_mod;
+       __u32 drm_format;       /* drm format of plane */
+       __u32 width;    /* width of plane */
+       __u32 height;   /* height of plane */
+       __u32 stride;   /* stride of plane */
+       __u32 size;     /* size of plane in bytes, align on page */
+       __u32 x_pos;    /* horizontal position of cursor plane */
+       __u32 y_pos;    /* vertical position of cursor plane */
+       __u32 x_hot;    /* horizontal position of cursor hotspot */
+       __u32 y_hot;    /* vertical position of cursor hotspot */
+       struct intel_vgpu_dmabuf_obj *obj;
+};
+
+/**
+ * struct intel_vgpu_dmabuf_obj- Intel vGPU device buffer object
+ */
+struct intel_vgpu_dmabuf_obj {
+       struct intel_vgpu *vgpu;
+       struct intel_vgpu_fb_info *info;
+       __u32 dmabuf_id;
+       struct kref kref;
+       bool initref;
+       struct list_head list;
+};
+
+int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
+int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
+void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index c9fa0fb488d3a549e3e2293539dde2d70ae16555..769c1c24ae7598e9299c1301a93e7fce284de29f 100644
@@ -458,7 +458,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
        gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
                        emulate_schedule_in);
 
-       queue_workload(workload);
+       intel_vgpu_queue_workload(workload);
        return 0;
 }
 
@@ -528,7 +528,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu)
 {
        enum intel_engine_id i;
        struct intel_engine_cs *engine;
@@ -542,7 +542,7 @@ void clean_execlist(struct intel_vgpu *vgpu)
        }
 }
 
-void reset_execlist(struct intel_vgpu *vgpu,
+static void reset_execlist(struct intel_vgpu *vgpu,
                unsigned long engine_mask)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -553,7 +553,7 @@ void reset_execlist(struct intel_vgpu *vgpu,
                init_vgpu_execlist(vgpu, engine->id);
 }
 
-int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu)
 {
        reset_execlist(vgpu, ALL_ENGINES);
        return 0;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
new file mode 100644
index 0000000..72f4217
--- /dev/null
@@ -0,0 +1,508 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ *    Bing Niu <bing.niu@intel.com>
+ *    Xu Han <xu.han@intel.com>
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Xiaoguang Chen <xiaoguang.chen@intel.com>
+ *    Yang Liu <yang2.liu@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include <uapi/drm/drm_fourcc.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define PRIMARY_FORMAT_NUM     16
+struct pixel_format {
+       int     drm_format;     /* Pixel format in DRM definition */
+       int     bpp;            /* Bits per pixel, 0 indicates invalid */
+       char    *desc;          /* The description */
+};
+
+static struct pixel_format bdw_pixel_formats[] = {
+       {DRM_FORMAT_C8, 8, "8-bit Indexed"},
+       {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
+       {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
+       {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
+
+       {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
+       {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
+
+       /* non-supported format has bpp default to 0 */
+       {0, 0, NULL},
+};
+
+static struct pixel_format skl_pixel_formats[] = {
+       {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"},
+       {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"},
+       {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"},
+       {DRM_FORMAT_VYUY, 16, "16-bit packed VYUY (8:8:8:8 MSB-Y2:U:Y1:V)"},
+
+       {DRM_FORMAT_C8, 8, "8-bit Indexed"},
+       {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
+       {DRM_FORMAT_ABGR8888, 32, "32-bit RGBA (8:8:8:8 MSB-A:B:G:R)"},
+       {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
+
+       {DRM_FORMAT_ARGB8888, 32, "32-bit BGRA (8:8:8:8 MSB-A:R:G:B)"},
+       {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
+       {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
+       {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
+
+       /* non-supported format has bpp default to 0 */
+       {0, 0, NULL},
+};
+
+static int bdw_format_to_drm(int format)
+{
+       int bdw_pixel_formats_index = 6;
+
+       switch (format) {
+       case DISPPLANE_8BPP:
+               bdw_pixel_formats_index = 0;
+               break;
+       case DISPPLANE_BGRX565:
+               bdw_pixel_formats_index = 1;
+               break;
+       case DISPPLANE_BGRX888:
+               bdw_pixel_formats_index = 2;
+               break;
+       case DISPPLANE_RGBX101010:
+               bdw_pixel_formats_index = 3;
+               break;
+       case DISPPLANE_BGRX101010:
+               bdw_pixel_formats_index = 4;
+               break;
+       case DISPPLANE_RGBX888:
+               bdw_pixel_formats_index = 5;
+               break;
+
+       default:
+               break;
+       }
+
+       return bdw_pixel_formats_index;
+}
+
+static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
+       int yuv_order)
+{
+       int skl_pixel_formats_index = 12;
+
+       switch (format) {
+       case PLANE_CTL_FORMAT_INDEXED:
+               skl_pixel_formats_index = 4;
+               break;
+       case PLANE_CTL_FORMAT_RGB_565:
+               skl_pixel_formats_index = 5;
+               break;
+       case PLANE_CTL_FORMAT_XRGB_8888:
+               if (rgb_order)
+                       skl_pixel_formats_index = alpha ? 6 : 7;
+               else
+                       skl_pixel_formats_index = alpha ? 8 : 9;
+               break;
+       case PLANE_CTL_FORMAT_XRGB_2101010:
+               skl_pixel_formats_index = rgb_order ? 10 : 11;
+               break;
+       case PLANE_CTL_FORMAT_YUV422:
+               skl_pixel_formats_index = yuv_order >> 16;
+               if (skl_pixel_formats_index > 3)
+                       return -EINVAL;
+               break;
+
+       default:
+               break;
+       }
+
+       return skl_pixel_formats_index;
+}
+
+static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
+       u32 tiled, int stride_mask, int bpp)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       u32 stride_reg = vgpu_vreg(vgpu, DSPSTRIDE(pipe)) & stride_mask;
+       u32 stride = stride_reg;
+
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               switch (tiled) {
+               case PLANE_CTL_TILED_LINEAR:
+                       stride = stride_reg * 64;
+                       break;
+               case PLANE_CTL_TILED_X:
+                       stride = stride_reg * 512;
+                       break;
+               case PLANE_CTL_TILED_Y:
+                       stride = stride_reg * 128;
+                       break;
+               case PLANE_CTL_TILED_YF:
+                       if (bpp == 8)
+                               stride = stride_reg * 64;
+                       else if (bpp == 16 || bpp == 32 || bpp == 64)
+                               stride = stride_reg * 128;
+                       else
+                               gvt_dbg_core("skl: unsupported bpp:%d\n", bpp);
+                       break;
+               default:
+                       gvt_dbg_core("skl: unsupported tile format:%x\n",
+                               tiled);
+               }
+       }
+
+       return stride;
+}
+
+static int get_active_pipe(struct intel_vgpu *vgpu)
+{
+       int i;
+
+       for (i = 0; i < I915_MAX_PIPES; i++)
+               if (pipe_is_enabled(vgpu, i))
+                       break;
+
+       return i;
+}
+
+/**
+ * intel_vgpu_decode_primary_plane - Decode primary plane
+ * @vgpu: input vgpu
+ * @plane: primary plane to save decoded info
+ * This function is called for decoding plane
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+       struct intel_vgpu_primary_plane_format *plane)
+{
+       u32 val, fmt;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       int pipe;
+
+       pipe = get_active_pipe(vgpu);
+       if (pipe >= I915_MAX_PIPES)
+               return -ENODEV;
+
+       val = vgpu_vreg(vgpu, DSPCNTR(pipe));
+       plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
+       if (!plane->enabled)
+               return -ENODEV;
+
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
+               _PLANE_CTL_TILED_SHIFT;
+               fmt = skl_format_to_drm(
+                       val & PLANE_CTL_FORMAT_MASK,
+                       val & PLANE_CTL_ORDER_RGBX,
+                       val & PLANE_CTL_ALPHA_MASK,
+                       val & PLANE_CTL_YUV422_ORDER_MASK);
+               plane->bpp = skl_pixel_formats[fmt].bpp;
+               plane->drm_format = skl_pixel_formats[fmt].drm_format;
+       } else {
+               plane->tiled = !!(val & DISPPLANE_TILED);
+               fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
+               plane->bpp = bdw_pixel_formats[fmt].bpp;
+               plane->drm_format = bdw_pixel_formats[fmt].drm_format;
+       }
+
+       if (!plane->bpp) {
+               gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
+               return -EINVAL;
+       }
+
+       plane->hw_format = fmt;
+
+       plane->base = vgpu_vreg(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
+       if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+               gvt_vgpu_err("invalid gma address: %lx\n",
+                            (unsigned long)plane->base);
+               return  -EINVAL;
+       }
+
+       plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+       if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+               gvt_vgpu_err("invalid gma address: %lx\n",
+                               (unsigned long)plane->base);
+               return  -EINVAL;
+       }
+
+       plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+               (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+                       (_PRI_PLANE_STRIDE_MASK >> 6) :
+                               _PRI_PLANE_STRIDE_MASK, plane->bpp);
+
+       plane->width = (vgpu_vreg(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
+               _PIPE_H_SRCSZ_SHIFT;
+       plane->width += 1;
+       plane->height = (vgpu_vreg(vgpu, PIPESRC(pipe)) &
+                       _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
+       plane->height += 1;     /* raw height is one minus the real value */
+
+       val = vgpu_vreg(vgpu, DSPTILEOFF(pipe));
+       plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
+               _PRI_PLANE_X_OFF_SHIFT;
+       plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
+               _PRI_PLANE_Y_OFF_SHIFT;
+
+       return 0;
+}
+
+#define CURSOR_FORMAT_NUM      (1 << 6)
+struct cursor_mode_format {
+       int     drm_format;     /* Pixel format in DRM definition */
+       u8      bpp;            /* Bits per pixel; 0 indicates invalid */
+       u32     width;          /* In pixel */
+       u32     height;         /* In lines */
+       char    *desc;          /* The description */
+};
+
+static struct cursor_mode_format cursor_pixel_formats[] = {
+       {DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"},
+       {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"},
+       {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+       {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+
+       /* non-supported format has bpp default to 0 */
+       {0, 0, 0, 0, NULL},
+};
+
+static int cursor_mode_to_drm(int mode)
+{
+       int cursor_pixel_formats_index = 4;
+
+       switch (mode) {
+       case CURSOR_MODE_128_ARGB_AX:
+               cursor_pixel_formats_index = 0;
+               break;
+       case CURSOR_MODE_256_ARGB_AX:
+               cursor_pixel_formats_index = 1;
+               break;
+       case CURSOR_MODE_64_ARGB_AX:
+               cursor_pixel_formats_index = 2;
+               break;
+       case CURSOR_MODE_64_32B_AX:
+               cursor_pixel_formats_index = 3;
+               break;
+
+       default:
+               break;
+       }
+
+       return cursor_pixel_formats_index;
+}
+
+/**
+ * intel_vgpu_decode_cursor_plane - Decode sprite plane
+ * @vgpu: input vgpu
+ * @plane: cursor plane to save decoded info
+ * This function is called for decoding plane
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
+       struct intel_vgpu_cursor_plane_format *plane)
+{
+       u32 val, mode, index;
+       u32 alpha_plane, alpha_force;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       int pipe;
+
+       pipe = get_active_pipe(vgpu);
+       if (pipe >= I915_MAX_PIPES)
+               return -ENODEV;
+
+       val = vgpu_vreg(vgpu, CURCNTR(pipe));
+       mode = val & CURSOR_MODE;
+       plane->enabled = (mode != CURSOR_MODE_DISABLE);
+       if (!plane->enabled)
+               return -ENODEV;
+
+       index = cursor_mode_to_drm(mode);
+
+       if (!cursor_pixel_formats[index].bpp) {
+               gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode);
+               return -EINVAL;
+       }
+       plane->mode = mode;
+       plane->bpp = cursor_pixel_formats[index].bpp;
+       plane->drm_format = cursor_pixel_formats[index].drm_format;
+       plane->width = cursor_pixel_formats[index].width;
+       plane->height = cursor_pixel_formats[index].height;
+
+       alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
+                               _CURSOR_ALPHA_PLANE_SHIFT;
+       alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
+                               _CURSOR_ALPHA_FORCE_SHIFT;
+       if (alpha_plane || alpha_force)
+               gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
+                       alpha_plane, alpha_force);
+
+       plane->base = vgpu_vreg(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
+       if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+               gvt_vgpu_err("invalid gma address: %lx\n",
+                            (unsigned long)plane->base);
+               return  -EINVAL;
+       }
+
+       plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+       if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+               gvt_vgpu_err("invalid gma address: %lx\n",
+                               (unsigned long)plane->base);
+               return  -EINVAL;
+       }
+
+       val = vgpu_vreg(vgpu, CURPOS(pipe));
+       plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
+       plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
+       plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
+       plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+
+       return 0;
+}
+
+#define SPRITE_FORMAT_NUM      (1 << 3)
+
+static struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = {
+       [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"},
+       [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"},
+       [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"},
+       [0x4] = {DRM_FORMAT_AYUV, 32,
+               "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"},
+};
+
+/**
+ * intel_vgpu_decode_sprite_plane - Decode sprite plane
+ * @vgpu: input vgpu
+ * @plane: sprite plane to save decoded info
+ * This function is called for decoding plane
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
+       struct intel_vgpu_sprite_plane_format *plane)
+{
+       u32 val, fmt;
+       u32 color_order, yuv_order;
+       int drm_format;
+       int pipe;
+
+       pipe = get_active_pipe(vgpu);
+       if (pipe >= I915_MAX_PIPES)
+               return -ENODEV;
+
+       val = vgpu_vreg(vgpu, SPRCTL(pipe));
+       plane->enabled = !!(val & SPRITE_ENABLE);
+       if (!plane->enabled)
+               return -ENODEV;
+
+       plane->tiled = !!(val & SPRITE_TILED);
+       color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
+       yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >>
+                               _SPRITE_YUV_ORDER_SHIFT;
+
+       fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT;
+       if (!sprite_pixel_formats[fmt].bpp) {
+               gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
+               return -EINVAL;
+       }
+       plane->hw_format = fmt;
+       plane->bpp = sprite_pixel_formats[fmt].bpp;
+       drm_format = sprite_pixel_formats[fmt].drm_format;
+
+       /* Order of RGB values in an RGBxxx buffer may be ordered RGB or
+        * BGR depending on the state of the color_order field
+        */
+       if (!color_order) {
+               if (drm_format == DRM_FORMAT_XRGB2101010)
+                       drm_format = DRM_FORMAT_XBGR2101010;
+               else if (drm_format == DRM_FORMAT_XRGB8888)
+                       drm_format = DRM_FORMAT_XBGR8888;
+       }
+
+       if (drm_format == DRM_FORMAT_YUV422) {
+               switch (yuv_order) {
+               case 0:
+                       drm_format = DRM_FORMAT_YUYV;
+                       break;
+               case 1:
+                       drm_format = DRM_FORMAT_UYVY;
+                       break;
+               case 2:
+                       drm_format = DRM_FORMAT_YVYU;
+                       break;
+               case 3:
+                       drm_format = DRM_FORMAT_VYUY;
+                       break;
+               default:
+                       /* yuv_order has only 2 bits */
+                       break;
+               }
+       }
+
+       plane->drm_format = drm_format;
+
+       plane->base = vgpu_vreg(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
+       if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+               gvt_vgpu_err("invalid gma address: %lx\n",
+                            (unsigned long)plane->base);
+               return  -EINVAL;
+       }
+
+       plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+       if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+               gvt_vgpu_err("invalid gma address: %lx\n",
+                               (unsigned long)plane->base);
+               return  -EINVAL;
+       }
+
+       plane->stride = vgpu_vreg(vgpu, SPRSTRIDE(pipe)) &
+                               _SPRITE_STRIDE_MASK;
+
+       val = vgpu_vreg(vgpu, SPRSIZE(pipe));
+       plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
+               _SPRITE_SIZE_HEIGHT_SHIFT;
+       plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
+               _SPRITE_SIZE_WIDTH_SHIFT;
+       plane->height += 1;     /* raw height is one minus the real value */
+       plane->width += 1;      /* raw width is one minus the real value */
+
+       val = vgpu_vreg(vgpu, SPRPOS(pipe));
+       plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
+       plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
+
+       val = vgpu_vreg(vgpu, SPROFFSET(pipe));
+       plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
+                          _SPRITE_OFFSET_START_X_SHIFT;
+       plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
+                          _SPRITE_OFFSET_START_Y_SHIFT;
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
new file mode 100644
index 0000000..cb055f3
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ *    Bing Niu <bing.niu@intel.com>
+ *    Xu Han <xu.han@intel.com>
+ *    Ping Gao <ping.a.gao@intel.com>
+ *    Xiaoguang Chen <xiaoguang.chen@intel.com>
+ *    Yang Liu <yang2.liu@intel.com>
+ *    Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_FB_DECODER_H_
+#define _GVT_FB_DECODER_H_
+
+#define _PLANE_CTL_FORMAT_SHIFT                24
+#define _PLANE_CTL_TILED_SHIFT         10
+#define _PIPE_V_SRCSZ_SHIFT            0
+#define _PIPE_V_SRCSZ_MASK             (0xfff << _PIPE_V_SRCSZ_SHIFT)
+#define _PIPE_H_SRCSZ_SHIFT            16
+#define _PIPE_H_SRCSZ_MASK             (0x1fff << _PIPE_H_SRCSZ_SHIFT)
+
+#define _PRI_PLANE_FMT_SHIFT           26
+#define _PRI_PLANE_STRIDE_MASK         (0x3ff << 6)
+#define _PRI_PLANE_X_OFF_SHIFT         0
+#define _PRI_PLANE_X_OFF_MASK          (0x1fff << _PRI_PLANE_X_OFF_SHIFT)
+#define _PRI_PLANE_Y_OFF_SHIFT         16
+#define _PRI_PLANE_Y_OFF_MASK          (0xfff << _PRI_PLANE_Y_OFF_SHIFT)
+
+#define _CURSOR_MODE                   0x3f
+#define _CURSOR_ALPHA_FORCE_SHIFT      8
+#define _CURSOR_ALPHA_FORCE_MASK       (0x3 << _CURSOR_ALPHA_FORCE_SHIFT)
+#define _CURSOR_ALPHA_PLANE_SHIFT      10
+#define _CURSOR_ALPHA_PLANE_MASK       (0x3 << _CURSOR_ALPHA_PLANE_SHIFT)
+#define _CURSOR_POS_X_SHIFT            0
+#define _CURSOR_POS_X_MASK             (0x1fff << _CURSOR_POS_X_SHIFT)
+#define _CURSOR_SIGN_X_SHIFT           15
+#define _CURSOR_SIGN_X_MASK            (1 << _CURSOR_SIGN_X_SHIFT)
+#define _CURSOR_POS_Y_SHIFT            16
+#define _CURSOR_POS_Y_MASK             (0xfff << _CURSOR_POS_Y_SHIFT)
+#define _CURSOR_SIGN_Y_SHIFT           31
+#define _CURSOR_SIGN_Y_MASK            (1 << _CURSOR_SIGN_Y_SHIFT)
+
+#define _SPRITE_FMT_SHIFT              25
+#define _SPRITE_COLOR_ORDER_SHIFT      20
+#define _SPRITE_YUV_ORDER_SHIFT                16
+#define _SPRITE_STRIDE_SHIFT           6
+#define _SPRITE_STRIDE_MASK            (0x1ff << _SPRITE_STRIDE_SHIFT)
+#define _SPRITE_SIZE_WIDTH_SHIFT       0
+#define _SPRITE_SIZE_HEIGHT_SHIFT      16
+#define _SPRITE_SIZE_WIDTH_MASK                (0x1fff << _SPRITE_SIZE_WIDTH_SHIFT)
+#define _SPRITE_SIZE_HEIGHT_MASK       (0xfff << _SPRITE_SIZE_HEIGHT_SHIFT)
+#define _SPRITE_POS_X_SHIFT            0
+#define _SPRITE_POS_Y_SHIFT            16
+#define _SPRITE_POS_X_MASK             (0x1fff << _SPRITE_POS_X_SHIFT)
+#define _SPRITE_POS_Y_MASK             (0xfff << _SPRITE_POS_Y_SHIFT)
+#define _SPRITE_OFFSET_START_X_SHIFT   0
+#define _SPRITE_OFFSET_START_Y_SHIFT   16
+#define _SPRITE_OFFSET_START_X_MASK    (0x1fff << _SPRITE_OFFSET_START_X_SHIFT)
+#define _SPRITE_OFFSET_START_Y_MASK    (0xfff << _SPRITE_OFFSET_START_Y_SHIFT)
+
+enum GVT_FB_EVENT {
+       FB_MODE_SET_START = 1,
+       FB_MODE_SET_END,
+       FB_DISPLAY_FLIP,
+};
+
+enum DDI_PORT {
+       DDI_PORT_NONE   = 0,
+       DDI_PORT_B      = 1,
+       DDI_PORT_C      = 2,
+       DDI_PORT_D      = 3,
+       DDI_PORT_E      = 4
+};
+
+struct intel_gvt;
+
+/* color space conversion and gamma correction are not included */
+struct intel_vgpu_primary_plane_format {
+       u8      enabled;        /* plane is enabled */
+       u8      tiled;          /* X-tiled */
+       u8      bpp;            /* bits per pixel */
+       u32     hw_format;      /* format field in the PRI_CTL register */
+       u32     drm_format;     /* format in DRM definition */
+       u32     base;           /* framebuffer base in graphics memory */
+       u64     base_gpa;
+       u32     x_offset;       /* in pixels */
+       u32     y_offset;       /* in lines */
+       u32     width;          /* in pixels */
+       u32     height;         /* in lines */
+       u32     stride;         /* in bytes */
+};
+
+struct intel_vgpu_sprite_plane_format {
+       u8      enabled;        /* plane is enabled */
+       u8      tiled;          /* X-tiled */
+       u8      bpp;            /* bits per pixel */
+       u32     hw_format;      /* format field in the SPR_CTL register */
+       u32     drm_format;     /* format in DRM definition */
+       u32     base;           /* sprite base in graphics memory */
+       u64     base_gpa;
+       u32     x_pos;          /* in pixels */
+       u32     y_pos;          /* in lines */
+       u32     x_offset;       /* in pixels */
+       u32     y_offset;       /* in lines */
+       u32     width;          /* in pixels */
+       u32     height;         /* in lines */
+       u32     stride;         /* in bytes */
+};
+
+struct intel_vgpu_cursor_plane_format {
+       u8      enabled;
+       u8      mode;           /* cursor mode select */
+       u8      bpp;            /* bits per pixel */
+       u32     drm_format;     /* format in DRM definition */
+       u32     base;           /* cursor base in graphics memory */
+       u64     base_gpa;
+       u32     x_pos;          /* in pixels */
+       u32     y_pos;          /* in lines */
+       u8      x_sign;         /* X Position Sign */
+       u8      y_sign;         /* Y Position Sign */
+       u32     width;          /* in pixels */
+       u32     height;         /* in lines */
+       u32     x_hot;          /* in pixels */
+       u32     y_hot;          /* in pixels */
+};
+
+struct intel_vgpu_pipe_format {
+       struct intel_vgpu_primary_plane_format  primary;
+       struct intel_vgpu_sprite_plane_format   sprite;
+       struct intel_vgpu_cursor_plane_format   cursor;
+       enum DDI_PORT ddi_port;  /* the DDI port that pipe is connected to */
+};
+
+struct intel_vgpu_fb_format {
+       struct intel_vgpu_pipe_format   pipes[I915_MAX_PIPES];
+};
+
+int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+       struct intel_vgpu_primary_plane_format *plane);
+int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
+       struct intel_vgpu_cursor_plane_format *plane);
+int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
+       struct intel_vgpu_sprite_plane_format *plane);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3a74a408a96656d9efe852ba924bf79b833670e2..9a5dce3aa10ab2bfe2aa884af4974517ebf9f5c0 100644
@@ -181,6 +181,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .vgpu_deactivate = intel_gvt_deactivate_vgpu,
        .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
        .get_gvt_attrs = intel_get_gvt_attrs,
+       .vgpu_query_plane = intel_vgpu_query_plane,
+       .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
 };
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 393066726993e55aacc4e5535f3447d74f033d57..77df9bad5dea1d6a8dc76293e1ddd12a947b8cc8 100644
@@ -46,6 +46,8 @@
 #include "sched_policy.h"
 #include "render.h"
 #include "cmd_parser.h"
+#include "fb_decoder.h"
+#include "dmabuf.h"
 
 #define GVT_MAX_VGPU 8
 
@@ -123,7 +125,9 @@ struct intel_vgpu_irq {
 };
 
 struct intel_vgpu_opregion {
+       bool mapped;
        void *va;
+       void *va_gopregion;
        u32 gfn[INTEL_GVT_OPREGION_PAGES];
 };
 
@@ -206,8 +210,16 @@ struct intel_vgpu {
                struct kvm *kvm;
                struct work_struct release_work;
                atomic_t released;
+               struct vfio_device *vfio_device;
        } vdev;
 #endif
+
+       struct list_head dmabuf_obj_list_head;
+       struct mutex dmabuf_lock;
+       struct idr object_idr;
+
+       struct completion vblank_done;
+
 };
 
 /* validating GM healthy status*/
@@ -505,7 +517,8 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
 }
 
 void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
 
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
@@ -532,6 +545,8 @@ struct intel_gvt_ops {
                        const char *name);
        bool (*get_gvt_attrs)(struct attribute ***type_attrs,
                        struct attribute_group ***intel_vgpu_type_groups);
+       int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
+       int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
 };
 
 
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index df7f33abd393ed6ef0c071895f6a4b1b519b7415..a1bd82feb827451d8c1aa4f010e847e22fffabcd 100644
@@ -55,6 +55,9 @@ struct intel_gvt_mpt {
                              unsigned long mfn, unsigned int nr, bool map);
        int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
                             bool map);
+       int (*set_opregion)(void *vgpu);
+       int (*get_vfio_device)(void *vgpu);
+       void (*put_vfio_device)(void *vgpu);
 };
 
 extern struct intel_gvt_mpt xengt_mpt;
index 110f07e8bcfb9093b52027f89af1ad1017821535..b8a85e08091ad852be028a9a6f64639845b8c913 100644 (file)
@@ -53,11 +53,23 @@ static const struct intel_gvt_ops *intel_gvt_ops;
 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
 #define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
 
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+
+struct vfio_region;
+struct intel_vgpu_regops {
+       size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
+                       size_t count, loff_t *ppos, bool iswrite);
+       void (*release)(struct intel_vgpu *vgpu,
+                       struct vfio_region *region);
+};
+
 struct vfio_region {
        u32                             type;
        u32                             subtype;
        size_t                          size;
        u32                             flags;
+       const struct intel_vgpu_regops  *ops;
+       void                            *data;
 };
 
 struct kvmgt_pgfn {
@@ -316,6 +328,108 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
        }
 }
 
+static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
+               size_t count, loff_t *ppos, bool iswrite)
+{
+       unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+                       VFIO_PCI_NUM_REGIONS;
+       void *base = vgpu->vdev.region[i].data;
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+       if (pos >= vgpu->vdev.region[i].size || iswrite) {
+               gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
+               return -EINVAL;
+       }
+       count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+       memcpy(buf, base + pos, count);
+
+       return count;
+}
+
+static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
+               struct vfio_region *region)
+{
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
+       .rw = intel_vgpu_reg_rw_opregion,
+       .release = intel_vgpu_reg_release_opregion,
+};
+
+static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
+               unsigned int type, unsigned int subtype,
+               const struct intel_vgpu_regops *ops,
+               size_t size, u32 flags, void *data)
+{
+       struct vfio_region *region;
+
+       region = krealloc(vgpu->vdev.region,
+                       (vgpu->vdev.num_regions + 1) * sizeof(*region),
+                       GFP_KERNEL);
+       if (!region)
+               return -ENOMEM;
+
+       vgpu->vdev.region = region;
+       vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
+       vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
+       vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
+       vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
+       vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
+       vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
+       vgpu->vdev.num_regions++;
+       return 0;
+}
+
+static int kvmgt_get_vfio_device(void *p_vgpu)
+{
+       struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+       vgpu->vdev.vfio_device = vfio_device_get_from_dev(
+               mdev_dev(vgpu->vdev.mdev));
+       if (!vgpu->vdev.vfio_device) {
+               gvt_vgpu_err("failed to get vfio device\n");
+               return -ENODEV;
+       }
+       return 0;
+}
+
+static int kvmgt_set_opregion(void *p_vgpu)
+{
+       struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+       void *base;
+       int ret;
+
+       /* Each vGPU has its own opregion, although VFIO will create another
+        * one later. This one is used to expose the opregion to VFIO, while
+        * the one created later by VFIO is the one the guest actually uses.
+        */
+       base = vgpu_opregion(vgpu)->va;
+       if (!base)
+               return -ENOMEM;
+
+       if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+               memunmap(base);
+               return -EINVAL;
+       }
+
+       ret = intel_vgpu_register_reg(vgpu,
+                       PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+                       VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
+                       &intel_vgpu_regops_opregion, OPREGION_SIZE,
+                       VFIO_REGION_INFO_FLAG_READ, base);
+
+       return ret;
+}
+
+static void kvmgt_put_vfio_device(void *vgpu)
+{
+       if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+               return;
+
+       vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+}
+
 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 {
        struct intel_vgpu *vgpu = NULL;
@@ -546,7 +660,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
        int ret = -EINVAL;
 
 
-       if (index >= VFIO_PCI_NUM_REGIONS) {
+       if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
                gvt_vgpu_err("invalid index: %u\n", index);
                return -EINVAL;
        }
@@ -574,8 +688,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
        case VFIO_PCI_BAR5_REGION_INDEX:
        case VFIO_PCI_VGA_REGION_INDEX:
        case VFIO_PCI_ROM_REGION_INDEX:
+               break;
        default:
-               gvt_vgpu_err("unsupported region: %u\n", index);
+               if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+                       return -EINVAL;
+
+               index -= VFIO_PCI_NUM_REGIONS;
+               return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+                               ppos, is_write);
        }
 
        return ret == 0 ? count : ret;
@@ -838,7 +958,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 
                info.flags = VFIO_DEVICE_FLAGS_PCI;
                info.flags |= VFIO_DEVICE_FLAGS_RESET;
-               info.num_regions = VFIO_PCI_NUM_REGIONS;
+               info.num_regions = VFIO_PCI_NUM_REGIONS +
+                               vgpu->vdev.num_regions;
                info.num_irqs = VFIO_PCI_NUM_IRQS;
 
                return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -959,6 +1080,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                }
 
                if (caps.size) {
+                       info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
                        if (info.argsz < sizeof(info) + caps.size) {
                                info.argsz = sizeof(info) + caps.size;
                                info.cap_offset = 0;
@@ -1045,6 +1167,33 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
        } else if (cmd == VFIO_DEVICE_RESET) {
                intel_gvt_ops->vgpu_reset(vgpu);
                return 0;
+       } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
+               struct vfio_device_gfx_plane_info dmabuf;
+               int ret = 0;
+
+               minsz = offsetofend(struct vfio_device_gfx_plane_info,
+                                   dmabuf_id);
+               if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
+                       return -EFAULT;
+               if (dmabuf.argsz < minsz)
+                       return -EINVAL;
+
+               ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
+               if (ret != 0)
+                       return ret;
+
+               return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
+                                                               -EFAULT : 0;
+       } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
+               __u32 dmabuf_id;
+               __s32 dmabuf_fd;
+
+               if (get_user(dmabuf_id, (__u32 __user *)arg))
+                       return -EFAULT;
+
+               dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
+               return dmabuf_fd;
+
        }
 
        return 0;
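
For context, the intended userspace flow for the two new ioctls looks roughly like the sketch below (illustrative only, not part of this patch). Here device_fd is an already-opened VFIO device fd, and the VFIO_GFX_PLANE_TYPE_* flag and the vfio_device_gfx_plane_info layout come from the include/uapi/linux/vfio.h additions in this series:

	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	/* Query the guest's primary plane as a dma-buf, then turn the
	 * returned dmabuf_id into a dma-buf file descriptor. */
	static int query_primary_plane_dmabuf(int device_fd)
	{
		struct vfio_device_gfx_plane_info plane = {
			.argsz = sizeof(plane),
			.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
			.drm_plane_type = 1,	/* DRM_PLANE_TYPE_PRIMARY */
		};

		if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane) < 0)
			return -1;

		/* Returns a dma-buf fd on success, a negative value on failure. */
		return ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
	}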
@@ -1286,6 +1435,9 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
        kvmgt_protect_table_init(info);
        gvt_cache_init(vgpu);
 
+       mutex_init(&vgpu->dmabuf_lock);
+       init_completion(&vgpu->vblank_done);
+
        info->track_node.track_write = kvmgt_page_track_write;
        info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
        kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1426,6 +1578,9 @@ struct intel_gvt_mpt kvmgt_mpt = {
        .read_gpa = kvmgt_read_gpa,
        .write_gpa = kvmgt_write_gpa,
        .gfn_to_mfn = kvmgt_gfn_to_pfn,
+       .set_opregion = kvmgt_set_opregion,
+       .get_vfio_device = kvmgt_get_vfio_device,
+       .put_vfio_device = kvmgt_put_vfio_device,
 };
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
 
index c436e20ea59efce25fe2c91c8321dbf8fcd4020e..ca8005a6d5faa139bb96b55320b7aa6d3703cb8c 100644 (file)
@@ -294,4 +294,49 @@ static inline int intel_gvt_hypervisor_set_trap_area(
        return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
 }
 
+/**
+ * intel_gvt_hypervisor_set_opregion - Set opregion for guest
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
+{
+       if (!intel_gvt_host.mpt->set_opregion)
+               return 0;
+
+       return intel_gvt_host.mpt->set_opregion(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
+{
+       if (!intel_gvt_host.mpt->get_vfio_device)
+               return 0;
+
+       return intel_gvt_host.mpt->get_vfio_device(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
+ * @vgpu: a vGPU
+ */
+static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
+{
+       if (!intel_gvt_host.mpt->put_vfio_device)
+               return;
+
+       intel_gvt_host.mpt->put_vfio_device(vgpu);
+}
+
 #endif /* _GVT_MPT_H_ */
index 80720e59723abfebd4733ebe6456afc649dc03de..8420d1fc3ddbe922db517fb95e9ff4c1186d62f5 100644 (file)
@@ -213,11 +213,20 @@ static void virt_vbt_generation(struct vbt *v)
        v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
 }
 
-static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
+/**
+ * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
 {
        u8 *buf;
        struct opregion_header *header;
        struct vbt v;
+       const char opregion_signature[16] = OPREGION_SIGNATURE;
 
        gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
        vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@@ -231,8 +240,8 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
        /* emulated opregion with VBT mailbox only */
        buf = (u8 *)vgpu_opregion(vgpu)->va;
        header = (struct opregion_header *)buf;
-       memcpy(header->signature, OPREGION_SIGNATURE,
-                       sizeof(OPREGION_SIGNATURE));
+       memcpy(header->signature, opregion_signature,
+              sizeof(opregion_signature));
        header->size = 0x8;
        header->opregion_ver = 0x02000000;
        header->mboxes = MBOX_VBT;
@@ -250,25 +259,6 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
        return 0;
 }
 
-static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
-{
-       int i, ret;
-
-       if (WARN((vgpu_opregion(vgpu)->va),
-                       "vgpu%d: opregion has been initialized already.\n",
-                       vgpu->id))
-               return -EINVAL;
-
-       ret = alloc_and_init_virt_opregion(vgpu);
-       if (ret < 0)
-               return ret;
-
-       for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
-               vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
-
-       return 0;
-}
-
 static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 {
        u64 mfn;
@@ -290,59 +280,91 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
                        return ret;
                }
        }
+
+       vgpu_opregion(vgpu)->mapped = map;
+
        return 0;
 }
 
 /**
- * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
+ * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
+ *
  * @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
  *
+ * Returns:
+ * Zero on success, negative error code if failed.
  */
-void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
 {
-       gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
 
-       if (!vgpu_opregion(vgpu)->va)
-               return;
+       int i, ret = 0;
+       unsigned long pfn;
 
-       if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
-               map_vgpu_opregion(vgpu, false);
-               free_pages((unsigned long)vgpu_opregion(vgpu)->va,
-                               get_order(INTEL_GVT_OPREGION_SIZE));
+       gvt_dbg_core("emulate opregion from kernel\n");
+
+       switch (intel_gvt_host.hypervisor_type) {
+       case INTEL_GVT_HYPERVISOR_KVM:
+               pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
+               vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
+                                               INTEL_GVT_OPREGION_SIZE,
+                                               MEMREMAP_WB);
+               if (!vgpu_opregion(vgpu)->va_gopregion) {
+                       gvt_vgpu_err("failed to map guest opregion\n");
+                       ret = -EFAULT;
+                       break;
+               }
+               vgpu_opregion(vgpu)->mapped = true;
+               break;
+       case INTEL_GVT_HYPERVISOR_XEN:
+               /*
+                * A Windows guest on XenGT writes this register twice: once
+                * from Xen hvmloader and once from the Windows graphics driver.
+                */
+               if (vgpu_opregion(vgpu)->mapped)
+                       map_vgpu_opregion(vgpu, false);
+
+               for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+                       vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
 
-               vgpu_opregion(vgpu)->va = NULL;
+               ret = map_vgpu_opregion(vgpu, true);
+               break;
+       default:
+               ret = -EINVAL;
+               gvt_vgpu_err("unsupported hypervisor\n");
        }
+
+       return ret;
 }
 
 /**
- * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
  * @vgpu: a vGPU
- * @gpa: guest physical address of opregion
  *
- * Returns:
- * Zero on success, negative error code if failed.
  */
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 {
-       int ret;
+       gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
 
-       gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+       if (!vgpu_opregion(vgpu)->va)
+               return;
 
        if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
-               gvt_dbg_core("emulate opregion from kernel\n");
-
-               ret = init_vgpu_opregion(vgpu, gpa);
-               if (ret)
-                       return ret;
-
-               ret = map_vgpu_opregion(vgpu, true);
-               if (ret)
-                       return ret;
+               if (vgpu_opregion(vgpu)->mapped)
+                       map_vgpu_opregion(vgpu, false);
+       } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+               if (vgpu_opregion(vgpu)->mapped) {
+                       memunmap(vgpu_opregion(vgpu)->va_gopregion);
+                       vgpu_opregion(vgpu)->va_gopregion = NULL;
+               }
        }
+       free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+                  get_order(INTEL_GVT_OPREGION_SIZE));
+
+       vgpu_opregion(vgpu)->va = NULL;
 
-       return 0;
 }
 
+
 #define GVT_OPREGION_FUNC(scic)                                        \
        ({                                                      \
         u32 __ret;                                             \
@@ -461,8 +483,21 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
        u32 *scic, *parm;
        u32 func, subfunc;
 
-       scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
-       parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+       switch (intel_gvt_host.hypervisor_type) {
+       case INTEL_GVT_HYPERVISOR_XEN:
+               scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
+               parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+               break;
+       case INTEL_GVT_HYPERVISOR_KVM:
+               scic = vgpu_opregion(vgpu)->va_gopregion +
+                                               INTEL_GVT_OPREGION_SCIC;
+               parm = vgpu_opregion(vgpu)->va_gopregion +
+                                               INTEL_GVT_OPREGION_PARM;
+               break;
+       default:
+               gvt_vgpu_err("unsupported hypervisor\n");
+               return -EINVAL;
+       }
 
        if (!(swsci & SWSCI_SCI_SELECT)) {
                gvt_vgpu_err("requesting SMI service\n");
index 03532dfc0cd51b8342e50da61524024dafc8ac34..eea1a2f920990a17f9ead7f91187b83c01e85eb6 100644 (file)
@@ -372,6 +372,11 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
        vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
 }
 
+void intel_gvt_kick_schedule(struct intel_gvt *gvt)
+{
+       intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+}
+
 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
        struct intel_gvt_workload_scheduler *scheduler =
index ba00a5f7455fd57688f0c3292951e19b7ad66ec1..7b59e3e88b8b869776c375181e7aceef884a95be 100644 (file)
@@ -57,4 +57,6 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
 
 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
 
+void intel_gvt_kick_schedule(struct intel_gvt *gvt);
+
 #endif
index d6177a0baeec5f590d469568e1404b46bec3c194..0056638b0c16dc2090e826aaa5b9b30c5aed13bb 100644 (file)
@@ -189,10 +189,12 @@ static int shadow_context_status_change(struct notifier_block *nb,
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
-       case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                save_ring_hw_state(workload->vgpu, ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
+       case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+               save_ring_hw_state(workload->vgpu, ring_id);
+               break;
        default:
                WARN_ON(1);
                return NOTIFY_OK;
@@ -246,7 +248,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
        return 0;
 }
 
-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
        if (!wa_ctx->indirect_ctx.obj)
                return;
@@ -1037,6 +1039,9 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
        if (IS_ERR(s->shadow_ctx))
                return PTR_ERR(s->shadow_ctx);
 
+       if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
+               s->shadow_ctx->priority = INT_MAX;
+
        bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
        s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
@@ -1329,3 +1334,15 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 
        return workload;
 }
+
+/**
+ * intel_vgpu_queue_workload - Queue a vGPU workload
+ * @workload: the workload to queue
+ */
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
+{
+       list_add_tail(&workload->list,
+               workload_q_head(workload->vgpu, workload->ring_id));
+       intel_gvt_kick_schedule(workload->vgpu->gvt);
+       wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+}
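
With the queue_workload() macro removed (see the scheduler.h hunk below), producers switch to the new function, which additionally kicks the scheduler before waking the per-ring wait queue. A call site changes roughly like this (sketch only; the actual call sites live elsewhere in this series):

	/* before */
	queue_workload(workload);

	/* after */
	intel_vgpu_queue_workload(workload);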
index e4a9f9acd4a953a2b747588203de7985282efa04..3de77dfa7c59f27d6daaa1249cb81f5e803c7561 100644 (file)
@@ -125,12 +125,7 @@ struct intel_vgpu_shadow_bb {
 #define workload_q_head(vgpu, ring_id) \
        (&(vgpu->submission.workload_q_head[ring_id]))
 
-#define queue_workload(workload) do { \
-       list_add_tail(&workload->list, \
-       workload_q_head(workload->vgpu, workload->ring_id)); \
-       wake_up(&workload->vgpu->gvt-> \
-       scheduler.waitq[workload->ring_id]); \
-} while (0)
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
 
 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
 
index c6b82d1ba7dee0b2f09fc057bde14c248372484d..39926176fbebd54cc350d5322720e53911a478e0 100644 (file)
@@ -236,6 +236,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
        }
 
        intel_vgpu_stop_schedule(vgpu);
+       intel_vgpu_dmabuf_cleanup(vgpu);
 
        mutex_unlock(&gvt->lock);
 }
@@ -265,6 +266,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
        intel_vgpu_clean_mmio(vgpu);
+       intel_vgpu_dmabuf_cleanup(vgpu);
        vfree(vgpu);
 
        intel_gvt_update_vgpu_types(gvt);
@@ -349,7 +351,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->handle = param->handle;
        vgpu->gvt = gvt;
        vgpu->sched_ctl.weight = param->weight;
-
+       INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+       idr_init(&vgpu->object_idr);
        intel_vgpu_init_cfg_space(vgpu, param->primary);
 
        ret = intel_vgpu_init_mmio(vgpu);
@@ -370,10 +373,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_detach_hypervisor_vgpu;
 
-       ret = intel_vgpu_init_display(vgpu, param->resolution);
+       ret = intel_vgpu_init_opregion(vgpu);
        if (ret)
                goto out_clean_gtt;
 
+       ret = intel_vgpu_init_display(vgpu, param->resolution);
+       if (ret)
+               goto out_clean_opregion;
+
        ret = intel_vgpu_setup_submission(vgpu);
        if (ret)
                goto out_clean_display;
@@ -386,6 +393,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_sched_policy;
 
+       ret = intel_gvt_hypervisor_set_opregion(vgpu);
+       if (ret)
+               goto out_clean_sched_policy;
+
        mutex_unlock(&gvt->lock);
 
        return vgpu;
@@ -396,6 +407,8 @@ out_clean_submission:
        intel_vgpu_clean_submission(vgpu);
 out_clean_display:
        intel_vgpu_clean_display(vgpu);
+out_clean_opregion:
+       intel_vgpu_clean_opregion(vgpu);
 out_clean_gtt:
        intel_vgpu_clean_gtt(vgpu);
 out_detach_hypervisor_vgpu:
index 28294470ae31f2275a6b984810d6ebb206d5a6e0..7b41a1799a03a4882f03f1e9ae0ca486914275ca 100644 (file)
@@ -111,8 +111,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
        u64 size = 0;
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
+       for_each_ggtt_vma(vma, obj) {
+               if (drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }
 
index 7faf20aff25a3784d142b338ad918ae235c543de..5b1fd5f1defb6999ab272ef7cd5d6472b26243b9 100644 (file)
@@ -1897,9 +1897,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
        disable_irq(i915->drm.irq);
        ret = i915_gem_reset_prepare(i915);
        if (ret) {
-               DRM_ERROR("GPU recovery failed\n");
+               dev_err(i915->drm.dev, "GPU recovery failed\n");
                intel_gpu_reset(i915, ALL_ENGINES);
-               goto error;
+               goto taint;
        }
 
        if (!intel_has_gpu_reset(i915)) {
@@ -1916,7 +1916,7 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
        }
        if (ret) {
                dev_err(i915->drm.dev, "Failed to reset chip\n");
-               goto error;
+               goto taint;
        }
 
        i915_gem_reset(i915);
@@ -1959,6 +1959,20 @@ wakeup:
        wake_up_bit(&error->flags, I915_RESET_HANDOFF);
        return;
 
+taint:
+       /*
+        * History tells us that if we cannot reset the GPU now, we
+        * never will. This then impacts everything that is run
+        * subsequently. On failing the reset, we mark the driver
+        * as wedged, preventing further execution on the GPU.
+        * We also want to go one step further and add a taint to the
+        * kernel so that any subsequent faults can be traced back to
+        * this failure. This is important for CI, where if the
+        * GPU/driver fails we would like to reboot and restart testing
+        * rather than continue on into oblivion. For everyone else,
+        * the system should still plod along, but they have been warned!
+        */
+       add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 error:
        i915_gem_set_wedged(i915);
        i915_gem_retire_requests(i915);
index 594fd14e66c509992b5b71abbc69310ad99bc745..d57859cfad8eec428e88f7daacdadcc59f284c26 100644 (file)
@@ -398,6 +398,7 @@ enum intel_display_power_domain {
        POWER_DOMAIN_AUX_D,
        POWER_DOMAIN_GMBUS,
        POWER_DOMAIN_MODESET,
+       POWER_DOMAIN_GT_IRQ,
        POWER_DOMAIN_INIT,
 
        POWER_DOMAIN_NUM,
@@ -3234,8 +3235,16 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_GUC_CT(dev_priv)   ((dev_priv)->info.has_guc_ct)
 #define HAS_GUC_UCODE(dev_priv)        (HAS_GUC(dev_priv))
 #define HAS_GUC_SCHED(dev_priv)        (HAS_GUC(dev_priv))
+
+/* For now, anything with a GuC also has a HuC */
+#define HAS_HUC(dev_priv)      (HAS_GUC(dev_priv))
 #define HAS_HUC_UCODE(dev_priv)        (HAS_GUC(dev_priv))
 
+/* Having a GuC is not the same as using a GuC */
+#define USES_GUC(dev_priv)             intel_uc_is_using_guc()
+#define USES_GUC_SUBMISSION(dev_priv)  intel_uc_is_using_guc_submission()
+#define USES_HUC(dev_priv)             intel_uc_is_using_huc()
+
 #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
 
 #define HAS_POOLED_EU(dev_priv)        ((dev_priv)->info.has_pooled_eu)
@@ -3879,6 +3888,8 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
                                         unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);
 
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
+
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
index e083f242b8dc6ed4ff7fc2796fdd7f57ab6bab9b..fcc9b53864f0ad4646774b24f34be27f07c0a5ba 100644 (file)
@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
         * must wait for all rendering to complete to the object (as unbinding
         * must anyway), and retire the requests.
         */
-       ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE |
-                                  I915_WAIT_LOCKED |
-                                  I915_WAIT_ALL,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
+       ret = i915_gem_object_set_to_cpu_domain(obj, false);
        if (ret)
                return ret;
 
-       i915_gem_retire_requests(to_i915(obj->base.dev));
-
        while ((vma = list_first_entry_or_null(&obj->vma_list,
                                               struct i915_vma,
                                               obj_link))) {
@@ -673,17 +666,13 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
                obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
 }
 
-static void
-flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-       if (!(obj->base.write_domain & flush_domains))
-               return;
-
-       /* No actual flushing is required for the GTT write domain.  Writes
-        * to it "immediately" go to main memory as far as we know, so there's
-        * no chipset flush.  It also doesn't land in render cache.
+       /*
+        * No actual flushing is required for the GTT write domain for reads
+        * from the GTT domain. Writes to it "immediately" go to main memory
+        * as far as we know, so there's no chipset flush. It also doesn't
+        * land in the GPU render cache.
         *
         * However, we do have to enforce the order so that all writes through
         * the GTT land before any writes to the device, such as updates to
@@ -694,22 +683,43 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
         * timing. This issue has only been observed when switching quickly
         * between GTT writes and CPU reads from inside the kernel on recent hw,
         * and it appears to only affect discrete GTT blocks (i.e. on LLC
-        * system agents we cannot reproduce this behaviour).
+        * system agents we cannot reproduce this behaviour, until Cannonlake
+        * that was!).
         */
+
        wmb();
 
+       intel_runtime_pm_get(dev_priv);
+       spin_lock_irq(&dev_priv->uncore.lock);
+
+       POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
+
+       spin_unlock_irq(&dev_priv->uncore.lock);
+       intel_runtime_pm_put(dev_priv);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct i915_vma *vma;
+
+       if (!(obj->base.write_domain & flush_domains))
+               return;
+
        switch (obj->base.write_domain) {
        case I915_GEM_DOMAIN_GTT:
-               if (!HAS_LLC(dev_priv)) {
-                       intel_runtime_pm_get(dev_priv);
-                       spin_lock_irq(&dev_priv->uncore.lock);
-                       POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
-                       spin_unlock_irq(&dev_priv->uncore.lock);
-                       intel_runtime_pm_put(dev_priv);
-               }
+               i915_gem_flush_ggtt_writes(dev_priv);
 
                intel_fb_obj_flush(obj,
                                   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+
+               for_each_ggtt_vma(vma, obj) {
+                       if (vma->iomap)
+                               continue;
+
+                       i915_vma_unset_ggtt_write(vma);
+               }
                break;
 
        case I915_GEM_DOMAIN_CPU:
@@ -1556,10 +1566,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (!i915_vma_is_ggtt(vma))
-                       break;
-
+       for_each_ggtt_vma(vma, obj) {
                if (i915_vma_is_active(vma))
                        continue;
 
@@ -1972,6 +1979,8 @@ int i915_gem_fault(struct vm_fault *vmf)
                list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
        GEM_BUG_ON(!obj->userfault_count);
 
+       i915_vma_set_ggtt_write(vma);
+
 err_fence:
        i915_vma_unpin_fence(vma);
 err_unpin:
@@ -2036,12 +2045,8 @@ static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
        drm_vma_node_unmap(&obj->base.vma_node,
                           obj->base.dev->anon_inode->i_mapping);
 
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (!i915_vma_is_ggtt(vma))
-                       break;
-
+       for_each_ggtt_vma(vma, obj)
                i915_vma_unset_userfault(vma);
-       }
 }
 
 /**
@@ -3381,6 +3386,9 @@ i915_gem_idle_work_handler(struct work_struct *work)
 
        if (INTEL_GEN(dev_priv) >= 6)
                gen6_rps_idle(dev_priv);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
+
        intel_runtime_pm_put(dev_priv);
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3753,7 +3761,8 @@ restart:
                        return -EBUSY;
                }
 
-               if (i915_gem_valid_gtt_space(vma, cache_level))
+               if (!i915_vma_is_closed(vma) &&
+                   i915_gem_valid_gtt_space(vma, cache_level))
                        continue;
 
                ret = i915_vma_unbind(vma);
@@ -3806,7 +3815,7 @@ restart:
                         * dropped the fence as all snoopable access is
                         * supposed to be linear.
                         */
-                       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+                       for_each_ggtt_vma(vma, obj) {
                                ret = i915_vma_put_fence(vma);
                                if (ret)
                                        return ret;
index ce3139e5ec4c4406d50b84fe041559e5c62d1683..21ce374d9924fbbaa5a67297a3bec8a8806ada5a 100644 (file)
@@ -316,7 +316,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
         * present or not in use we still need a small bias as ring wraparound
         * at offset 0 sometimes hangs. No idea why.
         */
-       if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
+       if (USES_GUC(dev_priv))
                ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
        else
                ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
@@ -409,7 +409,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
        i915_gem_context_set_closed(ctx); /* not user accessible */
        i915_gem_context_clear_bannable(ctx);
        i915_gem_context_set_force_single_submission(ctx);
-       if (!i915_modparams.enable_guc_submission)
+       if (!USES_GUC_SUBMISSION(to_i915(dev)))
                ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
 
        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
index f3c35e82632191f9b1e2060149413861963c5087..5e7efbbac9f7ca942672ffba50e3b8a771449a49 100644 (file)
@@ -3503,7 +3503,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
         * currently don't have any bits spare to pass in this upper
         * restriction!
         */
-       if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
+       if (USES_GUC(dev_priv)) {
                ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
                ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
        }
@@ -3620,10 +3620,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
                bool ggtt_bound = false;
                struct i915_vma *vma;
 
-               list_for_each_entry(vma, &obj->vma_list, obj_link) {
-                       if (vma->vm != &ggtt->base)
-                               continue;
-
+               for_each_ggtt_vma(vma, obj) {
                        if (!i915_vma_unbind(vma))
                                continue;
 
index 19fb28c177d8040ace1da091a009b518a3015930..05e89e1c0a088b6cc2ca9e28f0d4ddd606726f97 100644 (file)
@@ -261,6 +261,8 @@ struct drm_i915_gem_object {
                } userptr;
 
                unsigned long scratch;
+
+               void *gvt_info;
        };
 
        /** for phys allocated objects */
index a90bdd26571f58cffe8300d1a1b74eebf3c6005b..c28a4ceb016df2daa949db31805213aec2620182 100644 (file)
@@ -252,6 +252,20 @@ static void mark_busy(struct drm_i915_private *i915)
        GEM_BUG_ON(!i915->gt.active_requests);
 
        intel_runtime_pm_get_noresume(i915);
+
+       /*
+        * It seems that the DMC likes to transition between the DC states a lot
+        * when there are no connected displays (no active power domains) during
+        * command submission.
+        *
+        * This activity has negative impact on the performance of the chip with
+        * huge latencies observed in the interrupt handler and elsewhere.
+        *
+        * Work around it by grabbing a GT IRQ power domain whilst there is any
+        * GT activity, preventing any DC state transitions.
+        */
+       intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+
        i915->gt.awake = true;
 
        intel_enable_gt_powersave(i915);
index b85d7ebd9beea5beeb89115676ce97787c1a7960..d9dc9df523b58e9a8645838e9c2ffa2080c54d61 100644 (file)
@@ -205,10 +205,7 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
        if (tiling_mode == I915_TILING_NONE)
                return 0;
 
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (!i915_vma_is_ggtt(vma))
-                       break;
-
+       for_each_ggtt_vma(vma, obj) {
                if (i915_vma_fence_prepare(vma, tiling_mode, stride))
                        continue;
 
@@ -285,10 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
        }
        mutex_unlock(&obj->mm.lock);
 
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (!i915_vma_is_ggtt(vma))
-                       break;
-
+       for_each_ggtt_vma(vma, obj) {
                vma->fence_size =
                        i915_gem_fence_size(i915, vma->size, tiling, stride);
                vma->fence_alignment =
index 7cac07db89b99aebfc361b64f706f9177d0ad318..3517c6548e2cccd3c21985a0c52266c1f818d99c 100644 (file)
@@ -1400,7 +1400,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
                notify_ring(engine);
-               tasklet |= i915_modparams.enable_guc_submission;
+               tasklet |= USES_GUC_SUBMISSION(engine->i915);
        }
 
        if (tasklet)
index 7bc53868787168135818668217e2c5e6a5882571..8dfea0320c2fffb451a538575499e50f86912c76 100644 (file)
@@ -147,13 +147,10 @@ i915_param_named_unsafe(edp_vswing, int, 0400,
        "(0=use value from vbt [default], 1=low power swing(200mV),"
        "2=default swing(400mV))");
 
-i915_param_named_unsafe(enable_guc_loading, int, 0400,
-       "Enable GuC firmware loading "
-       "(-1=auto, 0=never [default], 1=if available, 2=required)");
-
-i915_param_named_unsafe(enable_guc_submission, int, 0400,
-       "Enable GuC submission "
-       "(-1=auto, 0=never [default], 1=if available, 2=required)");
+i915_param_named_unsafe(enable_guc, int, 0400,
+       "Enable GuC load for GuC submission and/or HuC load. "
+       "Required functionality can be selected using bitmask values. "
+       "(-1=auto, 0=disable [default], 1=GuC submission, 2=HuC load)");
 
 i915_param_named(guc_log_level, int, 0400,
        "GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
index c48c88bb95e88c18b7bdc0ee2e22c9a45629058f..792ce26d744946d22e43d825f3c76820b4725da3 100644 (file)
 #ifndef _I915_PARAMS_H_
 #define _I915_PARAMS_H_
 
+#include <linux/bitops.h>
 #include <linux/cache.h> /* for __read_mostly */
 
+#define ENABLE_GUC_SUBMISSION          BIT(0)
+#define ENABLE_GUC_LOAD_HUC            BIT(1)
+
 #define I915_PARAMS_FOR_EACH(param) \
        param(char *, vbt_firmware, NULL) \
        param(int, modeset, -1) \
@@ -41,8 +45,7 @@
        param(int, disable_power_well, -1) \
        param(int, enable_ips, 1) \
        param(int, invert_brightness, 0) \
-       param(int, enable_guc_loading, 0) \
-       param(int, enable_guc_submission, 0) \
+       param(int, enable_guc, 0) \
        param(int, guc_log_level, -1) \
        param(char *, guc_firmware_path, NULL) \
        param(char *, huc_firmware_path, NULL) \
index bf6d8d1eaabeb97467218436e71905db8a7e0d0d..92c11e70fea48c4be65aabc1d0ef69ab7469e666 100644 (file)
@@ -142,6 +142,12 @@ vma_create(struct drm_i915_gem_object *obj,
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
 
+               /*
+                * We put the GGTT vma at the start of the vma-list, followed
+                * by the ppGTT vma. This allows us to break early when
+                * iterating over only the GGTT vma for an object, see
+                * for_each_ggtt_vma()
+                */
                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
@@ -322,6 +328,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
        if (err)
                goto err_unpin;
 
+       i915_vma_set_ggtt_write(vma);
        return ptr;
 
 err_unpin:
@@ -330,12 +337,24 @@ err:
        return IO_ERR_PTR(err);
 }
 
+void i915_vma_flush_writes(struct i915_vma *vma)
+{
+       if (!i915_vma_has_ggtt_write(vma))
+               return;
+
+       i915_gem_flush_ggtt_writes(vma->vm->i915);
+
+       i915_vma_unset_ggtt_write(vma);
+}
+
 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
        lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
 
        GEM_BUG_ON(vma->iomap == NULL);
 
+       i915_vma_flush_writes(vma);
+
        i915_vma_unpin_fence(vma);
        i915_vma_unpin(vma);
 }
@@ -466,6 +485,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        u64 start, end;
        int ret;
 
+       GEM_BUG_ON(i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
@@ -678,7 +698,9 @@ static void i915_vma_destroy(struct i915_vma *vma)
                GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
        GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
 
+       list_del(&vma->obj_link);
        list_del(&vma->vm_link);
+
        if (!i915_vma_is_ggtt(vma))
                i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
@@ -690,7 +712,6 @@ void i915_vma_close(struct i915_vma *vma)
        GEM_BUG_ON(i915_vma_is_closed(vma));
        vma->flags |= I915_VMA_CLOSED;
 
-       list_del(&vma->obj_link);
        rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
        if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
@@ -790,6 +811,15 @@ int i915_vma_unbind(struct i915_vma *vma)
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
        if (i915_vma_is_map_and_fenceable(vma)) {
+               /*
+                * Check that we have flushed all writes through the GGTT
+                * before the unbind; otherwise, due to the non-strict nature
+                * of those indirect writes, they may end up referencing the
+                * GGTT PTE after the unbind.
+                */
+               i915_vma_flush_writes(vma);
+               GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
+
                /* release the fence reg _after_ flushing */
                ret = i915_vma_put_fence(vma);
                if (ret)
index 1e2bc9b3c3ac19a4790222eb151765050e264d49..fd5b84904f7cb8b902819c14069945b3f147d148 100644 (file)
@@ -90,6 +90,7 @@ struct i915_vma {
 #define I915_VMA_CLOSED                BIT(10)
 #define I915_VMA_USERFAULT_BIT 11
 #define I915_VMA_USERFAULT     BIT(I915_VMA_USERFAULT_BIT)
+#define I915_VMA_GGTT_WRITE    BIT(12)
 
        unsigned int active;
        struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -138,6 +139,24 @@ static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
        return vma->flags & I915_VMA_GGTT;
 }
 
+static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
+{
+       return vma->flags & I915_VMA_GGTT_WRITE;
+}
+
+static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+       vma->flags |= I915_VMA_GGTT_WRITE;
+}
+
+static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+{
+       vma->flags &= ~I915_VMA_GGTT_WRITE;
+}
+
+void i915_vma_flush_writes(struct i915_vma *vma);
+
 static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
 {
        return vma->flags & I915_VMA_CAN_FENCE;
@@ -389,5 +408,19 @@ i915_vma_unpin_fence(struct i915_vma *vma)
                __i915_vma_unpin_fence(vma);
 }
 
-#endif
+#define for_each_until(cond) if (cond) break; else
+
+/**
+ * for_each_ggtt_vma - Iterate over the GGTT VMA belonging to an object.
+ * @V: the #i915_vma iterator
+ * @OBJ: the #drm_i915_gem_object
+ *
+ * GGTT VMA are placed at the beginning of the object's vma_list, see
+ * vma_create(), so we can stop our walk as soon as we see a ppGTT VMA
+ * or reach the end of the list.
+ */
+#define for_each_ggtt_vma(V, OBJ) \
+       list_for_each_entry(V, &(OBJ)->vma_list, obj_link)              \
+               for_each_until(!i915_vma_is_ggtt(V))
 
+#endif
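
For readers unfamiliar with the for_each_until() trick above, the nested macros expand so that the caller's loop body attaches to the else branch, which stops the walk at the first non-GGTT VMA. Roughly (illustrative expansion, not new code):

	/* for_each_ggtt_vma(vma, obj) { ...body... } expands to roughly: */
	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (!i915_vma_is_ggtt(vma))
			break;
		else {
			/* ...body... runs only for GGTT VMA */
		}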
index 07e4f7bc4412712074edafd34ea1f13e74469b8c..7fe4aac0facc64bd34affae3e2bbf8e5ca9a6e14 100644 (file)
@@ -44,9 +44,9 @@
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 4)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 26)
+#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 27)
 
 #define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
index 02f8bf101ccd74617fb6a3ce94355cafedcf1801..405d70124a46434a18aa428779b7f2f8f4d40119 100644 (file)
@@ -403,15 +403,15 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
                                freq = f24_mhz;
                                break;
                        }
-               }
 
-               /* Now figure out how the command stream's timestamp register
-                * increments from this frequency (it might increment only
-                * every few clock cycle).
-                */
-               freq >>= 3 - ((rpm_config_reg &
-                              GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
-                             GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+                       /* Now figure out how the command stream's timestamp
+                        * register increments from this frequency (it might
+                        * increment only every few clock cycles).
+                        */
+                       freq >>= 3 - ((rpm_config_reg &
+                                      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+                                     GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+               }
 
                return freq;
        }
index 1f7e312d0d0d12bf7af3c51d0cc2b563d9ca7d1f..f0a8686f051360ca324d01ea40b6509ee5df9d38 100644 (file)
@@ -9940,11 +9940,10 @@ found:
        }
 
        ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+       drm_framebuffer_put(fb);
        if (ret)
                goto fail;
 
-       drm_framebuffer_put(fb);
-
        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
        if (ret)
                goto fail;
@@ -10967,31 +10966,6 @@ fail:
        return ret;
 }
 
-static void
-intel_modeset_update_crtc_state(struct drm_atomic_state *state)
-{
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *new_crtc_state;
-       int i;
-
-       /* Double check state. */
-       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
-               to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
-
-               /*
-                * Update legacy state to satisfy fbc code. This can
-                * be removed when fbc uses the atomic state.
-                */
-               if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
-                       struct drm_plane_state *plane_state = crtc->primary->state;
-
-                       crtc->primary->fb = plane_state->fb;
-                       crtc->x = plane_state->src_x >> 16;
-                       crtc->y = plane_state->src_y >> 16;
-               }
-       }
-}
-
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
 {
        int diff;
@@ -12364,9 +12338,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                }
        }
 
-       /* Only after disabling all output pipelines that will be changed can we
-        * update the the output configuration. */
-       intel_modeset_update_crtc_state(state);
+       /* FIXME: Eventually get rid of our intel_crtc->config pointer */
+       for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+               to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
 
        if (intel_state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
index d08e760252d40ae7ad1779fb065176802d76817d..177ee69ca9b1256715ecfafb2c0e63ce1cdd622d 100644 (file)
@@ -61,6 +61,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
 
 void intel_guc_init_early(struct intel_guc *guc)
 {
+       intel_guc_fw_init_early(guc);
        intel_guc_ct_init_early(&guc->ct);
 
        mutex_init(&guc->send_mutex);
@@ -128,7 +129,7 @@ void intel_guc_init_params(struct intel_guc *guc)
        }
 
        /* If GuC submission is enabled, set up additional parameters here */
-       if (i915_modparams.enable_guc_submission) {
+       if (USES_GUC_SUBMISSION(dev_priv)) {
                u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
                u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
                u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
index 89862fa8ab42500fbf4a1fe0e24ccc42522dd866..cbc51c9604256da3b479901dc950b5411a781464 100644 (file)
@@ -56,45 +56,54 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
 
 #define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
 
-/**
- * intel_guc_fw_select() - selects GuC firmware for uploading
- *
- * @guc:       intel_guc struct
- *
- * Return: zero when we know firmware, non-zero in other case
- */
-int intel_guc_fw_select(struct intel_guc *guc)
+static void guc_fw_select(struct intel_uc_fw *guc_fw)
 {
+       struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-       intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
+       GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
+
+       if (!HAS_GUC(dev_priv))
+               return;
 
        if (i915_modparams.guc_firmware_path) {
-               guc->fw.path = i915_modparams.guc_firmware_path;
-               guc->fw.major_ver_wanted = 0;
-               guc->fw.minor_ver_wanted = 0;
+               guc_fw->path = i915_modparams.guc_firmware_path;
+               guc_fw->major_ver_wanted = 0;
+               guc_fw->minor_ver_wanted = 0;
        } else if (IS_SKYLAKE(dev_priv)) {
-               guc->fw.path = I915_SKL_GUC_UCODE;
-               guc->fw.major_ver_wanted = SKL_FW_MAJOR;
-               guc->fw.minor_ver_wanted = SKL_FW_MINOR;
+               guc_fw->path = I915_SKL_GUC_UCODE;
+               guc_fw->major_ver_wanted = SKL_FW_MAJOR;
+               guc_fw->minor_ver_wanted = SKL_FW_MINOR;
        } else if (IS_BROXTON(dev_priv)) {
-               guc->fw.path = I915_BXT_GUC_UCODE;
-               guc->fw.major_ver_wanted = BXT_FW_MAJOR;
-               guc->fw.minor_ver_wanted = BXT_FW_MINOR;
+               guc_fw->path = I915_BXT_GUC_UCODE;
+               guc_fw->major_ver_wanted = BXT_FW_MAJOR;
+               guc_fw->minor_ver_wanted = BXT_FW_MINOR;
        } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
-               guc->fw.path = I915_KBL_GUC_UCODE;
-               guc->fw.major_ver_wanted = KBL_FW_MAJOR;
-               guc->fw.minor_ver_wanted = KBL_FW_MINOR;
+               guc_fw->path = I915_KBL_GUC_UCODE;
+               guc_fw->major_ver_wanted = KBL_FW_MAJOR;
+               guc_fw->minor_ver_wanted = KBL_FW_MINOR;
        } else if (IS_GEMINILAKE(dev_priv)) {
-               guc->fw.path = I915_GLK_GUC_UCODE;
-               guc->fw.major_ver_wanted = GLK_FW_MAJOR;
-               guc->fw.minor_ver_wanted = GLK_FW_MINOR;
+               guc_fw->path = I915_GLK_GUC_UCODE;
+               guc_fw->major_ver_wanted = GLK_FW_MAJOR;
+               guc_fw->minor_ver_wanted = GLK_FW_MINOR;
        } else {
-               DRM_ERROR("No GuC firmware known for platform with GuC!\n");
-               return -ENOENT;
+               DRM_WARN("%s: No firmware known for this platform!\n",
+                        intel_uc_fw_type_repr(guc_fw->type));
        }
+}
 
-       return 0;
+/**
+ * intel_guc_fw_init_early() - initializes GuC firmware struct
+ * @guc: intel_guc struct
+ *
+ * On platforms with a GuC, this selects the firmware to be uploaded.
+ */
+void intel_guc_fw_init_early(struct intel_guc *guc)
+{
+       struct intel_uc_fw *guc_fw = &guc->fw;
+
+       intel_uc_fw_init(guc_fw, INTEL_UC_FW_TYPE_GUC);
+       guc_fw_select(guc_fw);
 }
 
 static void guc_prepare_xfer(struct intel_guc *guc)
index 023f5baa9dd60ac68ae7292cf290779210ac4b39..4ec5d3d9e2b0591ab3d1e5bc6afff60f041f4129 100644 (file)
@@ -27,7 +27,7 @@
 
 struct intel_guc;
 
-int intel_guc_fw_select(struct intel_guc *guc);
+void intel_guc_fw_init_early(struct intel_guc *guc);
 int intel_guc_fw_upload(struct intel_guc *guc);
 
 #endif
index 76d3eb1e4614123052807b3bf978d716fefb59e9..1a2c5eed9929efe7fcddc56c7f60e7d84d984249 100644 (file)
@@ -505,7 +505,7 @@ static void guc_flush_logs(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-       if (!i915_modparams.enable_guc_submission ||
+       if (!USES_GUC_SUBMISSION(dev_priv) ||
            (i915_modparams.guc_log_level < 0))
                return;
 
@@ -646,7 +646,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 
 void i915_guc_log_register(struct drm_i915_private *dev_priv)
 {
-       if (!i915_modparams.enable_guc_submission ||
+       if (!USES_GUC_SUBMISSION(dev_priv) ||
            (i915_modparams.guc_log_level < 0))
                return;
 
@@ -657,7 +657,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv)
 
 void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
 {
-       if (!i915_modparams.enable_guc_submission)
+       if (!USES_GUC_SUBMISSION(dev_priv))
                return;
 
        mutex_lock(&dev_priv->drm.struct_mutex);
index 126f7c769c69416313a79d6a169668ce926fa3b8..a2fe7c8d44775e06e4e4168336f250c4be02bfa2 100644 (file)
@@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
                return 0;
        }
 
-       if (i915_modparams.enable_guc_submission) {
+       if (USES_GUC_SUBMISSION(dev_priv)) {
                DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
                return -EIO;
        }
index a40f35af225c3c17148893b4da944fd63eb86b2b..bced7b954d93d855496bb20ebdbdd5131e5855c2 100644 (file)
@@ -1383,7 +1383,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
                }
        }
 
-       /* Display Wa #1139 */
+       /* Display WA #1139: glk */
        if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
            crtc_state->base.adjusted_mode.htotal > 5460)
                return false;
index 98d17254593c8a26c5c4769068d6bd010ad3b267..974be3defa70912af96ee741dd9f3b836bcc0516 100644 (file)
@@ -77,42 +77,56 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
 #define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
        GLK_HUC_FW_MINOR, GLK_BLD_NUM)
 
-/**
- * intel_huc_select_fw() - selects HuC firmware for loading
- * @huc:       intel_huc struct
- */
-void intel_huc_select_fw(struct intel_huc *huc)
+static void huc_fw_select(struct intel_uc_fw *huc_fw)
 {
+       struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
        struct drm_i915_private *dev_priv = huc_to_i915(huc);
 
-       intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
+       GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
+
+       if (!HAS_HUC(dev_priv))
+               return;
 
        if (i915_modparams.huc_firmware_path) {
-               huc->fw.path = i915_modparams.huc_firmware_path;
-               huc->fw.major_ver_wanted = 0;
-               huc->fw.minor_ver_wanted = 0;
+               huc_fw->path = i915_modparams.huc_firmware_path;
+               huc_fw->major_ver_wanted = 0;
+               huc_fw->minor_ver_wanted = 0;
        } else if (IS_SKYLAKE(dev_priv)) {
-               huc->fw.path = I915_SKL_HUC_UCODE;
-               huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
-               huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
+               huc_fw->path = I915_SKL_HUC_UCODE;
+               huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
+               huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
        } else if (IS_BROXTON(dev_priv)) {
-               huc->fw.path = I915_BXT_HUC_UCODE;
-               huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
-               huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
+               huc_fw->path = I915_BXT_HUC_UCODE;
+               huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
+               huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
        } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
-               huc->fw.path = I915_KBL_HUC_UCODE;
-               huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
-               huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+               huc_fw->path = I915_KBL_HUC_UCODE;
+               huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
+               huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
        } else if (IS_GEMINILAKE(dev_priv)) {
-               huc->fw.path = I915_GLK_HUC_UCODE;
-               huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
-               huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
+               huc_fw->path = I915_GLK_HUC_UCODE;
+               huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
+               huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
        } else {
-               DRM_ERROR("No HuC firmware known for platform with HuC!\n");
-               return;
+               DRM_WARN("%s: No firmware known for this platform!\n",
+                        intel_uc_fw_type_repr(huc_fw->type));
        }
 }
 
+/**
+ * intel_huc_init_early() - initializes HuC struct
+ * @huc: intel_huc struct
+ *
+ * On platforms with a HuC, this selects the firmware to be uploaded.
+ */
+void intel_huc_init_early(struct intel_huc *huc)
+{
+       struct intel_uc_fw *huc_fw = &huc->fw;
+
+       intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC);
+       huc_fw_select(huc_fw);
+}
+
 /**
  * huc_ucode_xfer() - DMA's the firmware
  * @dev_priv: the drm_i915_private device
@@ -167,17 +181,17 @@ static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
  * intel_huc_init_hw() - load HuC uCode to device
  * @huc: intel_huc structure
  *
- * Called from guc_setup() during driver loading and also after a GPU reset.
- * Be note that HuC loading must be done before GuC loading.
+ * Called from intel_uc_init_hw() during driver loading and also after a GPU
+ * reset. Note that HuC loading must be done before GuC loading.
  *
  * The firmware image should have already been fetched into memory by the
- * earlier call to intel_huc_init(), so here we need only check that
+ * earlier call to intel_uc_init_fw(), so here we need only check that
  * is succeeded, and then transfer the image to the h/w.
  *
  */
-void intel_huc_init_hw(struct intel_huc *huc)
+int intel_huc_init_hw(struct intel_huc *huc)
 {
-       intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
+       return intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
 }
 
 /**
@@ -191,7 +205,7 @@ void intel_huc_init_hw(struct intel_huc *huc)
  * signature through intel_guc_auth_huc(). It then waits for 50ms for
  * firmware verification ACK and unpins the object.
  */
-void intel_huc_auth(struct intel_huc *huc)
+int intel_huc_auth(struct intel_huc *huc)
 {
        struct drm_i915_private *i915 = huc_to_i915(huc);
        struct intel_guc *guc = &i915->guc;
@@ -199,14 +213,14 @@ void intel_huc_auth(struct intel_huc *huc)
        int ret;
 
        if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
-               return;
+               return -ENOEXEC;
 
        vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
                                PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
        if (IS_ERR(vma)) {
-               DRM_ERROR("failed to pin huc fw object %d\n",
-                               (int)PTR_ERR(vma));
-               return;
+               ret = PTR_ERR(vma);
+               DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
+               return ret;
        }
 
        ret = intel_guc_auth_huc(guc,
@@ -229,4 +243,5 @@ void intel_huc_auth(struct intel_huc *huc)
 
 out:
        i915_vma_unpin(vma);
+       return ret;
 }
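intel_huc_init_hw() and intel_huc_auth() now return errors instead of silently continuing. A minimal sketch of how a caller consumes the new return values, modelled on the intel_uc_init_hw() changes further down; in the real flow the GuC upload and communication setup sit between the two calls:

/* Sketch: bail out of uC bring-up when HuC load or authentication fails. */
static int sketch_huc_bringup(struct intel_huc *huc)
{
	int err;

	/* DMA the HuC image to the hardware; returns e.g. -ENOEXEC when the
	 * earlier firmware fetch did not succeed.
	 */
	err = intel_huc_init_hw(huc);
	if (err)
		return err;

	/* ... GuC upload and communication enable happen here ... */

	/* Ask the GuC to verify the HuC signature; errors now propagate. */
	return intel_huc_auth(huc);
}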
index aaa38b9e58171f5fb84d944a04cae3e4bb2f0d59..40039db59e04f5351cdf6bb3c21006ee044d5c91 100644 (file)
@@ -34,8 +34,8 @@ struct intel_huc {
        /* HuC-specific additions */
 };
 
-void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_init_hw(struct intel_huc *huc);
-void intel_huc_auth(struct intel_huc *huc);
+void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init_hw(struct intel_huc *huc);
+int intel_huc_auth(struct intel_huc *huc);
 
 #endif
index 2a8160f603ab05e4ccf57241b1390f935ae00392..2e38fbfdf08fe652e7cdefd7964956a1b2ccde57 100644 (file)
@@ -431,8 +431,6 @@ static inline void elsp_write(u64 desc, u32 __iomem *elsp)
 static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
        struct execlist_port *port = engine->execlists.port;
-       u32 __iomem *elsp =
-               engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
        unsigned int n;
 
        for (n = execlists_num_ports(&engine->execlists); n--; ) {
@@ -458,7 +456,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
                        desc = 0;
                }
 
-               elsp_write(desc, elsp);
+               elsp_write(desc, engine->execlists.elsp);
        }
        execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
 }
@@ -496,8 +494,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 {
        struct intel_context *ce =
                &engine->i915->preempt_context->engine[engine->id];
-       u32 __iomem *elsp =
-               engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
        unsigned int n;
 
        GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
@@ -510,9 +506,9 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 
        GEM_TRACE("\n");
        for (n = execlists_num_ports(&engine->execlists); --n; )
-               elsp_write(0, elsp);
+               elsp_write(0, engine->execlists.elsp);
 
-       elsp_write(ce->lrc_desc, elsp);
+       elsp_write(ce->lrc_desc, engine->execlists.elsp);
        execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
 }
 
@@ -1509,6 +1505,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
        execlists->csb_head = -1;
        execlists->active = 0;
 
+       execlists->elsp =
+               dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+
        /* After a GPU reset, we may have requests to replay */
        if (execlists->first)
                tasklet_schedule(&execlists->tasklet);
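The ExecList Submission Port address is now derived once per engine instead of being recomputed on every submission. The pattern, condensed from the hunks above and below (the elsp field is added to struct intel_engine_execlists in a later hunk):

/* Sketch: cache the ELSP mmio address at engine init, reuse it in the
 * submission hot path.
 */

/* gen8_init_common_ring(): */
execlists->elsp = dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));

/* execlists_submit_ports() / inject_preempt_context(): */
elsp_write(desc, engine->execlists.elsp);

This trades one cached pointer per engine for avoiding the register-offset computation on every port write.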
index 67f326230a7e5414ef4687d57c41c3927adaedac..5836181d6f8a46fd4ba0b0a515ec08b67873a817 100644 (file)
@@ -58,7 +58,7 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
        if (HAS_LLC(dev_priv)) {
                /*
                 * WaCompressedResourceDisplayNewHashMode:skl,kbl
-                * Display WA#0390: skl,kbl
+                * Display WA #0390: skl,kbl
                 *
                 * Must match Sampler, Pixel Back End, and Media. See
                 * WaCompressedResourceSamplerPbeMediaNewHashMode.
@@ -8417,7 +8417,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
        if (!HAS_PCH_CNP(dev_priv))
                return;
 
-       /* Wa #1181 */
+       /* Display WA #1181: cnp */
        I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
                   CNP_PWM_CGE_GATING_DISABLE);
 }
index c68ab3ead83cef95ec9a6fa89de3b714f101ac9d..183165b9b3fbb8276e49e7874503882f6bc7e5d6 100644 (file)
@@ -199,6 +199,11 @@ struct intel_engine_execlists {
         */
        bool no_priolist;
 
+       /**
+        * @elsp: the ExecList Submission Port register
+        */
+       u32 __iomem *elsp;
+
        /**
         * @port: execlist port states
         *
index 8315499452dc91494284455229d3f15ca6c9bac3..96ab74f3d101c1b3673324704d4fed6b2ef09c2a 100644 (file)
@@ -130,6 +130,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
                return "INIT";
        case POWER_DOMAIN_MODESET:
                return "MODESET";
+       case POWER_DOMAIN_GT_IRQ:
+               return "GT_IRQ";
        default:
                MISSING_CASE(domain);
                return "?";
@@ -1705,6 +1707,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        BIT_ULL(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
        SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
+       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
@@ -1727,6 +1730,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        BIT_ULL(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (             \
        BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
+       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
@@ -1785,6 +1789,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        BIT_ULL(POWER_DOMAIN_INIT))
 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (             \
        GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
+       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
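POWER_DOMAIN_GT_IRQ joins the DC-off power-well domains on SKL/BXT/GLK, so holding a reference on it keeps the device out of DC states while GT interrupt delivery is relied upon. An illustrative sketch of taking such a reference; the call sites are assumptions and not part of this series:

/* Sketch: hold the GT_IRQ domain while GT interrupts must remain live. */
intel_display_power_get(dev_priv, POWER_DOMAIN_GT_IRQ);

/* ... window during which GT interrupts are required ... */

intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);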
index 1e2a30a40ede342fc96a081191cf8bef954421f8..461047c86e0dd5abf4ea24e0d20ad79a5083df6e 100644 (file)
@@ -47,55 +47,93 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
 {
-       if (!HAS_GUC(dev_priv)) {
-               if (i915_modparams.enable_guc_loading > 0 ||
-                   i915_modparams.enable_guc_submission > 0)
-                       DRM_INFO("Ignoring GuC options, no hardware\n");
+       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+       int enable_guc = 0;
 
-               i915_modparams.enable_guc_loading = 0;
-               i915_modparams.enable_guc_submission = 0;
-               return;
-       }
+       /* Default is to enable GuC/HuC if we know their firmwares */
+       if (intel_uc_fw_is_selected(guc_fw))
+               enable_guc |= ENABLE_GUC_SUBMISSION;
+       if (intel_uc_fw_is_selected(huc_fw))
+               enable_guc |= ENABLE_GUC_LOAD_HUC;
 
-       /* A negative value means "use platform default" */
-       if (i915_modparams.enable_guc_loading < 0)
-               i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+       /* Any platform specific fine-tuning can be done here */
 
-       /* Verify firmware version */
-       if (i915_modparams.enable_guc_loading) {
-               if (HAS_HUC_UCODE(dev_priv))
-                       intel_huc_select_fw(&dev_priv->huc);
+       return enable_guc;
+}
 
-               if (intel_guc_fw_select(&dev_priv->guc))
-                       i915_modparams.enable_guc_loading = 0;
+/**
+ * intel_uc_sanitize_options - sanitize uC related modparam options
+ * @dev_priv: device private
+ *
+ * This function will attempt to modify the "enable_guc" modparam only if
+ * it was initially set to auto (-1). The default value of this modparam
+ * varies between platforms and is hardcoded in the driver. Any other value
+ * is only checked against the availability of the corresponding hardware
+ * or firmware definitions.
+ */
+void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+{
+       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+
+       /* A negative value means "use platform default" */
+       if (i915_modparams.enable_guc < 0)
+               i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+
+       DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
+                        i915_modparams.enable_guc,
+                        yesno(intel_uc_is_using_guc_submission()),
+                        yesno(intel_uc_is_using_huc()));
+
+       /* Verify GuC firmware availability */
+       if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
+               DRM_WARN("Incompatible option detected: enable_guc=%d, %s!\n",
+                        i915_modparams.enable_guc,
+                        !HAS_GUC(dev_priv) ? "no GuC hardware" :
+                                             "no GuC firmware");
        }
 
-       /* Can't enable guc submission without guc loaded */
-       if (!i915_modparams.enable_guc_loading)
-               i915_modparams.enable_guc_submission = 0;
+       /* Verify HuC firmware availability */
+       if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
+               DRM_WARN("Incompatible option detected: enable_guc=%d, %s!\n",
+                        i915_modparams.enable_guc,
+                        !HAS_HUC(dev_priv) ? "no HuC hardware" :
+                                             "no HuC firmware");
+       }
 
-       /* A negative value means "use platform default" */
-       if (i915_modparams.enable_guc_submission < 0)
-               i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+       /* Make sure that sanitization was done */
+       GEM_BUG_ON(i915_modparams.enable_guc < 0);
 }
 
 void intel_uc_init_early(struct drm_i915_private *dev_priv)
 {
        intel_guc_init_early(&dev_priv->guc);
+       intel_huc_init_early(&dev_priv->huc);
 }
 
 void intel_uc_init_fw(struct drm_i915_private *dev_priv)
 {
-       intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+       if (!USES_GUC(dev_priv))
+               return;
+
+       if (USES_HUC(dev_priv))
+               intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+
        intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
 }
 
 void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
 {
+       if (!USES_GUC(dev_priv))
+               return;
+
        intel_uc_fw_fini(&dev_priv->guc.fw);
-       intel_uc_fw_fini(&dev_priv->huc.fw);
+
+       if (USES_HUC(dev_priv))
+               intel_uc_fw_fini(&dev_priv->huc.fw);
 }
 
 /**
@@ -152,18 +190,24 @@ static void guc_disable_communication(struct intel_guc *guc)
 int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 {
        struct intel_guc *guc = &dev_priv->guc;
+       struct intel_huc *huc = &dev_priv->huc;
        int ret, attempts;
 
-       if (!i915_modparams.enable_guc_loading)
+       if (!USES_GUC(dev_priv))
                return 0;
 
+       if (!HAS_GUC(dev_priv)) {
+               ret = -ENODEV;
+               goto err_out;
+       }
+
        guc_disable_communication(guc);
        gen9_reset_guc_interrupts(dev_priv);
 
        /* We need to notify the guc whenever we change the GGTT */
        i915_ggtt_enable_guc(dev_priv);
 
-       if (i915_modparams.enable_guc_submission) {
+       if (USES_GUC_SUBMISSION(dev_priv)) {
                /*
                 * This is stuff we need to have available at fw load time
                 * if we are planning to enable submission later
@@ -194,7 +238,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
                if (ret)
                        goto err_submission;
 
-               intel_huc_init_hw(&dev_priv->huc);
+               if (USES_HUC(dev_priv)) {
+                       ret = intel_huc_init_hw(huc);
+                       if (ret)
+                               goto err_submission;
+               }
+
                intel_guc_init_params(guc);
                ret = intel_guc_fw_upload(guc);
                if (ret == 0 || ret != -EAGAIN)
@@ -212,8 +261,13 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
        if (ret)
                goto err_log_capture;
 
-       intel_huc_auth(&dev_priv->huc);
-       if (i915_modparams.enable_guc_submission) {
+       if (USES_HUC(dev_priv)) {
+               ret = intel_huc_auth(huc);
+               if (ret)
+                       goto err_communication;
+       }
+
+       if (USES_GUC_SUBMISSION(dev_priv)) {
                if (i915_modparams.guc_log_level >= 0)
                        gen9_enable_guc_interrupts(dev_priv);
 
@@ -222,50 +276,38 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
                        goto err_interrupts;
        }
 
-       dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
-                i915_modparams.enable_guc_submission ? "submission enabled" :
-                                                       "loaded",
-                guc->fw.path,
+       dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
                 guc->fw.major_ver_found, guc->fw.minor_ver_found);
+       dev_info(dev_priv->drm.dev, "GuC submission %s\n",
+                enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
+       dev_info(dev_priv->drm.dev, "HuC %s\n",
+                enableddisabled(USES_HUC(dev_priv)));
 
        return 0;
 
        /*
         * We've failed to load the firmware :(
-        *
-        * Decide whether to disable GuC submission and fall back to
-        * execlist mode, and whether to hide the error by returning
-        * zero or to return -EIO, which the caller will treat as a
-        * nonfatal error (i.e. it doesn't prevent driver load, but
-        * marks the GPU as wedged until reset).
         */
 err_interrupts:
-       guc_disable_communication(guc);
        gen9_disable_guc_interrupts(dev_priv);
+err_communication:
+       guc_disable_communication(guc);
 err_log_capture:
        guc_capture_load_err_log(guc);
 err_submission:
-       if (i915_modparams.enable_guc_submission)
+       if (USES_GUC_SUBMISSION(dev_priv))
                intel_guc_submission_fini(guc);
 err_guc:
        i915_ggtt_disable_guc(dev_priv);
+err_out:
+       /*
+        * Note that there is no fallback: either the user explicitly asked
+        * for the GuC or the driver default was to run with the GuC enabled.
+        */
+       if (GEM_WARN_ON(ret == -EIO))
+               ret = -EINVAL;
 
-       if (i915_modparams.enable_guc_loading > 1 ||
-           i915_modparams.enable_guc_submission > 1) {
-               DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
-               ret = -EIO;
-       } else {
-               DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
-               ret = 0;
-       }
-
-       if (i915_modparams.enable_guc_submission) {
-               i915_modparams.enable_guc_submission = 0;
-               DRM_NOTE("Falling back from GuC submission to execlist mode\n");
-       }
-
-       i915_modparams.enable_guc_loading = 0;
-
+       dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
        return ret;
 }
 
@@ -275,15 +317,15 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
 
        guc_free_load_err_log(guc);
 
-       if (!i915_modparams.enable_guc_loading)
+       if (!USES_GUC(dev_priv))
                return;
 
-       if (i915_modparams.enable_guc_submission)
+       if (USES_GUC_SUBMISSION(dev_priv))
                intel_guc_submission_disable(guc);
 
        guc_disable_communication(guc);
 
-       if (i915_modparams.enable_guc_submission) {
+       if (USES_GUC_SUBMISSION(dev_priv)) {
                gen9_disable_guc_interrupts(dev_priv);
                intel_guc_submission_fini(guc);
        }
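The separate enable_guc_loading/enable_guc_submission modparams are replaced by a single enable_guc mask, with a negative value meaning "use the platform default". A condensed sketch of how that default is resolved, taken from __get_platform_enable_guc() and intel_uc_sanitize_options() above; the helper name is illustrative:

/* Sketch: resolve enable_guc=-1 ("auto") from the selected firmwares. */
static void sketch_resolve_enable_guc(struct drm_i915_private *i915)
{
	if (i915_modparams.enable_guc < 0) {
		int enable_guc = 0;

		/* Default to enabling whatever we have known firmware for. */
		if (intel_uc_fw_is_selected(&i915->guc.fw))
			enable_guc |= ENABLE_GUC_SUBMISSION;
		if (intel_uc_fw_is_selected(&i915->huc.fw))
			enable_guc |= ENABLE_GUC_LOAD_HUC;

		i915_modparams.enable_guc = enable_guc;
	}

	/* Explicit non-auto values are kept as-is; they only trigger a
	 * DRM_WARN when the matching hardware or firmware is missing.
	 */
}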
index e18d3bb020887430bbe66960a0a9144be7cea26f..7a59e2486e9e24fe053dda55784040f85b430048 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "intel_guc.h"
 #include "intel_huc.h"
+#include "i915_params.h"
 
 void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
@@ -35,4 +36,22 @@ void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
 int intel_uc_init_hw(struct drm_i915_private *dev_priv);
 void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
 
+static inline bool intel_uc_is_using_guc(void)
+{
+       GEM_BUG_ON(i915_modparams.enable_guc < 0);
+       return i915_modparams.enable_guc > 0;
+}
+
+static inline bool intel_uc_is_using_guc_submission(void)
+{
+       GEM_BUG_ON(i915_modparams.enable_guc < 0);
+       return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+}
+
+static inline bool intel_uc_is_using_huc(void)
+{
+       GEM_BUG_ON(i915_modparams.enable_guc < 0);
+       return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
+}
+
 #endif
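The USES_GUC()/USES_GUC_SUBMISSION()/USES_HUC() checks used throughout this series are expected to be thin wrappers around the inline helpers above; their actual definitions live in i915_drv.h and are not part of these hunks, so the following is an assumption:

/* Sketch only: plausible definitions of the USES_* checks used in the
 * .c hunks; the real macros are defined in i915_drv.h.
 */
#define USES_GUC(dev_priv)		intel_uc_is_using_guc()
#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
#define USES_HUC(dev_priv)		intel_uc_is_using_huc()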
index b376dd3b28cc8f03c195d728f33b9071f49f4bf5..784eff9cdfc891242459116bf4b4514868992c78 100644 (file)
@@ -214,7 +214,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
                         intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
 
        if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
-               return -EIO;
+               return -ENOEXEC;
 
        uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
        DRM_DEBUG_DRIVER("%s fw load %s\n",
index 5394d9d1e683e2690aee2baf14a2a2656455acd7..d5fd4609c7859d4968263ab0bbd1a5ec9bcd15c8 100644 (file)
@@ -110,6 +110,11 @@ void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
        uc_fw->type = type;
 }
 
+static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
+{
+       return uc_fw->path != NULL;
+}
+
 void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
                       struct intel_uc_fw *uc_fw);
 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
index 7b23597858bbf55d190d389be9612d35d13a5b2b..68d6a69c738fdc836542f068bbd5eb3fb2c784ea 100644 (file)
@@ -362,7 +362,7 @@ int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
                SUBTEST(igt_guc_doorbells),
        };
 
-       if (!i915_modparams.enable_guc_submission)
+       if (!USES_GUC_SUBMISSION(dev_priv))
                return 0;
 
        return i915_subtests(tests, dev_priv);
index e3301dbd27d48521912015500604077fa6f4fac8..48c0661a826cd5df9e892d0f455556d5c73ac067 100644 (file)
@@ -503,6 +503,68 @@ struct vfio_pci_hot_reset {
 
 #define VFIO_DEVICE_PCI_HOT_RESET      _IO(VFIO_TYPE, VFIO_BASE + 13)
 
+/**
+ * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
+ *                                    struct vfio_device_gfx_plane_info)
+ *
+ * Set the drm_plane_type and flags, then retrieve the gfx plane info.
+ *
+ * flags supported:
+ * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
+ *   to ask whether the mdev supports dma-buf: returns 0 if it does,
+ *   -EINVAL if it does not.
+ * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
+ *   to ask whether the mdev supports a region: returns 0 if it does,
+ *   -EINVAL if it does not.
+ * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
+ *   with each call to query the plane info.
+ * - Any other flag combination is invalid and returns -EINVAL.
+ *
+ * Note:
+ * 1. The plane may be disabled by the guest. In that case, success is
+ *    returned with zero-initialized drm_format, size, width and height
+ *    fields.
+ * 2. x_hot/y_hot are set to 0xFFFFFFFF if no hotspot information is available.
+ *
+ * Return: 0 on success, -errno on other failure.
+ */
+struct vfio_device_gfx_plane_info {
+       __u32 argsz;
+       __u32 flags;
+#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
+#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
+#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
+       /* in */
+       __u32 drm_plane_type;   /* type of plane: DRM_PLANE_TYPE_* */
+       /* out */
+       __u32 drm_format;       /* drm format of plane */
+       __u64 drm_format_mod;   /* tiled mode */
+       __u32 width;    /* width of plane */
+       __u32 height;   /* height of plane */
+       __u32 stride;   /* stride of plane */
+       __u32 size;     /* size of plane in bytes, page aligned */
+       __u32 x_pos;    /* horizontal position of cursor plane */
+       __u32 y_pos;    /* vertical position of cursor plane */
+       __u32 x_hot;    /* horizontal position of cursor hotspot */
+       __u32 y_hot;    /* vertical position of cursor hotspot */
+       union {
+               __u32 region_index;     /* region index */
+               __u32 dmabuf_id;        /* dma-buf id */
+       };
+};
+
+#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
+
+/**
+ * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
+ *
+ * Return a new dma-buf file descriptor for an exposed guest framebuffer
+ * described by the provided dmabuf_id. The dmabuf_id is returned from
+ * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
+ */
+
+#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
+
 /* -------- API for Type1 VFIO IOMMU -------- */
 
 /**
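The two new ioctls give userspace a two-step path to a guest framebuffer: query the plane, then exchange the returned dmabuf_id for a dma-buf file descriptor. A minimal userspace sketch, assuming an already-open VFIO device fd and a dma-buf capable mdev; the function name and the numeric plane type value are illustrative:

/* Sketch: probe for dma-buf support, query the primary plane, and fetch a
 * dma-buf fd for it. Error handling is reduced to early returns.
 */
#include <linux/vfio.h>
#include <sys/ioctl.h>

int sketch_get_guest_fb_dmabuf(int device_fd)
{
	struct vfio_device_gfx_plane_info plane = {
		.argsz = sizeof(plane),
		.flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF,
	};

	/* Probe: 0 means the mdev supports the dma-buf path. */
	if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
		return -1;

	plane.flags = VFIO_GFX_PLANE_TYPE_DMABUF;
	plane.drm_plane_type = 1;	/* primary plane, per the DRM plane
					 * "type" property values (assumption) */
	if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
		return -1;

	/* A disabled plane comes back with zeroed format/size/width/height. */
	if (!plane.drm_format)
		return -1;

	/* Exchange the token for a dma-buf file descriptor. */
	return ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
}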