drm/i915: Fix fbdev unload sequence
[linux-2.6-block.git] drivers/gpu/drm/i915/i915_drv.c
index 48428672fc6ece0927416d17a8dde8c41f00f500..ffeb3a3bed5867676b65867576fbc39a548befb6 100644
@@ -132,13 +132,20 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
                DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
        } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
                ret = PCH_CPT;
-               DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+               DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                ret = PCH_LPT;
+               if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+                       dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+               else
+                       dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
                DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
        } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                ret = PCH_SPT;
                DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+       } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+               ret = PCH_CNP;
+               DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
        }
 
        return ret;
@@ -170,6 +177,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
                        unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
                        dev_priv->pch_id = id;
 
                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -179,14 +187,14 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-                               WARN_ON(!(IS_GEN6(dev_priv) ||
-                                       IS_IVYBRIDGE(dev_priv)));
+                               WARN_ON(!IS_GEN6(dev_priv) &&
+                                       !IS_IVYBRIDGE(dev_priv));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-                               WARN_ON(!(IS_GEN6(dev_priv) ||
-                                       IS_IVYBRIDGE(dev_priv)));
+                               WARN_ON(!IS_GEN6(dev_priv) &&
+                                       !IS_IVYBRIDGE(dev_priv));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
@@ -201,6 +209,22 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
                                        !IS_BROADWELL(dev_priv));
                                WARN_ON(!IS_HSW_ULT(dev_priv) &&
                                        !IS_BDW_ULT(dev_priv));
+                       } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
+                               /* WildcatPoint is LPT compatible */
+                               dev_priv->pch_type = PCH_LPT;
+                               DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+                               WARN_ON(!IS_HASWELL(dev_priv) &&
+                                       !IS_BROADWELL(dev_priv));
+                               WARN_ON(IS_HSW_ULT(dev_priv) ||
+                                       IS_BDW_ULT(dev_priv));
+                       } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
+                               /* WildcatPoint is LPT compatible */
+                               dev_priv->pch_type = PCH_LPT;
+                               DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+                               WARN_ON(!IS_HASWELL(dev_priv) &&
+                                       !IS_BROADWELL(dev_priv));
+                               WARN_ON(!IS_HSW_ULT(dev_priv) &&
+                                       !IS_BDW_ULT(dev_priv));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
@@ -216,9 +240,19 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
                                DRM_DEBUG_KMS("Found KabyPoint PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev_priv) &&
                                        !IS_KABYLAKE(dev_priv));
-                       } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
-                                  (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
-                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+                       } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_CNP;
+                               DRM_DEBUG_KMS("Found CannonPoint PCH\n");
+                               WARN_ON(!IS_CANNONLAKE(dev_priv) &&
+                                       !IS_COFFEELAKE(dev_priv));
+                       } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_CNP;
+                               DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
+                               WARN_ON(!IS_CANNONLAKE(dev_priv) &&
+                                       !IS_COFFEELAKE(dev_priv));
+                       } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+                                  id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+                                  (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
                                    pch->subsystem_vendor ==
                                            PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
                                    pch->subsystem_device ==
@@ -306,6 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
                break;
        case I915_PARAM_HAS_GPU_RESET:
                value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+               if (value && intel_has_reset_engine(dev_priv))
+                       value = 2;
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
                value = HAS_RESOURCE_STREAMER(dev_priv);
@@ -350,6 +386,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_EXEC_SOFTPIN:
        case I915_PARAM_HAS_EXEC_ASYNC:
        case I915_PARAM_HAS_EXEC_FENCE:
+       case I915_PARAM_HAS_EXEC_CAPTURE:
+       case I915_PARAM_HAS_EXEC_BATCH_FIRST:
                /* For the time being all of these are always true;
                 * if some supported hardware does not have one of these
                 * features this value needs to be provided from
@@ -357,6 +395,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
                 */
                value = 1;
                break;
+       case I915_PARAM_SLICE_MASK:
+               value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+               if (!value)
+                       return -ENODEV;
+               break;
+       case I915_PARAM_SUBSLICE_MASK:
+               value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+               if (!value)
+                       return -ENODEV;
+               break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
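
The getparam additions above are visible to userspace through the existing GETPARAM ioctl: I915_PARAM_HAS_GPU_RESET now reports 2 when per-engine reset is available, and the new slice/subslice mask parameters return -ENODEV when the masks are unknown. A minimal userspace sketch, assuming a uapi/libdrm header new enough to define the new I915_PARAM_* values (the device node and error handling are illustrative only):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <xf86drm.h>   /* drmIoctl(), from libdrm */
	#include <i915_drm.h>  /* DRM_IOCTL_I915_GETPARAM, I915_PARAM_* */

	static int getparam(int fd, int param, int *value)
	{
		struct drm_i915_getparam gp = { .param = param, .value = value };

		return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	}

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR); /* illustrative node */
		int reset = 0, slice_mask = 0;

		if (fd < 0)
			return 1;

		if (getparam(fd, I915_PARAM_HAS_GPU_RESET, &reset) == 0)
			printf("GPU reset support: %d (2 => per-engine reset)\n", reset);
		if (getparam(fd, I915_PARAM_SLICE_MASK, &slice_mask) == 0)
			printf("slice mask: 0x%x\n", slice_mask);

		close(fd);
		return 0;
	}
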
@@ -548,15 +596,19 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 
 static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
+       /* Flush any outstanding unpin_work. */
+       i915_gem_drain_workqueue(dev_priv);
+
        mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uc_fini_hw(dev_priv);
        i915_gem_cleanup_engines(dev_priv);
-       i915_gem_context_fini(dev_priv);
+       i915_gem_contexts_fini(dev_priv);
+       i915_gem_cleanup_userptr(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        i915_gem_drain_freed_objects(dev_priv);
 
-       WARN_ON(!list_empty(&dev_priv->context_list));
+       WARN_ON(!list_empty(&dev_priv->contexts.list));
 }
 
 static int i915_load_modeset_init(struct drm_device *dev)
@@ -834,10 +886,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        intel_uc_init_early(dev_priv);
        i915_memcpy_init_early(dev_priv);
 
-       ret = intel_engines_init_early(dev_priv);
-       if (ret)
-               return ret;
-
        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
                goto err_engines;
@@ -855,7 +903,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        intel_init_audio_hooks(dev_priv);
        ret = i915_gem_load_init(dev_priv);
        if (ret < 0)
-               goto err_workqueues;
+               goto err_irq;
 
        intel_display_crc_init(dev_priv);
 
@@ -867,7 +915,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
        return 0;
 
-err_workqueues:
+err_irq:
+       intel_irq_fini(dev_priv);
        i915_workqueues_cleanup(dev_priv);
 err_engines:
        i915_engines_cleanup(dev_priv);
@@ -882,6 +931,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
        i915_perf_fini(dev_priv);
        i915_gem_load_cleanup(dev_priv);
+       intel_irq_fini(dev_priv);
        i915_workqueues_cleanup(dev_priv);
        i915_engines_cleanup(dev_priv);
 }
@@ -947,14 +997,21 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
        ret = i915_mmio_setup(dev_priv);
        if (ret < 0)
-               goto put_bridge;
+               goto err_bridge;
 
        intel_uncore_init(dev_priv);
+
+       ret = intel_engines_init_mmio(dev_priv);
+       if (ret)
+               goto err_uncore;
+
        i915_gem_init_mmio(dev_priv);
 
        return 0;
 
-put_bridge:
+err_uncore:
+       intel_uncore_fini(dev_priv);
+err_bridge:
        pci_dev_put(dev_priv->bridge_dev);
 
        return ret;
@@ -991,6 +1048,8 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
        DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
 
        intel_uc_sanitize_options(dev_priv);
+
+       intel_gvt_sanitize_options(dev_priv);
 }
 
 /**
@@ -1087,10 +1146,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
-        * be lost or delayed, but we use them anyways to avoid
-        * stuck interrupts on some machines.
+        * be lost or delayed, and was defeatured. MSI interrupts seem to
+        * get lost on g4x as well, and interrupt delivery seems to stay
+        * properly dead afterwards. So we'll just disable them for all
+        * pre-gen5 chipsets.
         */
-       if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 5) {
                if (pci_enable_msi(pdev) < 0)
                        DRM_DEBUG_DRIVER("can't enable MSI");
        }
@@ -1180,6 +1241,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 {
+       intel_fbdev_unregister(dev_priv);
        intel_audio_deinit(dev_priv);
 
        intel_gpu_ips_teardown();
@@ -1213,9 +1275,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct drm_i915_private *dev_priv;
        int ret;
 
-       /* Enable nuclear pageflip on ILK+, except vlv/chv */
-       if (!i915.nuclear_pageflip &&
-           (match_info->gen < 5 || match_info->has_gmch_display))
+       /* Enable nuclear pageflip on ILK+ */
+       if (!i915.nuclear_pageflip && match_info->gen < 5)
                driver.driver_features &= ~DRIVER_ATOMIC;
 
        ret = -ENOMEM;
@@ -1314,8 +1375,6 @@ void i915_driver_unload(struct drm_device *dev)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
 
-       intel_fbdev_fini(dev);
-
        if (i915_gem_suspend(dev_priv))
                DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 
@@ -1354,9 +1413,6 @@ void i915_driver_unload(struct drm_device *dev)
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        i915_reset_error_state(dev_priv);
 
-       /* Flush any outstanding unpin_work. */
-       drain_workqueue(dev_priv->wq);
-
        i915_gem_fini(dev_priv);
        intel_uc_fini_fw(dev_priv);
        intel_fbc_cleanup_cfb(dev_priv);
@@ -1381,9 +1437,10 @@ static void i915_driver_release(struct drm_device *dev)
 
 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
+       struct drm_i915_private *i915 = to_i915(dev);
        int ret;
 
-       ret = i915_gem_open(dev, file);
+       ret = i915_gem_open(i915, file);
        if (ret)
                return ret;
 
@@ -1413,7 +1470,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_context_close(dev, file);
+       i915_gem_context_close(file);
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);
 
@@ -1865,9 +1922,72 @@ wakeup:
 
 error:
        i915_gem_set_wedged(dev_priv);
+       i915_gem_retire_requests(dev_priv);
        goto finish;
 }
 
+/**
+ * i915_reset_engine - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ *  - identify the request that caused the hang and drop it
+ *  - reset engine (which will force the engine to idle)
+ *  - re-init/configure engine
+ */
+int i915_reset_engine(struct intel_engine_cs *engine)
+{
+       struct i915_gpu_error *error = &engine->i915->gpu_error;
+       struct drm_i915_gem_request *active_request;
+       int ret;
+
+       GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+
+       DRM_DEBUG_DRIVER("resetting %s\n", engine->name);
+
+       active_request = i915_gem_reset_prepare_engine(engine);
+       if (IS_ERR(active_request)) {
+               DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
+               ret = PTR_ERR(active_request);
+               goto out;
+       }
+
+       /*
+        * The request that caused the hang is stuck on elsp, we know the
+        * active request and can drop it, adjust head to skip the offending
+        * request to resume executing remaining requests in the queue.
+        */
+       i915_gem_reset_engine(engine, active_request);
+
+       /* Finally, reset just this engine. */
+       ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+
+       i915_gem_reset_finish_engine(engine);
+
+       if (ret) {
+               /* If we fail here, we expect to fallback to a global reset */
+               DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+                                engine->name, ret);
+               goto out;
+       }
+
+       /*
+        * The engine and its registers (and workarounds in case of render)
+        * have been reset to their default values. Follow the init_ring
+        * process to program RING_MODE, HWSP and re-enable submission.
+        */
+       ret = engine->init_hw(engine);
+       if (ret)
+               goto out;
+
+       error->reset_engine_count[engine->id]++;
+out:
+       return ret;
+}
+
 static int i915_pm_suspend(struct device *kdev)
 {
        struct pci_dev *pdev = to_pci_dev(kdev);
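
For context, i915_reset_engine() above expects its caller to already hold the corresponding I915_RESET_ENGINE flag bit (see the GEM_BUG_ON()) and, per the comment in its error path, to fall back to a full GPU reset when the engine reset fails. A hypothetical caller sketch against the i915 internals shown here; the function name and the escalation placeholder are illustrative, not part of this patch:

	static void example_recover_engine(struct intel_engine_cs *engine)
	{
		struct i915_gpu_error *error = &engine->i915->gpu_error;

		/* Serialize against other resets of this engine. */
		if (test_and_set_bit(I915_RESET_ENGINE + engine->id, &error->flags))
			return;

		if (i915_reset_engine(engine))
			/* Caller would escalate to a full GPU reset here. */
			DRM_ERROR("%s reset failed, full GPU reset needed\n",
				  engine->name);

		clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
	}
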
@@ -2454,9 +2574,6 @@ static int intel_runtime_resume(struct device *kdev)
 
        intel_guc_resume(dev_priv);
 
-       if (IS_GEN6(dev_priv))
-               intel_init_pch_refclk(dev_priv);
-
        if (IS_GEN9_LP(dev_priv)) {
                bxt_disable_dc9(dev_priv);
                bxt_display_core_init(dev_priv, true);