drm/i915: Fix locking around GuC firmware load
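intel_guc_ucode_init() and intel_guc_ucode_fini() were called with dev->struct_mutex held at all three call sites in i915_dma.c (modeset init, its cleanup_irq error path, and driver unload). Drop the explicit lock/unlock pairs and, in i915_driver_unload(), move the fini call ahead of the struct_mutex critical section; presumably the GuC loader now takes whatever locks it needs internally. Note that this blobdiff is taken against an older revision and so also appears to pick up unrelated i915_dma.c changes (Broadwell SSEU info readout, removal of the legacy i915_setparam ioctl and of the DRM_UNLOCKED ioctl flags, kmem_cache_destroy() NULL-check cleanups, and the new av_mutex init).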
[linux-2.6-block.git] drivers/gpu/drm/i915/i915_dma.c
index c0695504dd8fa35e9bd793d31f85c9404b0f2db7..b4741d121a744f67b95f1d4af95fd0518e1ddf4f 100644
@@ -75,7 +75,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
-               value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+               value = dev_priv->num_fence_regs;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
@@ -183,35 +183,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
        return 0;
 }
 
-static int i915_setparam(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_setparam_t *param = data;
-
-       switch (param->param) {
-       case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-       case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
-       case I915_SETPARAM_ALLOW_BATCHBUFFER:
-               /* Reject all old ums/dri params. */
-               return -ENODEV;
-
-       case I915_SETPARAM_NUM_USED_FENCES:
-               if (param->value > dev_priv->num_fence_regs ||
-                   param->value < 0)
-                       return -EINVAL;
-               /* Userspace can use first N regs */
-               dev_priv->fence_reg_start = param->value;
-               break;
-       default:
-               DRM_DEBUG_DRIVER("unknown parameter %d\n",
-                                       param->param);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int i915_get_bridge_dev(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -435,10 +406,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
 
-       /* intel_guc_ucode_init() needs the mutex to allocate GEM objects */
-       mutex_lock(&dev->struct_mutex);
        intel_guc_ucode_init(dev);
-       mutex_unlock(&dev->struct_mutex);
 
        ret = i915_gem_init(dev);
        if (ret)
@@ -481,9 +449,7 @@ cleanup_gem:
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
-       mutex_lock(&dev->struct_mutex);
        intel_guc_ucode_fini(dev);
-       mutex_unlock(&dev->struct_mutex);
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
@@ -631,17 +597,6 @@ static void gen9_sseu_info_init(struct drm_device *dev)
        u32 fuse2, s_enable, ss_disable, eu_disable;
        u8 eu_mask = 0xff;
 
-       /*
-        * BXT has a single slice. BXT also has at most 6 EU per subslice,
-        * and therefore only the lowest 6 bits of the 8-bit EU disable
-        * fields are valid.
-       */
-       if (IS_BROXTON(dev)) {
-               s_max = 1;
-               eu_max = 6;
-               eu_mask = 0x3f;
-       }
-
        info = (struct intel_device_info *)&dev_priv->info;
        fuse2 = I915_READ(GEN8_FUSE2);
        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
@@ -713,6 +668,82 @@ static void gen9_sseu_info_init(struct drm_device *dev)
        info->has_eu_pg = (info->eu_per_subslice > 2);
 }
 
+static void broadwell_sseu_info_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_device_info *info;
+       const int s_max = 3, ss_max = 3, eu_max = 8;
+       int s, ss;
+       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+       fuse2 = I915_READ(GEN8_FUSE2);
+       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
+       eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+       eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+                        (32 - GEN8_EU_DIS0_S1_SHIFT));
+       eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+                        (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+
+       info = (struct intel_device_info *)&dev_priv->info;
+       info->slice_total = hweight32(s_enable);
+
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       info->subslice_per_slice = ss_max - hweight32(ss_disable);
+       info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < s_max; s++) {
+               if (!(s_enable & (0x1 << s)))
+                       /* skip disabled slice */
+                       continue;
+
+               for (ss = 0; ss < ss_max; ss++) {
+                       u32 n_disabled;
+
+                       if (ss_disable & (0x1 << ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+                       /*
+                        * Record which subslices have 7 EUs.
+                        */
+                       if (eu_max - n_disabled == 7)
+                               info->subslice_7eu[s] |= 1 << ss;
+
+                       info->eu_total += eu_max - n_disabled;
+               }
+       }
+
+       /*
+        * BDW is expected to always have a uniform distribution of EU across
+        * subslices with the exception that any one EU in any one subslice may
+        * be fused off for die recovery.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+       /*
+        * BDW supports slice power gating on devices with more than
+        * one slice.
+        */
+       info->has_slice_pg = (info->slice_total > 1);
+       info->has_subslice_pg = 0;
+       info->has_eu_pg = 0;
+}
+
 /*
  * Determine various intel_device_info fields at runtime.
  *
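A side note on the Broadwell path added above: the three 24-bit per-slice EU-disable fields are packed back-to-back across the three 32-bit GEN8_EU_DISABLE registers, which is why slices 1 and 2 have to stitch their field together from two adjacent registers. Below is a standalone userspace sketch of that unpacking and the EU counting, with made-up register values and the slice/subslice fuse handling omitted, assuming the 0xffffff/24, 0xffff/16 and 0xff masks and shifts implied by the code above:

#include <stdio.h>
#include <stdint.h>

#define S_MAX  3
#define SS_MAX 3
#define EU_MAX 8

/* popcount of the low 8 bits, mirroring the kernel's hweight8() */
static int hweight8(uint32_t v)
{
	return __builtin_popcount(v & 0xff);
}

int main(void)
{
	/* Hypothetical GEN8_EU_DISABLE0/1/2 readouts: one EU fused off in
	 * slice 0 / subslice 1, everything else enabled. */
	uint32_t dis0 = 0x00000100, dis1 = 0x00000000, dis2 = 0x00000000;
	uint32_t eu_disable[S_MAX];
	int s, ss, eu_total = 0;

	/* Slice 0: bits 0..23 of register 0 */
	eu_disable[0] = dis0 & 0xffffff;
	/* Slice 1: bits 24..31 of register 0 + bits 0..15 of register 1 */
	eu_disable[1] = (dis0 >> 24) | ((dis1 & 0xffff) << 8);
	/* Slice 2: bits 16..31 of register 1 + bits 0..7 of register 2 */
	eu_disable[2] = (dis1 >> 16) | ((dis2 & 0xff) << 16);

	/* 8 EU-disable bits per subslice; count what is left enabled */
	for (s = 0; s < S_MAX; s++)
		for (ss = 0; ss < SS_MAX; ss++)
			eu_total += EU_MAX - hweight8(eu_disable[s] >> (ss * EU_MAX));

	printf("enabled EUs: %d\n", eu_total);	/* 3 * 3 * 8 - 1 = 71 */
	return 0;
}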
@@ -783,6 +814,8 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
        /* Initialize slice/subslice/EU info */
        if (IS_CHERRYVIEW(dev))
                cherryview_sseu_info_init(dev);
+       else if (IS_BROADWELL(dev))
+               broadwell_sseu_info_init(dev);
        else if (INTEL_INFO(dev)->gen >= 9)
                gen9_sseu_info_init(dev);
 
@@ -858,6 +891,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        mutex_init(&dev_priv->sb_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->csr_lock);
+       mutex_init(&dev_priv->av_mutex);
 
        intel_pm_setup(dev);
 
@@ -1085,12 +1119,9 @@ out_freecsr:
 put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
 free_priv:
-       if (dev_priv->requests)
-               kmem_cache_destroy(dev_priv->requests);
-       if (dev_priv->vmas)
-               kmem_cache_destroy(dev_priv->vmas);
-       if (dev_priv->objects)
-               kmem_cache_destroy(dev_priv->objects);
+       kmem_cache_destroy(dev_priv->requests);
+       kmem_cache_destroy(dev_priv->vmas);
+       kmem_cache_destroy(dev_priv->objects);
        kfree(dev_priv);
        return ret;
 }
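kmem_cache_destroy() already returns early when passed a NULL pointer, so the explicit NULL checks removed here (and again in i915_driver_unload() below) were redundant.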
@@ -1157,8 +1188,8 @@ int i915_driver_unload(struct drm_device *dev)
        /* Flush any outstanding unpin_work. */
        flush_workqueue(dev_priv->wq);
 
-       mutex_lock(&dev->struct_mutex);
        intel_guc_ucode_fini(dev);
+       mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
@@ -1181,13 +1212,9 @@ int i915_driver_unload(struct drm_device *dev)
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);
 
-       if (dev_priv->requests)
-               kmem_cache_destroy(dev_priv->requests);
-       if (dev_priv->vmas)
-               kmem_cache_destroy(dev_priv->vmas);
-       if (dev_priv->objects)
-               kmem_cache_destroy(dev_priv->objects);
-
+       kmem_cache_destroy(dev_priv->requests);
+       kmem_cache_destroy(dev_priv->vmas);
+       kmem_cache_destroy(dev_priv->objects);
        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev_priv);
 
@@ -1257,7 +1284,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -1267,41 +1294,41 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
 };
 
 int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);