drm/i915/bdw: Check for slice, subslice and EU count for BDW
drivers/gpu/drm/i915/i915_dma.c
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>

static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
        case I915_PARAM_ALLOW_BATCHBUFFER:
        case I915_PARAM_LAST_DISPATCH:
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pdev->device;
                break;
        case I915_PARAM_REVISION:
                value = dev->pdev->revision;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
                value = intel_ring_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_BSD2:
                value = intel_ring_initialized(&dev_priv->ring[VCS2]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = USES_PPGTT(dev);
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_NO_RELOC:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
                value = 1;
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version();
                break;
        case I915_PARAM_HAS_COHERENT_PHYS_GTT:
                value = 1;
                break;
        case I915_PARAM_MMAP_VERSION:
                value = 1;
                break;
        case I915_PARAM_SUBSLICE_TOTAL:
                value = INTEL_INFO(dev)->subslice_total;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
                value = INTEL_INFO(dev)->eu_total;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_HAS_GPU_RESET:
                value = i915.enable_hangcheck &&
                        intel_has_gpu_reset(dev);
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
                value = HAS_RESOURCE_STREAMER(dev);
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        if (copy_to_user(param->value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user failed\n");
                return -EFAULT;
        }

        return 0;
}
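
/*
 * For reference (not part of this file): userspace reaches the function
 * above through the GETPARAM ioctl. A minimal query from a libdrm-style
 * client, assuming "fd" is an open card or render node, might look like:
 *
 *      int value;
 *      drm_i915_getparam_t gp = {
 *              .param = I915_PARAM_CHIPSET_ID,
 *              .value = &value,
 *      };
 *      if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *              printf("PCI device id: 0x%04x\n", value);
 */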

static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                /* Reject all old ums/dri params. */
                return -ENODEV;

        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                        param->param);
                return -EINVAL;
        }

        return 0;
}
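
/*
 * Likewise (not part of this file): the only SETPARAM value still honoured
 * is the fence-register reservation, issued by a master/root client, e.g.:
 *
 *      drm_i915_setparam_t sp = {
 *              .param = I915_SETPARAM_NUM_USED_FENCES,
 *              .value = 4,
 *      };
 *      ioctl(fd, DRM_IOCTL_I915_SETPARAM, &sp);
 *
 * after which userspace may manage the first 4 fence registers itself.
 */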

static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;

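        /*
         * On gen4+ MCHBAR is a 64-bit BAR (MCHBAR_I965), so the high dword
         * sits in the following config word; earlier parts only have the
         * low 32 bits at MCHBAR_I915.
         */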
        if (INTEL_INFO(dev)->gen >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        dev_priv->mch_res.name = "i915 MCHBAR";
        dev_priv->mch_res.flags = IORESOURCE_MEM;
        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
                                     &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
                                     0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
                return ret;
        }

        if (INTEL_INFO(dev)->gen >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
        return 0;
}

/* Setup MCHBAR if possible; remember in mchbar_need_disable if we should disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        if (IS_VALLEYVIEW(dev))
                return;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
                } else {
                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                        temp &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
                }
        }

        if (dev_priv->mch_res.start)
                release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume_switcheroo(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend_switcheroo(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .set_gpu_state = i915_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = i915_switcheroo_can_switch,
};

static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        /* If we have > 1 VGA cards, then we need to arbitrate access
         * to the common VGA resources.
         *
         * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
        if (ret && ret != -ENODEV)
                goto out;

        intel_register_dsm_handler();

        ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;

        /* Initialise stolen first so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        ret = i915_gem_init_stolen(dev);
        if (ret)
                goto cleanup_vga_switcheroo;

        intel_power_domains_init_hw(dev_priv);

        ret = intel_irq_install(dev_priv);
        if (ret)
                goto cleanup_gem_stolen;

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);

        /* intel_guc_ucode_init() needs the mutex to allocate GEM objects */
        mutex_lock(&dev->struct_mutex);
        intel_guc_ucode_init(dev);
        mutex_unlock(&dev->struct_mutex);

        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;

        intel_modeset_gem_init(dev);

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = true;
        if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev_priv);

        /*
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g. VGA
         * on gm45.  Hence we can only set up the initial fbdev config after hpd
         * irqs are fully enabled. Now we should scan for the initial config
         * only once hotplug handling is enabled, but due to screwed-up locking
         * around kms/fbdev init we can't protect the fbdev initial config
         * scanning against hotplug events. Hence do this first and ignore the
         * tiny window where we will lose hotplug notifications.
         */
        async_schedule(intel_fbdev_initial_config, dev_priv);

        drm_kms_helper_poll_init(dev);

        return 0;

cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
cleanup_irq:
        mutex_lock(&dev->struct_mutex);
        intel_guc_ucode_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        drm_irq_uninstall(dev);
cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
        vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
        vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
        return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        struct apertures_struct *ap;
        struct pci_dev *pdev = dev_priv->dev->pdev;
        bool primary;
        int ret;

        ap = alloc_apertures(1);
        if (!ap)
                return -ENOMEM;

        ap->ranges[0].base = dev_priv->gtt.mappable_base;
        ap->ranges[0].size = dev_priv->gtt.mappable_end;

        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

        ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

        kfree(ap);

        return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        DRM_INFO("Replacing VGA console driver\n");

        console_lock();
        if (con_is_bound(&vga_con))
                ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
        if (ret == 0) {
                ret = do_unregister_con_driver(&vga_con);

                /* Ignore "already unregistered". */
                if (ret == -ENODEV)
                        ret = 0;
        }
        console_unlock();

        return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
        const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
        DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
                         DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
                         info->gen,
                         dev_priv->dev->pdev->device,
                         dev_priv->dev->pdev->revision,
                         DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}

static void cherryview_sseu_info_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        u32 fuse, eu_dis;

        info = (struct intel_device_info *)&dev_priv->info;
        fuse = I915_READ(CHV_FUSE_GT);

        info->slice_total = 1;

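        /*
         * CHV exposes a single slice with up to two subslices; each fuse
         * block below contributes up to 8 EUs, minus whatever the disable
         * masks have fused off.
         */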
        if (!(fuse & CHV_FGT_DISABLE_SS0)) {
                info->subslice_per_slice++;
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
                                 CHV_FGT_EU_DIS_SS0_R1_MASK);
                info->eu_total += 8 - hweight32(eu_dis);
        }

        if (!(fuse & CHV_FGT_DISABLE_SS1)) {
                info->subslice_per_slice++;
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
                                 CHV_FGT_EU_DIS_SS1_R1_MASK);
                info->eu_total += 8 - hweight32(eu_dis);
        }

        info->subslice_total = info->subslice_per_slice;
        /*
         * CHV is expected to always have a uniform distribution of EU
         * across subslices.
         */
        info->eu_per_subslice = info->subslice_total ?
                                info->eu_total / info->subslice_total :
                                0;
        /*
         * CHV supports subslice power gating on devices with more than
         * one subslice, and supports EU power gating on devices with
         * more than one EU pair per subslice.
         */
        info->has_slice_pg = 0;
        info->has_subslice_pg = (info->subslice_total > 1);
        info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        int s_max = 3, ss_max = 4, eu_max = 8;
        int s, ss;
        u32 fuse2, s_enable, ss_disable, eu_disable;
        u8 eu_mask = 0xff;

        info = (struct intel_device_info *)&dev_priv->info;
        fuse2 = I915_READ(GEN8_FUSE2);
        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
                   GEN8_F2_S_ENA_SHIFT;
        ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
                     GEN9_F2_SS_DIS_SHIFT;

        info->slice_total = hweight32(s_enable);
        /*
         * The subslice disable field is global, i.e. it applies
         * to each of the enabled slices.
         */
        info->subslice_per_slice = ss_max - hweight32(ss_disable);
        info->subslice_total = info->slice_total *
                               info->subslice_per_slice;

        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
         */
        for (s = 0; s < s_max; s++) {
                if (!(s_enable & (0x1 << s)))
                        /* skip disabled slice */
                        continue;

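                /*
                 * GEN9_EU_DISABLE(s) is read once per enabled slice; each
                 * subslice owns one byte of EU-disable bits within it
                 * (hence the ss * 8 shift below).
                 */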
                eu_disable = I915_READ(GEN9_EU_DISABLE(s));
                for (ss = 0; ss < ss_max; ss++) {
                        int eu_per_ss;

                        if (ss_disable & (0x1 << ss))
                                /* skip disabled subslice */
                                continue;

                        eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
                                                      eu_mask);

                        /*
                         * Record which subslice(s) have 7 EUs. We can tune
                         * the hash used to spread work among subslices if
                         * they are unbalanced.
                         */
                        if (eu_per_ss == 7)
                                info->subslice_7eu[s] |= 1 << ss;

                        info->eu_total += eu_per_ss;
                }
        }

        /*
         * SKL is expected to always have a uniform distribution
         * of EU across subslices with the exception that any one
         * EU in any one subslice may be fused off for die
         * recovery. BXT is expected to be perfectly uniform in EU
         * distribution.
         */
        info->eu_per_subslice = info->subslice_total ?
                                DIV_ROUND_UP(info->eu_total,
                                             info->subslice_total) : 0;
        /*
         * SKL supports slice power gating on devices with more than
         * one slice, and supports EU power gating on devices with
         * more than one EU pair per subslice. BXT supports subslice
         * power gating on devices with more than one subslice, and
         * supports EU power gating on devices with more than one EU
         * pair per subslice.
         */
        info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
        info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
        info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void broadwell_sseu_info_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        const int s_max = 3, ss_max = 3, eu_max = 8;
        int s, ss;
        u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

        fuse2 = I915_READ(GEN8_FUSE2);
        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
        ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

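        /*
         * The per-slice EU disable masks (8 bits per subslice, 3 subslices
         * per slice) are packed back to back across the three 32-bit
         * GEN8_EU_DISABLE registers, so the masks for slice 1 and slice 2
         * each straddle a register boundary and must be stitched together
         * from two reads.
         */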
        eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
        eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
                        ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
                         (32 - GEN8_EU_DIS0_S1_SHIFT));
        eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
                        ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
                         (32 - GEN8_EU_DIS1_S2_SHIFT));

        info = (struct intel_device_info *)&dev_priv->info;
        info->slice_total = hweight32(s_enable);

        /*
         * The subslice disable field is global, i.e. it applies
         * to each of the enabled slices.
         */
        info->subslice_per_slice = ss_max - hweight32(ss_disable);
        info->subslice_total = info->slice_total * info->subslice_per_slice;

        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
         */
        for (s = 0; s < s_max; s++) {
                if (!(s_enable & (0x1 << s)))
                        /* skip disabled slice */
                        continue;

                for (ss = 0; ss < ss_max; ss++) {
                        u32 n_disabled;

                        if (ss_disable & (0x1 << ss))
                                /* skip disabled subslice */
                                continue;

                        n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

                        /*
                         * Record which subslices have 7 EUs.
                         */
                        if (eu_max - n_disabled == 7)
                                info->subslice_7eu[s] |= 1 << ss;

                        info->eu_total += eu_max - n_disabled;
                }
        }

        /*
         * BDW is expected to always have a uniform distribution of EU across
         * subslices with the exception that any one EU in any one subslice may
         * be fused off for die recovery.
         */
        info->eu_per_subslice = info->subslice_total ?
                DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

        /*
         * BDW supports slice power gating on devices with more than
         * one slice.
         */
        info->has_slice_pg = (info->slice_total > 1);
        info->has_subslice_pg = 0;
        info->has_eu_pg = 0;
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        enum pipe pipe;

        info = (struct intel_device_info *)&dev_priv->info;

        /*
         * Skylake and Broxton currently don't expose the topmost plane as its
         * use is exclusive with the legacy cursor and we only want to expose
         * one of those, not both. Until we can safely expose the topmost plane
         * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
         * we don't expose the topmost plane at all to prevent ABI breakage
         * down the line.
         */
        if (IS_BROXTON(dev)) {
                info->num_sprites[PIPE_A] = 2;
                info->num_sprites[PIPE_B] = 2;
                info->num_sprites[PIPE_C] = 1;
        } else if (IS_VALLEYVIEW(dev))
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 1;

        if (i915.disable_display) {
                DRM_INFO("Display disabled (module parameter)\n");
                info->num_pipes = 0;
        } else if (info->num_pipes > 0 &&
                   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
                   !IS_VALLEYVIEW(dev)) {
                u32 fuse_strap = I915_READ(FUSE_STRAP);
                u32 sfuse_strap = I915_READ(SFUSE_STRAP);

                /*
                 * SFUSE_STRAP is supposed to have a bit signalling the display
                 * is fused off. Unfortunately it seems that, at least in
                 * certain cases, fused off display means that PCH display
                 * reads don't land anywhere. In that case, we read 0s.
                 *
                 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
                 * should be set when taking over after the firmware.
                 */
                if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
                    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
                    (dev_priv->pch_type == PCH_CPT &&
                     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
                        DRM_INFO("Display fused off, disabling\n");
                        info->num_pipes = 0;
                }
        }

        /* Initialize slice/subslice/EU info */
        if (IS_CHERRYVIEW(dev))
                cherryview_sseu_info_init(dev);
        else if (IS_BROADWELL(dev))
                broadwell_sseu_info_init(dev);
        else if (INTEL_INFO(dev)->gen >= 9)
                gen9_sseu_info_init(dev);

        DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
        DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
        DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
        DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
        DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
        DRM_DEBUG_DRIVER("has slice power gating: %s\n",
                         info->has_slice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
                         info->has_subslice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has EU power gating: %s\n",
                         info->has_eu_pg ? "y" : "n");
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
        if (!IS_VALLEYVIEW(dev_priv))
                return;

        /*
         * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
         * CHV x1 PHY (DP/HDMI D)
         * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
         */
        if (IS_CHERRYVIEW(dev_priv)) {
                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
                DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
        } else {
                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
        }
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv;
        struct intel_device_info *info, *device_info;
        int ret = 0, mmio_bar, mmio_size;
        uint32_t aperture_size;

        info = (struct intel_device_info *) flags;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = dev_priv;
        dev_priv->dev = dev;

        /* Setup the write-once "constant" device info */
        device_info = (struct intel_device_info *)&dev_priv->info;
        memcpy(device_info, info, sizeof(dev_priv->info));
        device_info->device_id = dev->pdev->device;

        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
        mutex_init(&dev_priv->sb_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->csr_lock);

        intel_pm_setup(dev);

        intel_display_crc_init(dev);

        i915_dump_device_info(dev_priv);

        /* Not all pre-production machines fall into this category, only the
         * very first ones. Almost everything should work, except for maybe
         * suspend/resume. And we don't implement workarounds that affect only
         * pre-production machines. */
        if (IS_HSW_EARLY_SDV(dev))
                DRM_INFO("This is an early pre-production Haswell machine. "
                         "It may not be fully functional.\n");

        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
        }

        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        /* Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
         * in the same BAR, so we want to restrict this ioremap from
         * clobbering the GTT, which we want to ioremap_wc instead.
         * Fortunately, the register BAR remains the same size for all the
         * earlier generations up to Ironlake.
         */
        if (info->gen < 5)
                mmio_size = 512*1024;
        else
                mmio_size = 2*1024*1024;

        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
                ret = -EIO;
                goto put_bridge;
        }

        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev);

        intel_uncore_init(dev);

        /* Load CSR Firmware for SKL */
        intel_csr_ucode_init(dev);

        ret = i915_gem_gtt_init(dev);
        if (ret)
                goto out_freecsr;

        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over. */
        ret = i915_kick_out_firmware_fb(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
                goto out_gtt;
        }

        ret = i915_kick_out_vgacon(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting VGA console\n");
                goto out_gtt;
        }

        pci_set_master(dev->pdev);

        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

        /* 965GM sometimes incorrectly writes to hardware status page (HWS)
         * using 32bit addressing, overwriting memory if HWS is located
         * above 4GB.
         *
         * The documentation also mentions an issue with undefined
         * behaviour if any general state is accessed within a page above 4GB,
         * which also needs to be handled carefully.
         */
        if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

        aperture_size = dev_priv->gtt.mappable_end;

        dev_priv->gtt.mappable =
                io_mapping_create_wc(dev_priv->gtt.mappable_base,
                                     aperture_size);
        if (dev_priv->gtt.mappable == NULL) {
                ret = -EIO;
                goto out_gtt;
        }

        dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
                                              aperture_size);

        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_gem_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and recording error state.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time.  Use an ordered one.
         */
        dev_priv->wq = alloc_ordered_workqueue("i915", 0);
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
                goto out_mtrrfree;
        }

        dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
        if (dev_priv->hotplug.dp_wq == NULL) {
                DRM_ERROR("Failed to create our dp workqueue.\n");
                ret = -ENOMEM;
                goto out_freewq;
        }

        dev_priv->gpu_error.hangcheck_wq =
                alloc_ordered_workqueue("i915-hangcheck", 0);
        if (dev_priv->gpu_error.hangcheck_wq == NULL) {
                DRM_ERROR("Failed to create our hangcheck workqueue.\n");
                ret = -ENOMEM;
                goto out_freedpwq;
        }

        intel_irq_init(dev_priv);
        intel_uncore_sanitize(dev);

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);

        i915_gem_load(dev);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, but we use them anyway to avoid
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);

        intel_device_info_runtime_init(dev);

        intel_init_dpio(dev_priv);

        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
                if (ret)
                        goto out_gem_unload;
        }

        intel_power_domains_init(dev_priv);

        ret = i915_load_modeset_init(dev);
        if (ret < 0) {
                DRM_ERROR("failed to init modeset\n");
                goto out_power_well;
        }

        /*
         * Notify a valid surface after modesetting,
         * when running inside a VM.
         */
        if (intel_vgpu_active(dev))
                I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

        i915_setup_sysfs(dev);

        if (INTEL_INFO(dev)->num_pipes) {
                /* Must be done after probing outputs */
                intel_opregion_init(dev);
                acpi_video_register();
        }

        if (IS_GEN5(dev))
                intel_gpu_ips_init(dev_priv);

        intel_runtime_pm_enable(dev_priv);

        i915_audio_component_init(dev_priv);

        return 0;

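/*
 * Error unwind: each label below releases what was set up after the
 * previous label's resources were acquired, in reverse order of the
 * initialisation sequence above.
 */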
out_power_well:
        intel_power_domains_fini(dev_priv);
        drm_vblank_cleanup(dev);
out_gem_unload:
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
        pm_qos_remove_request(&dev_priv->pm_qos);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
        destroy_workqueue(dev_priv->hotplug.dp_wq);
out_freewq:
        destroy_workqueue(dev_priv->wq);
out_mtrrfree:
        arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
        i915_global_gtt_cleanup(dev);
out_freecsr:
        intel_csr_ucode_fini(dev);
        intel_uncore_fini(dev);
        pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
free_priv:
        kmem_cache_destroy(dev_priv->requests);
        kmem_cache_destroy(dev_priv->vmas);
        kmem_cache_destroy(dev_priv->objects);
        kfree(dev_priv);
        return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        i915_audio_component_cleanup(dev_priv);

        ret = i915_gem_suspend(dev);
        if (ret) {
                DRM_ERROR("failed to idle hardware: %d\n", ret);
                return ret;
        }

        intel_power_domains_fini(dev_priv);

        intel_gpu_ips_teardown();

        i915_teardown_sysfs(dev);

        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);

        io_mapping_free(dev_priv->gtt.mappable);
        arch_phys_wc_del(dev_priv->gtt.mtrr);

        acpi_video_unregister();

        intel_fbdev_fini(dev);

        drm_vblank_cleanup(dev);

        intel_modeset_cleanup(dev);

        /*
         * free the memory space allocated for the child device
         * config parsed from VBT
         */
        if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
                kfree(dev_priv->vbt.child_dev);
                dev_priv->vbt.child_dev = NULL;
                dev_priv->vbt.child_dev_num = 0;
        }
        kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

        vga_switcheroo_unregister_client(dev->pdev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);

        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        i915_destroy_error_state(dev);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_opregion_fini(dev);

        /* Flush any outstanding unpin_work. */
        flush_workqueue(dev_priv->wq);

        mutex_lock(&dev->struct_mutex);
        intel_guc_ucode_fini(dev);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        intel_fbc_cleanup_cfb(dev_priv);
        i915_gem_cleanup_stolen(dev);

        intel_csr_ucode_fini(dev);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);

        destroy_workqueue(dev_priv->hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
        pm_qos_remove_request(&dev_priv->pm_qos);

        i915_global_gtt_cleanup(dev);

        intel_uncore_fini(dev);
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);

        kmem_cache_destroy(dev_priv->requests);
        kmem_cache_destroy(dev_priv->vmas);
        kmem_cache_destroy(dev_priv->objects);
        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev_priv);

        return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        int ret;

        ret = i915_gem_open(dev, file);
        if (ret)
                return ret;

        return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
        intel_fbdev_restore_mode(dev);
        vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
        mutex_lock(&dev->struct_mutex);
        i915_gem_context_close(dev, file);
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);

        intel_modeset_preclose(dev, file);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        if (file_priv && file_priv->bsd_ring)
                file_priv->bsd_ring = NULL;
        kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        return -ENODEV;
}

const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
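
/*
 * Note (not part of this file): this table and i915_max_ioctl are wired
 * into the driver's struct drm_driver elsewhere (roughly .ioctls =
 * i915_ioctls and num_ioctls = i915_max_ioctl in i915_drv.c), which is how
 * the DRM core dispatches the device-specific ioctls declared above.
 */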