// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/units.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/xe_drm.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
#include "xe_exec.h"
#include "xe_exec_queue.h"
#include "xe_ggtt.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_heci_gsc.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_wait_user_fence.h"

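/*
 * Fake lock for lockdep annotation only: it protects nothing and exists so
 * lockdep can learn about the dependencies taken inside the runtime PM
 * callbacks; see the comment in xe_device_mem_access_get() below.
 */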
#ifdef CONFIG_LOCKDEP
struct lockdep_map xe_device_mem_access_lockdep_map = {
	.name = "xe_device_mem_access_lockdep_map"
};
#endif

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_drm_client *client;
	struct xe_file *xef;
	int ret = -ENOMEM;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return ret;

	client = xe_drm_client_alloc();
	if (!client) {
		kfree(xef);
		return ret;
	}

	xef->drm = file;
	xef->client = client;
	xef->xe = xe;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->exec_queue.lock);
	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

	spin_lock(&xe->clients.lock);
	xe->clients.count++;
	spin_unlock(&xe->clients.lock);

	file->driver_priv = xef;
	return 0;
}

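/*
 * Note the teardown order below: exec queues are killed and released before
 * the VMs they were attached to are closed, so nothing can still be
 * executing by the time a VM is torn down.
 */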
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_exec_queue *q;
	unsigned long idx;

	mutex_lock(&xef->exec_queue.lock);
	xa_for_each(&xef->exec_queue.xa, idx, q) {
		xe_exec_queue_kill(q);
		xe_exec_queue_put(q);
	}
	mutex_unlock(&xef->exec_queue.lock);
	xa_destroy(&xef->exec_queue.xa);
	mutex_destroy(&xef->exec_queue.lock);
	mutex_lock(&xef->vm.lock);
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);
	mutex_unlock(&xef->vm.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	spin_lock(&xe->clients.lock);
	xe->clients.count--;
	spin_unlock(&xe->clients.lock);

	xe_drm_client_put(xef->client);
	kfree(xef);
}

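/*
 * Every xe ioctl is flagged DRM_RENDER_ALLOW and is therefore reachable
 * through render nodes as well as the primary node.
 */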
static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
};

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
};

static void xe_driver_release(struct drm_device *dev)
{
	struct xe_device *xe = to_xe_device(dev);

	pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL);
}

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = xe_drm_client_fdinfo,
#endif
	.release = &xe_driver_release,

	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

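/*
 * Managed (drmm) cleanup action, registered in xe_device_create(): tears
 * down the workqueues and the TTM device once the drm_device goes away.
 */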
static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	if (xe->preempt_fence_wq)
		destroy_workqueue(xe->preempt_fence_wq);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	ttm_device_fini(&xe->ttm);
}

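/*
 * Software-only device creation: allocate the drm_device-embedded xe_device,
 * kick out conflicting firmware framebuffers and set up locks, xarrays,
 * workqueues and the display substructure. No hardware is touched here;
 * MMIO, GT and display bring-up happen later in xe_device_probe().
 */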
struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, false, false);
	if (WARN_ON(err))
		goto err;

	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.force_execlist = xe_modparam.force_execlist;

	spin_lock_init(&xe->irq.lock);
	spin_lock_init(&xe->clients.lock);

	init_waitqueue_head(&xe->ufence_wq);

	drmm_mutex_init(&xe->drm, &xe->usm.lock);
	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		/* Trigger a large asid and an early asid wrap. */
		u32 asid;

		BUILD_BUG_ON(XE_MAX_ASID < 2);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		drm_WARN_ON(&xe->drm, err);
		if (err >= 0)
			xa_erase(&xe->usm.asid_to_vm, asid);
	}

	spin_lock_init(&xe->pinned.lock);
	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
	INIT_LIST_HEAD(&xe->pinned.external_vram);
	INIT_LIST_HEAD(&xe->pinned.evicted);

	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
	if (!xe->ordered_wq || !xe->unordered_wq ||
	    !xe->preempt_fence_wq) {
		/*
		 * Cleanup done in xe_device_destroy via the
		 * drmm_add_action_or_reset registered above.
		 */
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		err = -ENOMEM;
		goto err;
	}

	err = xe_display_create(xe);
	if (WARN_ON(err))
		goto err;

	return xe;

err:
	return ERR_PTR(err);
}

/**
 * xe_driver_flr - Driver-initiated Function-Level-Reset (FLR)
 * @xe: xe device instance
 *
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of the driver is bound to the device it will do a
 * full re-init anyway.
 */
static void xe_driver_flr(struct xe_device *xe)
{
	const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	int ret;

	if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
		drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
		return;
	}

	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a write to
	 * clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
		return;
	}
	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
			     flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
}

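/* drmm action: issue the driver FLR on unbind if a fini-time FLR was requested. */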
static void xe_driver_flr_fini(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;

	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);
}

static void xe_device_sanitize(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}

static int xe_set_dma_info(struct xe_device *xe)
{
	unsigned int mask_size = xe->info.dma_mask_size;
	int err;

	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}

/*
 * Initialize MMIO resources that don't require any knowledge about tile count.
 */
int xe_device_probe_early(struct xe_device *xe)
{
	int err;

	err = xe_mmio_init(xe);
	if (err)
		return err;

	err = xe_mmio_root_tile_init(xe);
	if (err)
		return err;

	return 0;
}

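/*
 * On Xe2+ the BIOS may have disabled flat CCS even when the platform
 * supports it, so read the CCS base-range register back and update
 * xe->info.has_flat_ccs to match what the hardware actually reports.
 */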
static int xe_device_set_has_flat_ccs(struct xe_device *xe)
{
	u32 reg;
	int err;

	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
		return 0;

	struct xe_gt *gt = xe_root_mmio_gt(xe);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm,
			"Flat CCS has been disabled in bios, may lead to performance impact");

	return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

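/*
 * Probe order matters here: early per-tile/GT MMIO, GGTT and hwconfig setup
 * first, then IRQ installation, VRAM and stolen-memory managers, display and
 * full GT init, and only then drm_dev_register() to expose the device to
 * userspace.
 */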
int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 last_gt;
	u8 id;

	xe_pat_init_early(xe);

	err = xe_sriov_init(xe);
	if (err)
		return err;

	xe->info.mem_region_mask = 1;
	err = xe_display_init_nommio(xe);
	if (err)
		return err;

	err = xe_set_dma_info(xe);
	if (err)
		return err;

	xe_mmio_probe_tiles(xe);

	xe_ttm_sys_mgr_init(xe);

	for_each_gt(gt, xe, id)
		xe_force_wake_init_gt(gt, gt_to_fw(gt));

	for_each_tile(tile, xe, id) {
		err = xe_ggtt_init_early(tile->mem.ggtt);
		if (err)
			return err;
		if (IS_SRIOV_VF(xe)) {
			err = xe_memirq_init(&tile->sriov.vf.memirq);
			if (err)
				return err;
		}
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_hwconfig(gt);
		if (err)
			return err;
	}

	err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_pcode_probe(gt);
		if (err)
			return err;
	}

	err = xe_display_init_noirq(xe);
	if (err)
		return err;

	err = xe_irq_install(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			goto err_irq_shutdown;
	}

	err = xe_device_set_has_flat_ccs(xe);
	if (err)
		goto err_irq_shutdown;

	err = xe_mmio_probe_vram(xe);
	if (err)
		goto err_irq_shutdown;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			goto err_irq_shutdown;
	}

	/* Allocate and map stolen after potential VRAM resize */
	xe_ttm_stolen_mgr_init(xe);

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_noaccel(xe);
	if (err)
		goto err_irq_shutdown;

	for_each_gt(gt, xe, id) {
		last_gt = id;

		err = xe_gt_init(gt);
		if (err)
			goto err_fini_gt;
	}

	xe_heci_gsc_init(xe);

	err = xe_display_init(xe);
	if (err)
		goto err_fini_gt;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		goto err_fini_display;

	xe_display_register(xe);

	xe_debugfs_register(xe);

	xe_hwmon_register(xe);

	err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
	if (err)
		return err;

	return 0;

err_fini_display:
	xe_display_driver_remove(xe);

err_fini_gt:
	for_each_gt(gt, xe, id) {
		if (id < last_gt)
			xe_gt_remove(gt);
		else
			break;
	}

err_irq_shutdown:
	xe_irq_shutdown(xe);
err:
	xe_display_fini(xe);
	return err;
}

static void xe_device_remove_display(struct xe_device *xe)
{
	xe_display_unregister(xe);

	drm_dev_unplug(&xe->drm);
	xe_display_driver_remove(xe);
}

void xe_device_remove(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	xe_device_remove_display(xe);

	xe_display_fini(xe);

	xe_heci_gsc_fini(xe);

	for_each_gt(gt, xe, id)
		xe_gt_remove(gt);
}

void xe_device_shutdown(struct xe_device *xe)
{
}

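/*
 * Device-wide write barrier: on top of the CPU-side wmb(), discrete parts
 * also post a dummy MMIO write (a software-flags scratch register),
 * presumably so prior writes are ordered with device memory across the link.
 */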
void xe_device_wmb(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);

	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}

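/*
 * Memory-access reference counting: callers bracket hardware access with a
 * get/put pair, which guarantees the device is runtime-resumed in between:
 *
 *	xe_device_mem_access_get(xe);
 *	... access MMIO / GGTT / VRAM ...
 *	xe_device_mem_access_put(xe);
 *
 * The pm_callback_task checks below keep the runtime PM callbacks themselves
 * from recursing into runtime_get/runtime_put.
 */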
bool xe_device_mem_access_ongoing(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) != NULL)
		return true;

	return atomic_read(&xe->mem_access.ref);
}

void xe_device_assert_mem_access(struct xe_device *xe)
{
	XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
}

bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
{
	bool active;

	if (xe_pm_read_callback_task(xe) == current)
		return true;

	active = xe_pm_runtime_get_if_active(xe);
	if (active) {
		int ref = atomic_inc_return(&xe->mem_access.ref);

		xe_assert(xe, ref != S32_MAX);
	}

	return active;
}

void xe_device_mem_access_get(struct xe_device *xe)
{
	int ref;

	/*
	 * This looks racy, but should be fine since the pm_callback_task only
	 * transitions from NULL -> current (and back to NULL again) during the
	 * runtime_resume() or runtime_suspend() callbacks, for which there can
	 * only be a single one running for our device. We only need to prevent
	 * recursively calling the runtime_get or runtime_put from those
	 * callbacks, as well as preventing triggering any access_ongoing
	 * asserts.
	 */
	if (xe_pm_read_callback_task(xe) == current)
		return;

	/*
	 * Since the resume here is synchronous it can be quite easy to deadlock
	 * if we are not careful. Also in practice it might be quite timing
	 * sensitive to ever see the 0 -> 1 transition with the caller's locks
	 * held, so deadlocks might exist but are hard for lockdep to ever see.
	 * With this in mind, help lockdep learn about the potentially scary
	 * stuff that can happen inside the runtime_resume callback by acquiring
	 * a dummy lock (it doesn't protect anything and gets compiled out on
	 * non-debug builds). Lockdep then only needs to see the
	 * mem_access_lockdep_map -> runtime_resume callback once, and then can
	 * hopefully validate all the (callers_locks) -> mem_access_lockdep_map
	 * chains. For example, if the (callers_locks) are ever grabbed in the
	 * runtime_resume callback, lockdep should give us a nice splat.
	 */
	lock_map_acquire(&xe_device_mem_access_lockdep_map);
	lock_map_release(&xe_device_mem_access_lockdep_map);

	xe_pm_runtime_get(xe);
	ref = atomic_inc_return(&xe->mem_access.ref);

	xe_assert(xe, ref != S32_MAX);
}

void xe_device_mem_access_put(struct xe_device *xe)
{
	int ref;

	if (xe_pm_read_callback_task(xe) == current)
		return;

	ref = atomic_dec_return(&xe->mem_access.ref);
	xe_pm_runtime_put(xe);

	xe_assert(xe, ref >= 0);
}

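/*
 * Print basic device and per-GT information into @p; intended for device
 * state snapshots (e.g. a devcoredump).
 */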
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
	struct xe_gt *gt;
	u8 id;

	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

	for_each_gt(gt, xe, id) {
		drm_printf(p, "GT id: %u\n", id);
		drm_printf(p, "\tType: %s\n",
			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
		drm_printf(p, "\tIP ver: %u.%u.%u\n",
			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
	}
}

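/*
 * GPU addresses are canonical when the bits above the VA range replicate bit
 * (va_bits - 1), mirroring CPU canonical addressing. For example, with
 * va_bits == 48, 0x0000800000000000 canonicalizes to 0xffff800000000000,
 * while uncanonicalize simply masks the address back down to va_bits bits.
 */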
u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
	return sign_extend64(address, xe->info.va_bits - 1);
}

u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}