1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
3 /* Copyright 2023 Collabora ltd. */
5 #include <drm/drm_debugfs.h>
6 #include <drm/drm_drv.h>
7 #include <drm/drm_exec.h>
8 #include <drm/drm_gpuvm.h>
9 #include <drm/drm_managed.h>
10 #include <drm/gpu_scheduler.h>
11 #include <drm/panthor_drm.h>
13 #include <linux/atomic.h>
14 #include <linux/bitfield.h>
15 #include <linux/delay.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/interrupt.h>
19 #include <linux/iopoll.h>
20 #include <linux/io-pgtable.h>
21 #include <linux/iommu.h>
22 #include <linux/kmemleak.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/rwsem.h>
26 #include <linux/sched.h>
27 #include <linux/shmem_fs.h>
28 #include <linux/sizes.h>
30 #include "panthor_device.h"
31 #include "panthor_gem.h"
32 #include "panthor_heap.h"
33 #include "panthor_mmu.h"
34 #include "panthor_regs.h"
35 #include "panthor_sched.h"
37 #define MAX_AS_SLOTS 32
42 * struct panthor_as_slot - Address space slot
44 struct panthor_as_slot {
45 /** @vm: VM bound to this slot. NULL if no VM is bound. */
46 struct panthor_vm *vm;
50 * struct panthor_mmu - MMU related data
53 /** @irq: The MMU irq. */
54 struct panthor_irq irq;
56 /** @as: Address space related fields.
58 * The GPU has a limited number of address spaces (AS) slots, forcing
59 * us to re-assign them on demand when there are more VMs than slots.
62 /** @slots_lock: Lock protecting access to all other AS fields. */
63 struct mutex slots_lock;
65 /** @alloc_mask: Bitmask encoding the allocated slots. */
66 unsigned long alloc_mask;
68 /** @faulty_mask: Bitmask encoding the faulty slots. */
69 unsigned long faulty_mask;
71 /** @slots: VMs currently bound to the AS slots. */
72 struct panthor_as_slot slots[MAX_AS_SLOTS];
75 * @lru_list: List of least recently used VMs.
77 * We use this list to pick a VM to evict when all slots are used.
80 * There should be no more active VMs than there are AS slots,
81 * so this LRU is just here to keep VMs bound until there's
82 * a need to release a slot, thus avoiding unnecessary TLB/cache flushes.
85 struct list_head lru_list;
88 /** @vm: VM management fields. */
90 /** @lock: Lock protecting access to @list. */
93 /** @list: List containing all VMs. */
94 struct list_head list;
96 /** @reset_in_progress: True if a reset is in progress. */
97 bool reset_in_progress;
99 /** @wq: Workqueue used for the VM_BIND queues. */
100 struct workqueue_struct *wq;
105 * struct panthor_vm_pool - VM pool object
107 struct panthor_vm_pool {
108 /** @xa: Array used for VM handle tracking. */
113 * struct panthor_vma - GPU mapping object
115 * This is used to track GEM mappings in GPU space.
118 /** @base: Inherits from drm_gpuva. */
119 struct drm_gpuva base;
121 /** @node: Used to implement deferred release of VMAs. */
122 struct list_head node;
125 * @flags: Combination of drm_panthor_vm_bind_op_flags.
127 * Only map related flags are accepted.
133 * struct panthor_vm_op_ctx - VM operation context
135 * With VM operations potentially taking place in a dma-signaling path, we
136 * need to make sure everything that might require resource allocation is
137 * pre-allocated upfront. This is what this operation context is for.
139 * We also collect resources that have been freed, so we can release them
140 * asynchronously, and let the VM_BIND scheduler process the next VM_BIND operation.
143 struct panthor_vm_op_ctx {
144 /** @rsvd_page_tables: Pages reserved for the MMU page table update. */
146 /** @count: Number of pages reserved. */
149 /** @ptr: Index of the first unused page in the @pages table. */
153 * @pages: Array of pages that can be used for an MMU page table update.
155 * After a VM operation, there might be free pages left in this array.
156 * They should be returned to the pt_cache as part of the op_ctx cleanup.
162 * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
164 * Partial unmap requests or map requests overlapping existing mappings will
165 * trigger a remap call, which needs to register up to three panthor_vma objects
166 * (one for the new mapping, and two for the previous and next mappings).
168 struct panthor_vma *preallocated_vmas[3];
170 /** @flags: Combination of drm_panthor_vm_bind_op_flags. */
173 /** @va: Virtual range targeted by the VM operation. */
175 /** @addr: Start address. */
178 /** @range: Range size. */
183 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
185 * For unmap operations, this will contain all VMAs that were covered by the
186 * specified VA range.
188 * For map operations, this will contain all VMAs that previously mapped to
189 * the specified VA range.
191 * Those VMAs, and the resources they point to will be released as part of
192 * the op_ctx cleanup operation.
194 struct list_head returned_vmas;
196 /** @map: Fields specific to a map operation. */
198 /** @vm_bo: Buffer object to map. */
199 struct drm_gpuvm_bo *vm_bo;
201 /** @bo_offset: Offset in the buffer object. */
205 * @sgt: sg-table pointing to pages backing the GEM object.
207 * This is gathered at job creation time, such that we don't have
208 * to allocate in ::run_job().
210 struct sg_table *sgt;
213 * @new_vma: The new VMA object that will be inserted to the VA tree.
215 struct panthor_vma *new_vma;
220 * struct panthor_vm - VM object
222 * A VM is an object representing a GPU (or MCU) virtual address space.
223 * It embeds the MMU page table for this address space, a tree containing
224 * all the virtual mappings of GEM objects, and other things needed to manage the VM.
227 * Except for the MCU VM, which is managed by the kernel, all other VMs are
228 * created by userspace and mostly managed by userspace, using the
229 * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
231 * A portion of the virtual address space is reserved for kernel objects,
232 * like heap chunks, and userspace gets to decide how much of the virtual
233 * address space is left to the kernel (half of the virtual address space at most).
238 * @base: Inherit from drm_gpuvm.
240 * We delegate all the VA management to the common drm_gpuvm framework
241 * and only implement hooks to update the MMU page table.
243 struct drm_gpuvm base;
246 * @sched: Scheduler used for asynchronous VM_BIND requests.
248 * We use a 1:1 scheduler here.
250 struct drm_gpu_scheduler sched;
253 * @entity: Scheduling entity representing the VM_BIND queue.
255 * There's currently one bind queue per VM. It doesn't make sense to
256 * allow more given the VM operations are serialized anyway.
258 struct drm_sched_entity entity;
260 /** @ptdev: Device. */
261 struct panthor_device *ptdev;
263 /** @memattr: Value to program to the AS_MEMATTR register. */
266 /** @pgtbl_ops: Page table operations. */
267 struct io_pgtable_ops *pgtbl_ops;
269 /** @root_page_table: Stores the root page table pointer. */
270 void *root_page_table;
273 * @op_lock: Lock used to serialize operations on a VM.
275 * The serialization of jobs queued to the VM_BIND queue is already
276 * taken care of by drm_sched, but we need to serialize synchronous
277 * and asynchronous VM_BIND requests. This is what this lock is for.
279 struct mutex op_lock;
282 * @op_ctx: The context attached to the currently executing VM operation.
284 * NULL when no operation is in progress.
286 struct panthor_vm_op_ctx *op_ctx;
289 * @mm: Memory management object representing the auto-VA/kernel-VA.
291 * Used to auto-allocate VA space for kernel-managed objects (tiler heaps, ...).
294 * For the MCU VM, this is managing the VA range that's used to map
295 * all shared interfaces.
297 * For user VMs, the range is specified by userspace, and must not
298 * exceed half of the addressable VA space.
302 /** @mm_lock: Lock protecting the @mm field. */
303 struct mutex mm_lock;
305 /** @kernel_auto_va: Automatic VA-range for kernel BOs. */
307 /** @start: Start of the automatic VA-range for kernel BOs. */
310 /** @end: End of the automatic VA-range for kernel BOs. */
314 /** @as: Address space related fields. */
317 * @id: ID of the address space this VM is bound to.
319 * A value of -1 means the VM is inactive/not bound.
323 /** @active_cnt: Number of active users of this VM. */
324 refcount_t active_cnt;
327 * @lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
329 * Active VMs should not be inserted in the LRU list.
331 struct list_head lru_node;
335 * @heaps: Tiler heap related fields.
339 * @pool: The heap pool attached to this VM.
341 * Will stay NULL until someone creates a heap context on this VM.
343 struct panthor_heap_pool *pool;
345 /** @lock: Lock used to protect access to @pool. */
349 /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
350 struct list_head node;
352 /** @for_mcu: True if this is the MCU VM. */
356 * @destroyed: True if the VM was destroyed.
358 * No further bind requests should be queued to a destroyed VM.
363 * @unusable: True if the VM has turned unusable because something
364 * bad happened during an asynchronous request.
366 * We don't try to recover from such failures, because this implies
367 * informing userspace about the specific operation that failed, and
368 * hoping the userspace driver can replay things from there. This all
369 * sounds very complicated for little gain.
371 * Instead, we should just flag the VM as unusable, and fail any
372 * further request targeting this VM.
374 * We also provide a way to query a VM state, so userspace can destroy
375 * it and create a new one.
377 * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
378 * situation, where the logical device needs to be re-created.
383 * @unhandled_fault: Unhandled fault happened.
385 * This should be reported to the scheduler, and the queue/group be
386 * flagged as faulty as a result.
388 bool unhandled_fault;
392 * struct panthor_vm_bind_job - VM bind job
394 struct panthor_vm_bind_job {
395 /** @base: Inherit from drm_sched_job. */
396 struct drm_sched_job base;
398 /** @refcount: Reference count. */
399 struct kref refcount;
401 /** @cleanup_op_ctx_work: Work used to clean up the VM operation context. */
402 struct work_struct cleanup_op_ctx_work;
404 /** @vm: VM targeted by the VM operation. */
405 struct panthor_vm *vm;
407 /** @ctx: Operation context. */
408 struct panthor_vm_op_ctx ctx;
412 * @pt_cache: Cache used to allocate MMU page tables.
414 * The pre-allocation pattern forces us to over-allocate to plan for
415 * the worst case scenario, and return the pages we didn't use.
417 * Having a kmem_cache allows us to speed up allocations.
419 static struct kmem_cache *pt_cache;
422 * alloc_pt() - Custom page table allocator
423 * @cookie: Cookie passed at page table allocation time.
424 * @size: Size of the page table. This size should be fixed,
425 * and determined at creation time based on the granule size.
428 * We want a custom allocator so we can use a cache for page table
429 * allocations and amortize the cost of the over-reservation that's
430 * done to allow asynchronous VM operations.
432 * Return: non-NULL on success, NULL if the allocation failed for any reason.
435 static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
437 struct panthor_vm *vm = cookie;
440 /* Allocation of the root page table happens during init. */
441 if (unlikely(!vm->root_page_table)) {
444 drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
445 p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
446 gfp | __GFP_ZERO, get_order(size));
447 page = p ? page_address(p) : NULL;
448 vm->root_page_table = page;
452 /* We're not supposed to have anything bigger than 4k here, because we picked a
453 * 4k granule size at init time.
455 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
458 /* We must have some op_ctx attached to the VM and it must have at least one free page.
461 if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
462 drm_WARN_ON(&vm->ptdev->base,
463 vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count))
466 page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++];
467 memset(page, 0, SZ_4K);
469 /* Page table entries don't use virtual addresses, which trips out
470 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
471 * are mixed with other fields, and I fear kmemleak won't detect those references anyway.
474 * Let's just ignore memory passed to the page-table driver for now.
476 kmemleak_ignore(page);
481 * free_pt() - Custom page table free function
482 * @cookie: Cookie passed at page table allocation time.
483 * @data: Page table to free.
484 * @size: Size of the page table. This size should be fixed,
485 * and determined at creation time based on the granule size.
487 static void free_pt(void *cookie, void *data, size_t size)
489 struct panthor_vm *vm = cookie;
491 if (unlikely(vm->root_page_table == data)) {
492 free_pages((unsigned long)data, get_order(size));
493 vm->root_page_table = NULL;
497 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
500 /* Return the page to the pt_cache. */
501 kmem_cache_free(pt_cache, data);
504 static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
509 /* Wait for the MMU status to indicate there is no active command, in
510 * case one is pending.
512 ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
513 val, !(val & AS_STATUS_AS_ACTIVE),
517 panthor_device_schedule_reset(ptdev);
518 drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n");
524 static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
528 /* write AS_COMMAND when MMU is ready to accept another command */
529 status = wait_ready(ptdev, as_nr);
531 gpu_write(ptdev, AS_COMMAND(as_nr), cmd);
536 static void lock_region(struct panthor_device *ptdev, u32 as_nr,
537 u64 region_start, u64 size)
541 u64 region_end = region_start + size;
547 * The locked region is a naturally aligned power of 2 block whose size is encoded in the low bits of the value written to AS_LOCKADDR.
549 * Calculate the desired start/end and look for the highest bit which
550 * differs. The smallest naturally aligned block must include this bit
551 * change, the desired region starts with this bit (and subsequent bits)
552 * zeroed and ends with the bit (and subsequent bits) set to one.
554 region_width = max(fls64(region_start ^ (region_end - 1)),
555 const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
558 * Mask off the low bits of region_start (which would be ignored by
559 * the hardware anyway)
561 region_start &= GENMASK_ULL(63, region_width);
563 region = region_width | region_start;
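/* Illustrative example (added for clarity, not part of the original code;
 * assumes AS_LOCK_REGION_MIN_SIZE is 32KiB): for region_start = 0x201000
 * and size = 0x3000, region_end = 0x204000 and
 * region_start ^ (region_end - 1) = 0x2fff, so fls64() returns 14. That is
 * below const_ilog2(32KiB) = 15, so region_width ends up as 15 - 1 = 14.
 * Masking the low 14 bits of region_start gives 0x200000, and the value
 * written to AS_LOCKADDR is 0x200000 | 14: a naturally aligned 32KiB block
 * starting at 0x200000 that covers the whole requested range.
 */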
565 /* Lock the region that needs to be updated */
566 gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
567 gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
568 write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
571 static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
572 u64 iova, u64 size, u32 op)
574 lockdep_assert_held(&ptdev->mmu->as.slots_lock);
579 if (op != AS_COMMAND_UNLOCK)
580 lock_region(ptdev, as_nr, iova, size);
582 /* Run the MMU operation */
583 write_cmd(ptdev, as_nr, op);
585 /* Wait for the flush to complete */
586 return wait_ready(ptdev, as_nr);
589 static int mmu_hw_do_operation(struct panthor_vm *vm,
590 u64 iova, u64 size, u32 op)
592 struct panthor_device *ptdev = vm->ptdev;
595 mutex_lock(&ptdev->mmu->as.slots_lock);
596 ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op);
597 mutex_unlock(&ptdev->mmu->as.slots_lock);
602 static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
603 u64 transtab, u64 transcfg, u64 memattr)
607 ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
611 gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
612 gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
614 gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
615 gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
617 gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
618 gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
620 return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
623 static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
627 ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
631 gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
632 gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
634 gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
635 gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
637 gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
638 gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
640 return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
643 static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value)
645 /* Bits 16 to 31 mean REQ_COMPLETE. */
646 return value & GENMASK(15, 0);
649 static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
655 * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
658 * Return: true if the VM has unhandled faults, false otherwise.
660 bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
662 return vm->unhandled_fault;
666 * panthor_vm_is_unusable() - Check if the VM is still usable
669 * Return: true if the VM is unusable, false otherwise.
671 bool panthor_vm_is_unusable(struct panthor_vm *vm)
676 static void panthor_vm_release_as_locked(struct panthor_vm *vm)
678 struct panthor_device *ptdev = vm->ptdev;
680 lockdep_assert_held(&ptdev->mmu->as.slots_lock);
682 if (drm_WARN_ON(&ptdev->base, vm->as.id < 0))
685 ptdev->mmu->as.slots[vm->as.id].vm = NULL;
686 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
687 refcount_set(&vm->as.active_cnt, 0);
688 list_del_init(&vm->as.lru_node);
693 * panthor_vm_active() - Flag a VM as active
694 * @vm: VM to flag as active.
696 * Assigns an address space to a VM so it can be used by the GPU/MCU.
698 * Return: 0 on success, a negative error code otherwise.
700 int panthor_vm_active(struct panthor_vm *vm)
702 struct panthor_device *ptdev = vm->ptdev;
703 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
704 struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
705 int ret = 0, as, cookie;
706 u64 transtab, transcfg;
708 if (!drm_dev_enter(&ptdev->base, &cookie))
711 if (refcount_inc_not_zero(&vm->as.active_cnt))
714 mutex_lock(&ptdev->mmu->as.slots_lock);
716 if (refcount_inc_not_zero(&vm->as.active_cnt))
721 /* Unhandled pagefault on this AS, the MMU was disabled. We need to
722 * re-enable the MMU after clearing+unmasking the AS interrupts.
724 if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
727 goto out_make_active;
730 /* Check for a free AS */
732 drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
735 as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
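/* Note (added for clarity): BIT(0) is ORed into the mask so ffz() never
 * returns slot 0 here; AS0 is kept for the MCU VM.
 */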
738 if (!(BIT(as) & ptdev->gpu_info.as_present)) {
739 struct panthor_vm *lru_vm;
741 lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
744 if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
749 drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt));
751 panthor_vm_release_as_locked(lru_vm);
754 /* Assign the free or reclaimed AS to the FD */
756 set_bit(as, &ptdev->mmu->as.alloc_mask);
757 ptdev->mmu->as.slots[as].vm = vm;
760 transtab = cfg->arm_lpae_s1_cfg.ttbr;
761 transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
763 AS_TRANSCFG_ADRMODE_AARCH64_4K |
764 AS_TRANSCFG_INA_BITS(55 - va_bits);
766 transcfg |= AS_TRANSCFG_PTW_SH_OS;
768 /* If the VM is re-activated, we clear the fault. */
769 vm->unhandled_fault = false;
771 /* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
772 * before enabling the AS.
774 if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
775 gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
776 ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
777 gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
780 ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);
784 refcount_set(&vm->as.active_cnt, 1);
785 list_del_init(&vm->as.lru_node);
789 mutex_unlock(&ptdev->mmu->as.slots_lock);
792 drm_dev_exit(cookie);
797 * panthor_vm_idle() - Flag a VM idle
798 * @vm: VM to flag as idle.
800 * When we know the GPU is done with the VM (no more jobs to process),
801 * we can relinquish the AS slot attached to this VM, if any.
803 * We don't release the slot immediately, but instead place the VM in
804 * the LRU list, so it can be evicted if another VM needs an AS slot.
805 * This way, VMs stay attached to the AS they were given until we run
806 * out of free slots, limiting the number of MMU operations (TLB flush
807 * and other AS updates).
809 void panthor_vm_idle(struct panthor_vm *vm)
811 struct panthor_device *ptdev = vm->ptdev;
813 if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
816 if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
817 list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);
819 refcount_set(&vm->as.active_cnt, 0);
820 mutex_unlock(&ptdev->mmu->as.slots_lock);
823 static void panthor_vm_stop(struct panthor_vm *vm)
825 drm_sched_stop(&vm->sched, NULL);
828 static void panthor_vm_start(struct panthor_vm *vm)
830 drm_sched_start(&vm->sched, true);
834 * panthor_vm_as() - Get the AS slot attached to a VM
835 * @vm: VM to get the AS slot of.
837 * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
839 int panthor_vm_as(struct panthor_vm *vm)
844 static size_t get_pgsize(u64 addr, size_t size, size_t *count)
847 * io-pgtable only operates on multiple pages within a single table
848 * entry, so we need to split at boundaries of the table size, i.e.
849 * the next block size up. The distance from address A to the next
850 * boundary of block size B is logically B - A % B, but in unsigned
851 * two's complement where B is a power of two we get the equivalence
852 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
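*
* Illustrative example (not part of the original comment): for
* addr = 0x201000 and size = 0x400000, blk_offset = -0x201000 % SZ_2M =
* 0x1ff000, so this call returns SZ_4K with *count = 0x1ff, mapping 4K
* pages up to the next 2MiB boundary; once addr is 2MiB-aligned, the
* following calls can use SZ_2M pages.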
854 size_t blk_offset = -addr % SZ_2M;
856 if (blk_offset || size < SZ_2M) {
857 *count = min_not_zero(blk_offset, size) / SZ_4K;
860 blk_offset = -addr % SZ_1G ?: SZ_1G;
861 *count = min(blk_offset, size) / SZ_2M;
865 static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
867 struct panthor_device *ptdev = vm->ptdev;
873 /* If the device is unplugged, we just silently skip the flush. */
874 if (!drm_dev_enter(&ptdev->base, &cookie))
877 /* Flush the PTs only if we're already awake */
878 if (pm_runtime_active(ptdev->base.dev))
879 ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
881 drm_dev_exit(cookie);
885 static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
887 struct panthor_device *ptdev = vm->ptdev;
888 struct io_pgtable_ops *ops = vm->pgtbl_ops;
891 drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);
893 while (offset < size) {
894 size_t unmapped_sz = 0, pgcount;
895 size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
897 unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);
899 if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
900 drm_err(&ptdev->base, "failed to unmap range %llx-%llx (requested range %llx-%llx)\n",
901 iova + offset + unmapped_sz,
902 iova + offset + pgsize * pgcount,
904 panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
907 offset += unmapped_sz;
910 return panthor_vm_flush_range(vm, iova, size);
914 panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
915 struct sg_table *sgt, u64 offset, u64 size)
917 struct panthor_device *ptdev = vm->ptdev;
919 struct scatterlist *sgl;
920 struct io_pgtable_ops *ops = vm->pgtbl_ops;
921 u64 start_iova = iova;
927 for_each_sgtable_dma_sg(sgt, sgl, count) {
928 dma_addr_t paddr = sg_dma_address(sgl);
929 size_t len = sg_dma_len(sgl);
938 len = min_t(size_t, len, size);
941 drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
942 vm->as.id, iova, &paddr, len);
945 size_t pgcount, mapped = 0;
946 size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
948 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
949 GFP_KERNEL, &mapped);
954 if (drm_WARN_ON(&ptdev->base, !ret && !mapped))
958 /* If something failed, unmap what we've already mapped before
959 * returning. The unmap call is not supposed to fail.
961 drm_WARN_ON(&ptdev->base,
962 panthor_vm_unmap_pages(vm, start_iova,
972 return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
975 static int flags_to_prot(u32 flags)
979 if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC)
980 prot |= IOMMU_NOEXEC;
982 if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED))
985 if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY)
988 prot |= IOMMU_READ | IOMMU_WRITE;
994 * panthor_vm_alloc_va() - Allocate a region in the auto-va space
995 * @vm: VM to allocate a region on.
996 * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
997 * wants the VA to be automatically allocated from the auto-VA range.
998 * @size: size of the VA range.
999 * @va_node: drm_mm_node to initialize. Must be zero-initialized.
1001 * Some GPU objects, like heap chunks, are fully managed by the kernel and
1002 * need to be mapped to the userspace VM, in the region reserved for kernel objects.
1005 * This function takes care of allocating a region in the kernel auto-VA space.
1007 * Return: 0 on success, an error code otherwise.
1010 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
1011 struct drm_mm_node *va_node)
1015 if (!size || (size & ~PAGE_MASK))
1018 if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
1021 mutex_lock(&vm->mm_lock);
1022 if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
1023 va_node->start = va;
1024 va_node->size = size;
1025 ret = drm_mm_reserve_node(&vm->mm, va_node);
1027 ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size,
1028 size >= SZ_2M ? SZ_2M : SZ_4K,
1029 0, vm->kernel_auto_va.start,
1030 vm->kernel_auto_va.end,
1031 DRM_MM_INSERT_BEST);
1033 mutex_unlock(&vm->mm_lock);
1039 * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
1040 * @vm: VM to free the region on.
1041 * @va_node: Memory node representing the region to free.
1043 void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
1045 mutex_lock(&vm->mm_lock);
1046 drm_mm_remove_node(va_node);
1047 mutex_unlock(&vm->mm_lock);
1050 static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
1052 struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
1053 struct drm_gpuvm *vm = vm_bo->vm;
1056 /* We must retain the GEM before calling drm_gpuvm_bo_put(),
1057 * otherwise the mutex might be destroyed while we hold it.
1058 * Same goes for the VM, since we take the VM resv lock.
1060 drm_gem_object_get(&bo->base.base);
1063 /* We take the resv lock to protect against concurrent accesses to the
1064 * gpuvm evicted/extobj lists that are modified in
1065 * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
1066 * releases the last vm_bo reference.
1067 * We take the BO GPUVA list lock to protect the vm_bo removal from the GEM gpuva list.
1070 dma_resv_lock(drm_gpuvm_resv(vm), NULL);
1071 mutex_lock(&bo->gpuva_list_lock);
1072 unpin = drm_gpuvm_bo_put(vm_bo);
1073 mutex_unlock(&bo->gpuva_list_lock);
1074 dma_resv_unlock(drm_gpuvm_resv(vm));
1076 /* If the vm_bo object was destroyed, release the pin reference that
1077 * was held by this object.
1079 if (unpin && !bo->base.base.import_attach)
1080 drm_gem_shmem_unpin(&bo->base);
1083 drm_gem_object_put(&bo->base.base);
1086 static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1087 struct panthor_vm *vm)
1089 struct panthor_vma *vma, *tmp_vma;
1091 u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
1092 op_ctx->rsvd_page_tables.ptr;
1094 if (remaining_pt_count) {
1095 kmem_cache_free_bulk(pt_cache, remaining_pt_count,
1096 op_ctx->rsvd_page_tables.pages +
1097 op_ctx->rsvd_page_tables.ptr);
1100 kfree(op_ctx->rsvd_page_tables.pages);
1102 if (op_ctx->map.vm_bo)
1103 panthor_vm_bo_put(op_ctx->map.vm_bo);
1105 for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
1106 kfree(op_ctx->preallocated_vmas[i]);
1108 list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
1109 list_del(&vma->node);
1110 panthor_vm_bo_put(vma->base.vm_bo);
1115 static struct panthor_vma *
1116 panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
1118 for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
1119 struct panthor_vma *vma = op_ctx->preallocated_vmas[i];
1122 op_ctx->preallocated_vmas[i] = NULL;
1131 panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
1135 switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
1136 case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
1137 /* One VMA for the new mapping, and two more VMAs for the remap case
1138 * which might contain both a prev and next VA.
1143 case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
1144 /* Partial unmaps might trigger a remap with either a prev or a next VA, but not both.
1154 for (u32 i = 0; i < vma_count; i++) {
1155 struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
1160 op_ctx->preallocated_vmas[i] = vma;
1166 #define PANTHOR_VM_BIND_OP_MAP_FLAGS \
1167 (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
1168 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
1169 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
1170 DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
1172 static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1173 struct panthor_vm *vm,
1174 struct panthor_gem_object *bo,
1179 struct drm_gpuvm_bo *preallocated_vm_bo;
1180 struct sg_table *sgt = NULL;
1187 if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) ||
1188 (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
1191 /* Make sure the VA and size are aligned and in-bounds. */
1192 if (size > bo->base.base.size || offset > bo->base.base.size - size)
1195 /* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
1196 if (bo->exclusive_vm_root_gem &&
1197 bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm))
1200 memset(op_ctx, 0, sizeof(*op_ctx));
1201 INIT_LIST_HEAD(&op_ctx->returned_vmas);
1202 op_ctx->flags = flags;
1203 op_ctx->va.range = size;
1204 op_ctx->va.addr = va;
1206 ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
1210 if (!bo->base.base.import_attach) {
1211 /* Pre-reserve the BO pages, so the map operation doesn't have to allocate.
1214 ret = drm_gem_shmem_pin(&bo->base);
1219 sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
1221 if (!bo->base.base.import_attach)
1222 drm_gem_shmem_unpin(&bo->base);
1228 op_ctx->map.sgt = sgt;
1230 preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
1231 if (!preallocated_vm_bo) {
1232 if (!bo->base.base.import_attach)
1233 drm_gem_shmem_unpin(&bo->base);
1239 mutex_lock(&bo->gpuva_list_lock);
1240 op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
1241 mutex_unlock(&bo->gpuva_list_lock);
1243 /* If a vm_bo for this <VM,BO> combination exists, it already
1244 * retains a pin ref, and we can release the one we took earlier.
1246 * If our pre-allocated vm_bo is picked, it now retains the pin ref,
1247 * which will be released in panthor_vm_bo_put().
1249 if (preallocated_vm_bo != op_ctx->map.vm_bo &&
1250 !bo->base.base.import_attach)
1251 drm_gem_shmem_unpin(&bo->base);
1253 op_ctx->map.bo_offset = offset;
1255 /* L1, L2 and L3 page tables.
1256 * We could optimize L3 allocation by iterating over the sgt and merging
1257 * 2M contiguous blocks, but it's simpler to over-provision and return
1258 * the pages if they're not used.
1260 pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
1261 ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
1262 ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
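/* Worked example (illustrative, not from the original code): for
 * va = 8 GiB and size = 4 MiB, this reserves one page for the
 * 512GiB-span level, one for the 1GiB-span level and two for the
 * 2MiB-span level (the range covers two 2MiB blocks), i.e. pt_count = 4.
 * Pages left unused after the operation are handed back to pt_cache by
 * panthor_vm_cleanup_op_ctx().
 */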
1264 op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
1265 sizeof(*op_ctx->rsvd_page_tables.pages),
1267 if (!op_ctx->rsvd_page_tables.pages)
1270 ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
1271 op_ctx->rsvd_page_tables.pages);
1272 op_ctx->rsvd_page_tables.count = ret;
1273 if (ret != pt_count) {
1278 /* Insert BO into the extobj list last, when we know nothing can fail. */
1279 dma_resv_lock(panthor_vm_resv(vm), NULL);
1280 drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
1281 dma_resv_unlock(panthor_vm_resv(vm));
1286 panthor_vm_cleanup_op_ctx(op_ctx, vm);
1290 static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1291 struct panthor_vm *vm,
1297 memset(op_ctx, 0, sizeof(*op_ctx));
1298 INIT_LIST_HEAD(&op_ctx->returned_vmas);
1299 op_ctx->va.range = size;
1300 op_ctx->va.addr = va;
1301 op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
1303 /* Pre-allocate L3 page tables to account for the split-2M-block
1304 * situation on unmap.
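*
* A partial unmap can split at most two 2MiB blocks, one at each end of
* the range. Illustrative example (not part of the original comment):
* va = 0x201000, size = 0x400000 only partially covers the 2MiB blocks at
* 0x200000 and 0x600000, so up to two spare L3 tables are reserved.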
1306 if (va != ALIGN(va, SZ_2M))
1309 if (va + size != ALIGN(va + size, SZ_2M) &&
1310 ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
1313 ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
1318 op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
1319 sizeof(*op_ctx->rsvd_page_tables.pages),
1321 if (!op_ctx->rsvd_page_tables.pages)
1324 ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
1325 op_ctx->rsvd_page_tables.pages);
1326 if (ret != pt_count) {
1330 op_ctx->rsvd_page_tables.count = pt_count;
1336 panthor_vm_cleanup_op_ctx(op_ctx, vm);
1340 static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
1341 struct panthor_vm *vm)
1343 memset(op_ctx, 0, sizeof(*op_ctx));
1344 INIT_LIST_HEAD(&op_ctx->returned_vmas);
1345 op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
1349 * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
1350 * @vm: VM to look into.
1351 * @va: Virtual address to search for.
1352 * @bo_offset: Offset of the GEM object mapped at this virtual address.
1353 * Only valid on success.
1355 * The object returned by this function might no longer be mapped when the
1356 * function returns. It's the caller's responsibility to ensure there's no
1357 * concurrent map/unmap operations making the returned value invalid, or
1358 * make sure it doesn't matter if the object is no longer mapped.
1360 * Return: A valid pointer on success, an ERR_PTR() otherwise.
1362 struct panthor_gem_object *
1363 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
1365 struct panthor_gem_object *bo = ERR_PTR(-ENOENT);
1366 struct drm_gpuva *gpuva;
1367 struct panthor_vma *vma;
1369 /* Take the VM lock to prevent concurrent map/unmap operations. */
1370 mutex_lock(&vm->op_lock);
1371 gpuva = drm_gpuva_find_first(&vm->base, va, 1);
1372 vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
1373 if (vma && vma->base.gem.obj) {
1374 drm_gem_object_get(vma->base.gem.obj);
1375 bo = to_panthor_bo(vma->base.gem.obj);
1376 *bo_offset = vma->base.gem.offset + (va - vma->base.va.addr);
1378 mutex_unlock(&vm->op_lock);
1383 #define PANTHOR_VM_MIN_KERNEL_VA_SIZE SZ_256M
1386 panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
1391 /* Make sure we have a minimum amount of VA space for kernel objects. */
1392 if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE)
1395 if (args->user_va_range) {
1396 /* Use the user provided value if != 0. */
1397 user_va_range = args->user_va_range;
1398 } else if (TASK_SIZE_OF(current) < full_va_range) {
1399 /* If the task VM size is smaller than the GPU VA range, pick this
1400 * as our default user VA range, so userspace can CPU/GPU map buffers
1401 * at the same address.
1403 user_va_range = TASK_SIZE_OF(current);
1405 /* If the GPU VA range is smaller than the task VM size, we
1406 * just have to live with the fact we won't be able to map
1407 * all buffers at the same GPU/CPU address.
1409 * If the GPU VA range is bigger than 4G (more than 32-bit of
1410 * VA), we split the range in two, and assign half of it to
1411 * the user and the other half to the kernel, if it's not, we
1412 * keep the kernel VA space as small as possible.
1414 user_va_range = full_va_range > SZ_4G ?
1416 full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
1419 if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range)
1420 user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
1422 return user_va_range;
1425 #define PANTHOR_VM_CREATE_FLAGS 0
1428 panthor_vm_create_check_args(const struct panthor_device *ptdev,
1429 const struct drm_panthor_vm_create *args,
1430 u64 *kernel_va_start, u64 *kernel_va_range)
1432 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
1433 u64 full_va_range = 1ull << va_bits;
1436 if (args->flags & ~PANTHOR_VM_CREATE_FLAGS)
1439 user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range);
1440 if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range))
1443 /* Pick a kernel VA range that's a power of two, to have a clear split. */
1444 *kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range);
1445 *kernel_va_start = full_va_range - *kernel_va_range;
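/* Illustrative example (values are hypothetical): with va_bits = 48 the
 * GPU VA space is 256 TiB. If userspace passes user_va_range = 0 and the
 * CPU task size is 512 GiB, the default user range is 512 GiB; the kernel
 * range is then rounddown_pow_of_two(256 TiB - 512 GiB) = 128 TiB and
 * starts at 128 TiB.
 */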
1450 * Only 32 VMs per open file. If that becomes a limiting factor, we can
1451 * increase this number.
1453 #define PANTHOR_MAX_VMS_PER_FILE 32
1456 * panthor_vm_pool_create_vm() - Create a VM
1457 * @ptdev: Device.
1458 * @pool: The VM pool to create this VM on.
1459 * @args: The VM_CREATE ioctl arguments.
1461 * Return: a positive VM ID on success, a negative error code otherwise.
1463 int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
1464 struct panthor_vm_pool *pool,
1465 struct drm_panthor_vm_create *args)
1467 u64 kernel_va_start, kernel_va_range;
1468 struct panthor_vm *vm;
1472 ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range);
1476 vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range,
1477 kernel_va_start, kernel_va_range);
1481 ret = xa_alloc(&pool->xa, &id, vm,
1482 XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL);
1489 args->user_va_range = kernel_va_start;
1493 static void panthor_vm_destroy(struct panthor_vm *vm)
1498 vm->destroyed = true;
1500 mutex_lock(&vm->heaps.lock);
1501 panthor_heap_pool_destroy(vm->heaps.pool);
1502 vm->heaps.pool = NULL;
1503 mutex_unlock(&vm->heaps.lock);
1505 drm_WARN_ON(&vm->ptdev->base,
1506 panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
1511 * panthor_vm_pool_destroy_vm() - Destroy a VM.
1513 * @handle: VM handle.
1515 * This function doesn't free the VM object or its resources, it just kills
1516 * all mappings, and makes sure nothing can be mapped after that point.
1518 * If there were any active jobs at the time this function is called, these
1519 * jobs should experience page faults and be killed as a result.
1521 * The VM resources are freed when the last reference on the VM object is released.
1524 int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
1526 struct panthor_vm *vm;
1528 vm = xa_erase(&pool->xa, handle);
1530 panthor_vm_destroy(vm);
1532 return vm ? 0 : -EINVAL;
1536 * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
1537 * @pool: VM pool to check.
1538 * @handle: Handle of the VM to retrieve.
1540 * Return: A valid pointer if the VM exists, NULL otherwise.
1543 panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
1545 struct panthor_vm *vm;
1547 vm = panthor_vm_get(xa_load(&pool->xa, handle));
1553 * panthor_vm_pool_destroy() - Destroy a VM pool.
1556 * Destroy all VMs in the pool, and release the pool resources.
1558 * Note that VMs can outlive the pool they were created from if other
1559 * objects hold a reference to these VMs.
1561 void panthor_vm_pool_destroy(struct panthor_file *pfile)
1563 struct panthor_vm *vm;
1569 xa_for_each(&pfile->vms->xa, i, vm)
1570 panthor_vm_destroy(vm);
1572 xa_destroy(&pfile->vms->xa);
1577 * panthor_vm_pool_create() - Create a VM pool
1580 * Return: 0 on success, a negative error code otherwise.
1582 int panthor_vm_pool_create(struct panthor_file *pfile)
1584 pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL);
1588 xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);
1592 /* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */
1593 static void mmu_tlb_flush_all(void *cookie)
1597 static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
1601 static const struct iommu_flush_ops mmu_tlb_ops = {
1602 .tlb_flush_all = mmu_tlb_flush_all,
1603 .tlb_flush_walk = mmu_tlb_flush_walk,
1606 static const char *access_type_name(struct panthor_device *ptdev,
1609 switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
1610 case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
1612 case AS_FAULTSTATUS_ACCESS_TYPE_READ:
1614 case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
1616 case AS_FAULTSTATUS_ACCESS_TYPE_EX:
1619 drm_WARN_ON(&ptdev->base, 1);
1624 static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
1626 bool has_unhandled_faults = false;
1628 status = panthor_mmu_fault_mask(ptdev, status);
1630 u32 as = ffs(status | (status >> 16)) - 1;
1631 u32 mask = panthor_mmu_as_fault_mask(ptdev, as);
1639 fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
1640 addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
1641 addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
1643 /* decode the fault status */
1644 exception_type = fault_status & 0xFF;
1645 access_type = (fault_status >> 8) & 0x3;
1646 source_id = (fault_status >> 16);
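/* Field layout implied by the decoding above: bits 7:0 carry the exception
 * type, bits 9:8 the access type, bit 10 distinguishes decoder from slave
 * faults (see the message below), and bits 31:16 the source ID.
 */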
1648 mutex_lock(&ptdev->mmu->as.slots_lock);
1650 ptdev->mmu->as.faulty_mask |= mask;
1652 panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);
1654 /* terminal fault, print info about the fault */
1655 drm_err(&ptdev->base,
1656 "Unhandled Page fault in AS%d at VA 0x%016llX\n"
1657 "raw fault status: 0x%X\n"
1658 "decoded fault status: %s\n"
1659 "exception type 0x%X: %s\n"
1660 "access type 0x%X: %s\n"
1664 (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
1665 exception_type, panthor_exception_name(ptdev, exception_type),
1666 access_type, access_type_name(ptdev, fault_status),
1669 /* Ignore MMU interrupts on this AS until it's been re-enabled.
1672 ptdev->mmu->irq.mask = new_int_mask;
1673 gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
1675 if (ptdev->mmu->as.slots[as].vm)
1676 ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
1678 /* Disable the MMU to kill jobs on this AS. */
1679 panthor_mmu_as_disable(ptdev, as);
1680 mutex_unlock(&ptdev->mmu->as.slots_lock);
1683 has_unhandled_faults = true;
1686 if (has_unhandled_faults)
1687 panthor_sched_report_mmu_fault(ptdev);
1689 PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
1692 * panthor_mmu_suspend() - Suspend the MMU logic
1695 * All we do here is de-assign the AS slots on all active VMs, so things
1696 * get flushed to main memory, and no further accesses to these VMs are possible.
1699 * We also suspend the MMU IRQ.
1701 void panthor_mmu_suspend(struct panthor_device *ptdev)
1703 mutex_lock(&ptdev->mmu->as.slots_lock);
1704 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1705 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1708 drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
1709 panthor_vm_release_as_locked(vm);
1712 mutex_unlock(&ptdev->mmu->as.slots_lock);
1714 panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1718 * panthor_mmu_resume() - Resume the MMU logic
1723 * We don't re-enable previously active VMs. We assume other parts of the
1724 * driver will call panthor_vm_active() on the VMs they intend to use.
1726 void panthor_mmu_resume(struct panthor_device *ptdev)
1728 mutex_lock(&ptdev->mmu->as.slots_lock);
1729 ptdev->mmu->as.alloc_mask = 0;
1730 ptdev->mmu->as.faulty_mask = 0;
1731 mutex_unlock(&ptdev->mmu->as.slots_lock);
1733 panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
1737 * panthor_mmu_pre_reset() - Prepare for a reset
1740 * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
1741 * don't get asked to do a VM operation while the GPU is down.
1743 * We don't cleanly shut down the AS slots here, because the reset might
1744 * come from an AS_ACTIVE_BIT stuck situation.
1746 void panthor_mmu_pre_reset(struct panthor_device *ptdev)
1748 struct panthor_vm *vm;
1750 panthor_mmu_irq_suspend(&ptdev->mmu->irq);
1752 mutex_lock(&ptdev->mmu->vm.lock);
1753 ptdev->mmu->vm.reset_in_progress = true;
1754 list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
1755 panthor_vm_stop(vm);
1756 mutex_unlock(&ptdev->mmu->vm.lock);
1760 * panthor_mmu_post_reset() - Restore things after a reset
1763 * Put the MMU logic back in action after a reset. That implies resuming the
1764 * IRQ and re-enabling the VM_BIND queues.
1766 void panthor_mmu_post_reset(struct panthor_device *ptdev)
1768 struct panthor_vm *vm;
1770 mutex_lock(&ptdev->mmu->as.slots_lock);
1772 /* Now that the reset is effective, we can assume that none of the
1773 * AS slots are set up, and clear the faulty flags too.
1775 ptdev->mmu->as.alloc_mask = 0;
1776 ptdev->mmu->as.faulty_mask = 0;
1778 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
1779 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
1782 panthor_vm_release_as_locked(vm);
1785 mutex_unlock(&ptdev->mmu->as.slots_lock);
1787 panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
1789 /* Restart the VM_BIND queues. */
1790 mutex_lock(&ptdev->mmu->vm.lock);
1791 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
1792 panthor_vm_start(vm);
1794 ptdev->mmu->vm.reset_in_progress = false;
1795 mutex_unlock(&ptdev->mmu->vm.lock);
1798 static void panthor_vm_free(struct drm_gpuvm *gpuvm)
1800 struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base);
1801 struct panthor_device *ptdev = vm->ptdev;
1803 mutex_lock(&vm->heaps.lock);
1804 if (drm_WARN_ON(&ptdev->base, vm->heaps.pool))
1805 panthor_heap_pool_destroy(vm->heaps.pool);
1806 mutex_unlock(&vm->heaps.lock);
1807 mutex_destroy(&vm->heaps.lock);
1809 mutex_lock(&ptdev->mmu->vm.lock);
1810 list_del(&vm->node);
1811 /* Restore the scheduler state so we can call drm_sched_entity_destroy()
1812 * and drm_sched_fini(). If we get there, that means we have no job left
1813 * and no new jobs can be queued, so we can start the scheduler without
1814 * risking interfering with the reset.
1816 if (ptdev->mmu->vm.reset_in_progress)
1817 panthor_vm_start(vm);
1818 mutex_unlock(&ptdev->mmu->vm.lock);
1820 drm_sched_entity_destroy(&vm->entity);
1821 drm_sched_fini(&vm->sched);
1823 mutex_lock(&ptdev->mmu->as.slots_lock);
1824 if (vm->as.id >= 0) {
1827 if (drm_dev_enter(&ptdev->base, &cookie)) {
1828 panthor_mmu_as_disable(ptdev, vm->as.id);
1829 drm_dev_exit(cookie);
1832 ptdev->mmu->as.slots[vm->as.id].vm = NULL;
1833 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
1834 list_del(&vm->as.lru_node);
1836 mutex_unlock(&ptdev->mmu->as.slots_lock);
1838 free_io_pgtable_ops(vm->pgtbl_ops);
1840 drm_mm_takedown(&vm->mm);
1845 * panthor_vm_put() - Release a reference on a VM
1846 * @vm: VM to release the reference on. Can be NULL.
1848 void panthor_vm_put(struct panthor_vm *vm)
1850 drm_gpuvm_put(vm ? &vm->base : NULL);
1854 * panthor_vm_get() - Get a VM reference
1855 * @vm: VM to get the reference on. Can be NULL.
1857 * Return: @vm value.
1859 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
1862 drm_gpuvm_get(&vm->base);
1868 * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
1869 * @vm: VM to query the heap pool on.
1870 * @create: True if the heap pool should be created when it doesn't exist.
1872 * Heap pools are per-VM. This function allows one to retrieve the heap pool attached to a VM.
1875 * If no heap pool exists yet, and @create is true, we create one.
1877 * The returned panthor_heap_pool should be released with panthor_heap_pool_put().
1879 * Return: A valid pointer on success, an ERR_PTR() otherwise.
1881 struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
1883 struct panthor_heap_pool *pool;
1885 mutex_lock(&vm->heaps.lock);
1886 if (!vm->heaps.pool && create) {
1888 pool = ERR_PTR(-EINVAL);
1890 pool = panthor_heap_pool_create(vm->ptdev, vm);
1893 vm->heaps.pool = panthor_heap_pool_get(pool);
1895 pool = panthor_heap_pool_get(vm->heaps.pool);
1897 mutex_unlock(&vm->heaps.lock);
1902 static u64 mair_to_memattr(u64 mair)
1907 for (i = 0; i < 8; i++) {
1908 u8 in_attr = mair >> (8 * i), out_attr;
1909 u8 outer = in_attr >> 4, inner = in_attr & 0xf;
1911 /* For caching to be enabled, the inner and outer caching policies
1912 * have to both be write-back. If one of them is write-through
1913 * or non-cacheable, we just choose non-cacheable. Device
1914 * memory is also translated to non-cacheable.
1916 if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
1917 out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
1918 AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
1919 AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
1921 /* Use SH_CPU_INNER mode so SH_IS, which is used when
1922 * IOMMU_CACHE is set, actually maps to the standard
1923 * definition of inner-shareable and not Mali's
1924 * internal-shareable mode.
1926 out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
1927 AS_MEMATTR_AARCH64_SH_CPU_INNER |
1928 AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
1931 memattr |= (u64)out_attr << (8 * i);
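/* Illustrative example (not part of the original code): a MAIR attribute
 * byte of 0xff (inner/outer write-back, read/write-allocate) takes the
 * write-back + CPU-inner-shareable branch with both allocation hints set,
 * while 0x44 (normal non-cacheable) and device-memory attributes fall into
 * the non-cacheable branch above.
 */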
1937 static void panthor_vma_link(struct panthor_vm *vm,
1938 struct panthor_vma *vma,
1939 struct drm_gpuvm_bo *vm_bo)
1941 struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
1943 mutex_lock(&bo->gpuva_list_lock);
1944 drm_gpuva_link(&vma->base, vm_bo);
1945 drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
1946 mutex_unlock(&bo->gpuva_list_lock);
1949 static void panthor_vma_unlink(struct panthor_vm *vm,
1950 struct panthor_vma *vma)
1952 struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
1953 struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
1955 mutex_lock(&bo->gpuva_list_lock);
1956 drm_gpuva_unlink(&vma->base);
1957 mutex_unlock(&bo->gpuva_list_lock);
1959 /* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
1960 * when entering this function, so we can implement deferred VMA
1961 * destruction. Re-assign it here.
1963 vma->base.vm_bo = vm_bo;
1964 list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
1967 static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
1969 INIT_LIST_HEAD(&vma->node);
1973 #define PANTHOR_VM_MAP_FLAGS \
1974 (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
1975 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
1976 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)
1978 static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
1980 struct panthor_vm *vm = priv;
1981 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
1982 struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
1988 panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
1990 ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
1991 op_ctx->map.sgt, op->map.gem.offset,
1996 /* Ref owned by the mapping now, clear the obj field so we don't release the
1997 * pinning/obj ref behind GPUVA's back.
1999 drm_gpuva_map(&vm->base, &vma->base, &op->map);
2000 panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
2001 op_ctx->map.vm_bo = NULL;
2005 static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
2008 struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
2009 struct panthor_vm *vm = priv;
2010 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
2011 struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
2012 u64 unmap_start, unmap_range;
2015 drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
2016 ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
2020 if (op->remap.prev) {
2021 prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
2022 panthor_vma_init(prev_vma, unmap_vma->flags);
2025 if (op->remap.next) {
2026 next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
2027 panthor_vma_init(next_vma, unmap_vma->flags);
2030 drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL,
2031 next_vma ? &next_vma->base : NULL,
2035 /* panthor_vma_link() transfers the vm_bo ownership to
2036 * the VMA object. Since the vm_bo we're passing is still
2037 * owned by the old mapping which will be released when this
2038 * mapping is destroyed, we need to grab a ref here.
2040 panthor_vma_link(vm, prev_vma,
2041 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
2045 panthor_vma_link(vm, next_vma,
2046 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
2049 panthor_vma_unlink(vm, unmap_vma);
2053 static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
2056 struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
2057 struct panthor_vm *vm = priv;
2060 ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
2061 unmap_vma->base.va.range);
2062 if (drm_WARN_ON(&vm->ptdev->base, ret))
2065 drm_gpuva_unmap(&op->unmap);
2066 panthor_vma_unlink(vm, unmap_vma);
2070 static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
2071 .vm_free = panthor_vm_free,
2072 .sm_step_map = panthor_gpuva_sm_step_map,
2073 .sm_step_remap = panthor_gpuva_sm_step_remap,
2074 .sm_step_unmap = panthor_gpuva_sm_step_unmap,
2078 * panthor_vm_resv() - Get the dma_resv object attached to a VM.
2079 * @vm: VM to get the dma_resv of.
2081 * Return: A dma_resv object.
2083 struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
2085 return drm_gpuvm_resv(&vm->base);
2088 struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
2093 return vm->base.r_obj;
2097 panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
2098 bool flag_vm_unusable_on_failure)
2100 u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK;
2103 if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY)
2106 mutex_lock(&vm->op_lock);
2109 case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
2115 ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
2116 op->map.vm_bo->obj, op->map.bo_offset);
2119 case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
2120 ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
2128 if (ret && flag_vm_unusable_on_failure)
2129 vm->unusable = true;
2132 mutex_unlock(&vm->op_lock);
2137 static struct dma_fence *
2138 panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
2140 struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
2144 /* Not only do we report an error whose result is propagated to the
2145 * drm_sched finished fence, but we also flag the VM as unusable, because
2146 * a failure in an async VM_BIND leaves it in an inconsistent state: the VM
2147 * needs to be destroyed and recreated.
2149 cookie = dma_fence_begin_signalling();
2150 ret = panthor_vm_exec_op(job->vm, &job->ctx, true);
2151 dma_fence_end_signalling(cookie);
2153 return ret ? ERR_PTR(ret) : NULL;
2156 static void panthor_vm_bind_job_release(struct kref *kref)
2158 struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount);
2160 if (job->base.s_fence)
2161 drm_sched_job_cleanup(&job->base);
2163 panthor_vm_cleanup_op_ctx(&job->ctx, job->vm);
2164 panthor_vm_put(job->vm);
2169 * panthor_vm_bind_job_put() - Release a VM_BIND job reference
2170 * @sched_job: Job to release the reference on.
2172 void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
2174 struct panthor_vm_bind_job *job =
2175 container_of(sched_job, struct panthor_vm_bind_job, base);
2178 kref_put(&job->refcount, panthor_vm_bind_job_release);
2182 panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
2184 struct panthor_vm_bind_job *job =
2185 container_of(sched_job, struct panthor_vm_bind_job, base);
2187 drm_sched_job_cleanup(sched_job);
2189 /* Do the heavy cleanups asynchronously, so we're out of the
2190 * dma-signaling path and can acquire dma-resv locks safely.
2192 queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work);
2195 static enum drm_gpu_sched_stat
2196 panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
2198 WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
2199 return DRM_GPU_SCHED_STAT_NOMINAL;
2202 static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
2203 .run_job = panthor_vm_bind_run_job,
2204 .free_job = panthor_vm_bind_free_job,
2205 .timedout_job = panthor_vm_bind_timedout_job,
2209 * panthor_vm_create() - Create a VM
2211 * @for_mcu: True if this is the FW MCU VM.
2212 * @kernel_va_start: Start of the range reserved for kernel BO mapping.
2213 * @kernel_va_size: Size of the range reserved for kernel BO mapping.
2214 * @auto_kernel_va_start: Start of the auto-VA kernel range.
2215 * @auto_kernel_va_size: Size of the auto-VA kernel range.
2217 * Return: A valid pointer on success, an ERR_PTR() otherwise.
struct panthor_vm *
panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
		  u64 kernel_va_start, u64 kernel_va_size,
		  u64 auto_kernel_va_start, u64 auto_kernel_va_size)
{
	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
	u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
	u64 full_va_range = 1ull << va_bits;
	struct drm_gem_object *dummy_gem;
	struct drm_gpu_scheduler *sched;
	struct io_pgtable_cfg pgtbl_cfg;
	u64 mair, min_va, va_range;
	struct panthor_vm *vm;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return ERR_PTR(-ENOMEM);

	/* We allocate a dummy GEM for the VM. */
	dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base);
	if (!dummy_gem) {
		ret = -ENOMEM;
		goto err_free_vm;
	}

	mutex_init(&vm->heaps.lock);
	vm->for_mcu = for_mcu;
	vm->ptdev = ptdev;
	mutex_init(&vm->op_lock);

	if (for_mcu) {
		/* CSF MCU is a cortex M7, and can only address 4G */
		min_va = full_va_range - SZ_4G;
		va_range = SZ_4G;
	} else {
		min_va = 0;
		va_range = full_va_range;
	}

	mutex_init(&vm->mm_lock);
	drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size);
	vm->kernel_auto_va.start = auto_kernel_va_start;
	vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1;

	INIT_LIST_HEAD(&vm->node);
	INIT_LIST_HEAD(&vm->as.lru_node);
	vm->as.id = -1;
	refcount_set(&vm->as.active_cnt, 0);

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = SZ_4K | SZ_2M,
		.ias = va_bits,
		.oas = pa_bits,
		.coherent_walk = ptdev->coherent,
		.tlb = &mmu_tlb_ops,
		.iommu_dev = ptdev->base.dev,
		.alloc = alloc_pt,
		.free = free_pt,
	};

	vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
	if (!vm->pgtbl_ops) {
		ret = -EINVAL;
		goto err_mm_takedown;
	}

	/* Bind operations are synchronous for now, no timeout needed. */
	ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq,
			     1, 1, 0,
			     MAX_SCHEDULE_TIMEOUT, NULL, NULL,
			     "panthor-vm-bind", ptdev->base.dev);
	if (ret)
		goto err_free_io_pgtable;

	sched = &vm->sched;
	ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
	if (ret)
		goto err_sched_fini;

	mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
	vm->memattr = mair_to_memattr(mair);

	mutex_lock(&ptdev->mmu->vm.lock);
	list_add_tail(&vm->node, &ptdev->mmu->vm.list);

	/* If a reset is in progress, stop the scheduler. */
	if (ptdev->mmu->vm.reset_in_progress)
		panthor_vm_stop(vm);
	mutex_unlock(&ptdev->mmu->vm.lock);

	/* We intentionally leave the reserved range set to zero, because we want
	 * kernel VMAs to be handled the same way user VMAs are.
	 */
	drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
		       DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
		       min_va, va_range, 0, 0, &panthor_gpuvm_ops);
	drm_gem_object_put(dummy_gem);
	return vm;

err_sched_fini:
	drm_sched_fini(&vm->sched);

err_free_io_pgtable:
	free_io_pgtable_ops(vm->pgtbl_ops);

err_mm_takedown:
	drm_mm_takedown(&vm->mm);
	drm_gem_object_put(dummy_gem);

err_free_vm:
	kfree(vm);
	return ERR_PTR(ret);
}
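/*
 * Illustrative sketch only (not part of the driver): creating a regular
 * (non-MCU) VM whose kernel BO range sits at the top of a 48-bit VA space.
 * The panthor_vm_create() prototype is real, but the split below is a
 * made-up example; the actual values are chosen by the caller.
 *
 *	u64 kernel_va_size = SZ_4G;
 *	u64 kernel_va_start = (1ull << 48) - kernel_va_size;
 *	struct panthor_vm *vm;
 *
 *	vm = panthor_vm_create(ptdev, false,
 *			       kernel_va_start, kernel_va_size,
 *			       kernel_va_start, kernel_va_size);
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 */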
static int
panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
			       struct panthor_vm *vm,
			       const struct drm_panthor_vm_bind_op *op,
			       struct panthor_vm_op_ctx *op_ctx)
{
	struct drm_gem_object *gem;
	int ret;

	/* Aligned on page size. */
	if ((op->va | op->size) & ~PAGE_MASK)
		return -EINVAL;

	switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
		gem = drm_gem_object_lookup(file, op->bo_handle);
		ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
						    gem ? to_panthor_bo(gem) : NULL,
						    op->bo_offset, op->size,
						    op->va, op->flags);
		drm_gem_object_put(gem);
		return ret;

	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
			return -EINVAL;

		if (op->bo_handle || op->bo_offset)
			return -EINVAL;

		return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size);

	case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY:
		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
			return -EINVAL;

		if (op->bo_handle || op->bo_offset)
			return -EINVAL;

		if (op->va || op->size)
			return -EINVAL;

		if (!op->syncs.count)
			return -EINVAL;

		panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm);
		return 0;

	default:
		return -EINVAL;
	}
}
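/*
 * Illustrative only: the shape of a MAP request that passes the checks
 * above. Field values are made-up examples; the authoritative layout of
 * struct drm_panthor_vm_bind_op lives in the panthor uAPI header.
 *
 *	struct drm_panthor_vm_bind_op map_op = {
 *		.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP |
 *			 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 *		.bo_handle = handle,	// GEM handle, example
 *		.bo_offset = 0,
 *		.va = 0x800000,		// must be page-aligned
 *		.size = SZ_2M,
 *	};
 *
 * UNMAP and SYNC_ONLY requests must leave the fields they don't use zeroed,
 * as enforced above.
 */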
static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
{
	struct panthor_vm_bind_job *job =
		container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);

	panthor_vm_bind_job_put(&job->base);
}
/**
 * panthor_vm_bind_job_create() - Create a VM_BIND job
 * @file: File.
 * @vm: VM targeted by the VM_BIND job.
 * @op: VM operation data.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct drm_sched_job *
panthor_vm_bind_job_create(struct drm_file *file,
			   struct panthor_vm *vm,
			   const struct drm_panthor_vm_bind_op *op)
{
	struct panthor_vm_bind_job *job;
	int ret;

	if (!vm)
		return ERR_PTR(-EINVAL);

	if (vm->destroyed || vm->unusable)
		return ERR_PTR(-EINVAL);

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx);
	if (ret) {
		kfree(job);
		return ERR_PTR(ret);
	}

	INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work);
	kref_init(&job->refcount);
	job->vm = panthor_vm_get(vm);

	ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
	if (ret)
		goto err_put_job;

	return &job->base;

err_put_job:
	panthor_vm_bind_job_put(&job->base);
	return ERR_PTR(ret);
}
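/*
 * Illustrative sketch of how the helpers in this file combine for an async
 * VM_BIND submission (simplified: sync-object handling, batching of several
 * ops and error paths are omitted; `exec` is a caller-provided drm_exec
 * context):
 *
 *	struct drm_sched_job *job;
 *
 *	job = panthor_vm_bind_job_create(file, vm, op);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = panthor_vm_bind_job_prepare_resvs(&exec, job);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_sched_job_arm(job);
 *	panthor_vm_bind_job_update_resvs(&exec, job);
 *	drm_sched_entity_push_job(job);
 *	panthor_vm_bind_job_put(job);
 */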
/**
 * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
 * @exec: The locking/preparation context.
 * @sched_job: The job to prepare resvs on.
 *
 * Locks and prepares the VM resv.
 *
 * If this is a map operation, locks and prepares the GEM resv.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
				      struct drm_sched_job *sched_job)
{
	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
	int ret;

	/* Acquire the VM lock and reserve a slot for this VM bind job. */
	ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1);
	if (ret)
		return ret;

	if (job->ctx.map.vm_bo) {
		/* Lock/prepare the GEM being mapped. */
		ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
 * @exec: drm_exec context.
 * @sched_job: Job to update the resvs on.
 */
void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
				      struct drm_sched_job *sched_job)
{
	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);

	/* Explicit sync => we just register our job finished fence as bookkeep. */
	drm_gpuvm_resv_add_fence(&job->vm->base, exec,
				 &sched_job->s_fence->finished,
				 DMA_RESV_USAGE_BOOKKEEP,
				 DMA_RESV_USAGE_BOOKKEEP);
}
void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
			     struct dma_fence *fence,
			     enum dma_resv_usage private_usage,
			     enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage);
}
/**
 * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
 * @file: File.
 * @vm: VM targeted by the VM operation.
 * @op: Data describing the VM operation.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_bind_exec_sync_op(struct drm_file *file,
				 struct panthor_vm *vm,
				 struct drm_panthor_vm_bind_op *op)
{
	struct panthor_vm_op_ctx op_ctx;
	int ret;

	/* No sync objects allowed on synchronous operations. */
	if (op->syncs.count)
		return -EINVAL;

	if (!op->size)
		return 0;

	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx);
	if (ret)
		return ret;

	ret = panthor_vm_exec_op(vm, &op_ctx, false);
	panthor_vm_cleanup_op_ctx(&op_ctx, vm);

	return ret;
}
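/*
 * Illustrative only (error handling elided, surrounding names hypothetical):
 * a synchronous VM_BIND ioctl handler is expected to iterate over the ops
 * array and call this helper for each entry.
 *
 *	for (u32 i = 0; i < op_count; i++) {
 *		ret = panthor_vm_bind_exec_sync_op(file, vm, &ops[i]);
 *		if (ret)
 *			break;
 *	}
 */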
/**
 * panthor_vm_map_bo_range() - Map a GEM object range to a VM
 * @vm: VM to map the GEM to.
 * @bo: GEM object to map.
 * @offset: Offset in the GEM object.
 * @size: Size to map.
 * @va: Virtual address to map the object to.
 * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
 * Only map-related flags are valid.
 *
 * Internal use only. For userspace requests, use
 * panthor_vm_bind_exec_sync_op() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
			    u64 offset, u64 size, u64 va, u32 flags)
{
	struct panthor_vm_op_ctx op_ctx;
	int ret;

	ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
	if (ret)
		return ret;

	ret = panthor_vm_exec_op(vm, &op_ctx, false);
	panthor_vm_cleanup_op_ctx(&op_ctx, vm);

	return ret;
}
/**
 * panthor_vm_unmap_range() - Unmap a portion of the VA space
 * @vm: VM to unmap the region from.
 * @va: Virtual address to unmap. Must be 4k aligned.
 * @size: Size of the region to unmap. Must be 4k aligned.
 *
 * Internal use only. For userspace requests, use
 * panthor_vm_bind_exec_sync_op() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
{
	struct panthor_vm_op_ctx op_ctx;
	int ret;

	ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size);
	if (ret)
		return ret;

	ret = panthor_vm_exec_op(vm, &op_ctx, false);
	panthor_vm_cleanup_op_ctx(&op_ctx, vm);

	return ret;
}
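/*
 * Illustrative kernel-internal usage sketch (the GPU VA and flags are
 * made-up example values): map a whole BO at a fixed address, then tear the
 * mapping down again.
 *
 *	ret = panthor_vm_map_bo_range(vm, bo, 0, bo->base.base.size,
 *				      gpu_va, DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC);
 *	...
 *	ret = panthor_vm_unmap_range(vm, gpu_va, bo->base.base.size);
 */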
/**
 * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
 * @exec: Locking/preparation context.
 * @vm: VM targeted by the GPU job.
 * @slot_count: Number of slots to reserve.
 *
 * GPU jobs assume all BOs bound to the VM at the time the job is submitted
 * are available when the job is executed. In order to guarantee that, we
 * need to reserve a slot on all BOs mapped to a VM and update this slot with
 * the job fence after its submission.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm,
					u32 slot_count)
{
	int ret;

	/* Acquire the VM lock and reserve a slot for this GPU job. */
	ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count);
	if (ret)
		return ret;

	return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count);
}
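/*
 * Illustrative only: a GPU job submission path would typically wrap this in
 * a drm_exec loop so contended dma-resv locks are retried, then register the
 * job's finished fence once the job has been armed (simplified sketch):
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = panthor_vm_prepare_mapped_bos_resvs(&exec, vm, job_count);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	...
 *	panthor_vm_update_resvs(vm, &exec, finished_fence,
 *				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
 *	drm_exec_fini(&exec);
 */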
/**
 * panthor_mmu_unplug() - Unplug the MMU logic
 * @ptdev: Device.
 *
 * No access to the MMU regs should be done after this function is called.
 * We suspend the IRQ and disable all VMs to guarantee that.
 */
void panthor_mmu_unplug(struct panthor_device *ptdev)
{
	panthor_mmu_irq_suspend(&ptdev->mmu->irq);

	mutex_lock(&ptdev->mmu->as.slots_lock);
	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;

		if (vm) {
			drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
			panthor_vm_release_as_locked(vm);
		}
	}
	mutex_unlock(&ptdev->mmu->as.slots_lock);
}
static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
{
	destroy_workqueue(res);
}
/**
 * panthor_mmu_init() - Initialize the MMU logic.
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_mmu_init(struct panthor_device *ptdev)
{
	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
	struct panthor_mmu *mmu;
	int ret, irq;

	mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return -ENOMEM;

	INIT_LIST_HEAD(&mmu->as.lru_list);

	ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&mmu->vm.list);
	ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
	if (ret)
		return ret;

	ptdev->mmu = mmu;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
				      panthor_mmu_fault_mask(ptdev, ~0));
	if (ret)
		return ret;

	mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
	if (!mmu->vm.wq)
		return -ENOMEM;

	/* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction,
	 * which passes iova as an unsigned long. Patch the mmu_features to reflect this
	 * limitation.
	 */
	if (sizeof(unsigned long) * 8 < va_bits) {
		ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
		ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8;
	}

	return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
}
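/*
 * Worked example of the patching above (numbers illustrative): on a GPU
 * reporting 48 VA bits but running a 32-bit kernel, bits [7:0] of
 * mmu_features are rewritten from 48 to 32, so every VM created afterwards
 * sizes its VA space to 1ull << 32 instead of 1ull << 48.
 */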
#ifdef CONFIG_DEBUG_FS
static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
{
	int ret;

	mutex_lock(&vm->op_lock);
	ret = drm_debugfs_gpuva_info(m, &vm->base);
	mutex_unlock(&vm->op_lock);

	return ret;
}
static int show_each_vm(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *ddev = node->minor->dev;
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
	struct panthor_vm *vm;
	int ret = 0;

	mutex_lock(&ptdev->mmu->vm.lock);
	list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
		ret = show(vm, m);
		if (ret < 0)
			break;

		seq_puts(m, "\n");
	}
	mutex_unlock(&ptdev->mmu->vm.lock);

	return ret;
}
static struct drm_info_list panthor_mmu_debugfs_list[] = {
	DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas),
};
/**
 * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
 * @minor: Minor.
 */
void panthor_mmu_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(panthor_mmu_debugfs_list,
				 ARRAY_SIZE(panthor_mmu_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif /* CONFIG_DEBUG_FS */
/**
 * panthor_mmu_pt_cache_init() - Initialize the page table cache.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_mmu_pt_cache_init(void)
{
	pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
	if (!pt_cache)
		return -ENOMEM;

	return 0;
}
/**
 * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
 */
void panthor_mmu_pt_cache_fini(void)
{
	kmem_cache_destroy(pt_cache);
}