// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <drm/panfrost_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

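/* Each GPU address space (AS) is driven through a small command interface:
 * wait for the AS to become idle, then write the next AS_COMMAND.
 */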
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

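/* AS_LOCKADDR packs the region to lock into a single 64-bit value: the low
 * bits carry the encoded log2 region width computed below, the remaining
 * bits the page-aligned base address.
 */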
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

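/* Issue an AS command for a single address space. Callers must hold
 * pfdev->as_lock; mmu_hw_do_operation() below takes it on their behalf.
 */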
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, size_t size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}

static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

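/* Assign a hardware address space to an MMU context: reuse the AS it already
 * holds if any, otherwise grab a free one, otherwise steal the least recently
 * used AS that has no active users.
 */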
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;
		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}

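/* Use 2MB block mappings when the address and remaining size allow it,
 * otherwise fall back to 4K pages.
 */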
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, size_t size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_sync_autosuspend(pfdev->dev);
}

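/* Map a DMA-mapped scatterlist into the context's page table and flush the
 * affected IOVA range on the GPU MMU.
 */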
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}

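/* TLB maintenance is done explicitly through the AS_COMMAND_FLUSH_* commands
 * above, so the io-pgtable flush callbacks have nothing to do.
 */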
static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
};

static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:
	spin_lock(&mmu->mm_lock);

	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}

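/* Heap (growable) BOs are not fully backed at map time: on a translation
 * fault we allocate backing pages and map them in 2MB chunks.
 */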
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
		if (pages[page_offset]) {
			/* Pages are already mapped, bail out. */
			mutex_unlock(&bo->base.pages_lock);
			goto out;
		}
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			goto err_pages;
		}
	}
	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

out:
	panfrost_gem_mapping_put(bomapping);
	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	drm_gem_object_put(&bo->base.base);
	return ret;
}

static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}

void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}

struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}

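/* The drm_mm color carries the BO flags: executable mappings must not start
 * or end on a 4GB boundary and are kept inside a single 4GB segment, which
 * the color_adjust hook below enforces.
 */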
#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}

struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* 4G enough for now. can be 48-bit */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk	= pfdev->coherent,
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      mmu);
	if (!mmu->pgtbl_ops) {
		kfree(mmu);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&mmu->refcount);

	return mmu;
}

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

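/* The hard IRQ handler only masks MMU interrupts and defers the fault
 * handling, which may sleep, to the threaded handler below.
 */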
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int ret;

	while (status) {
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		/* Page fault only */
		ret = -1;
		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

		if (ret)
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"Reason: %s\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				as, addr,
				"TODO",
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(pfdev, exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

		status &= ~mask;

		/* If we received new MMU interrupts, process them before returning. */
		if (!status)
			status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	}

	mmu_write(pfdev, MMU_INT_MASK, ~0);
	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}