drm/xe/oa/uapi: OA buffer mmap
author Ashutosh Dixit <ashutosh.dixit@intel.com>
Tue, 18 Jun 2024 01:46:05 +0000 (18:46 -0700)
committer Ashutosh Dixit <ashutosh.dixit@intel.com>
Tue, 18 Jun 2024 19:40:41 +0000 (12:40 -0700)
Allow the OA buffer to be mmap'd to userspace. This is needed for the MMIO
trigger use case. Even apart from that, with whitelisted OA head/tail
pointer registers, userspace can receive and interpret OA data directly
from the mmap'd buffer without issuing read()'s on the OA stream fd.
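
A minimal userspace sketch of the intended usage (not part of this patch):
the driver below only accepts a read-only, MAP_PRIVATE mapping of exactly
the OA buffer size, at offset 0. The 16 MiB size is an assumption based on
XE_OA_BUFFER_SIZE at the time of this patch, and the stream fd is assumed
to come from the OA stream open ioctl.

#include <stddef.h>
#include <sys/mman.h>

#define OA_BUF_SIZE (16u << 20)	/* assumed: XE_OA_BUFFER_SIZE (16 MiB) */

static void *map_oa_buffer(int oa_stream_fd)
{
	/* Must be PROT_READ and MAP_PRIVATE, covering the whole buffer */
	void *vaddr = mmap(NULL, OA_BUF_SIZE, PROT_READ, MAP_PRIVATE,
			   oa_stream_fd, 0);

	return vaddr == MAP_FAILED ? NULL : vaddr;
}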

v2: Remove unmap_mapping_range from xe_oa_release (Thomas H)
    Use vm_flags_mod (Umesh)

Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Suggested-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Reviewed-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240618014609.3233427-14-ashutosh.dixit@intel.com
drivers/gpu/drm/xe/xe_oa.c

index 038caeb7c9e7e9e6dfa63fe23fc4ce7c19f07b53..00654213cd936673a0612c3e1476155930d347eb 100644
@@ -824,6 +824,8 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
                return PTR_ERR(bo);
 
        stream->oa_buffer.bo = bo;
+       /* mmap implementation requires OA buffer to be in system memory */
+       xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0);
        stream->oa_buffer.vaddr = bo->vmap.vaddr;
        return 0;
 }
@@ -1125,6 +1127,49 @@ static int xe_oa_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct xe_oa_stream *stream = file->private_data;
+       struct xe_bo *bo = stream->oa_buffer.bo;
+       unsigned long start = vma->vm_start;
+       int i, ret;
+
+       if (xe_perf_stream_paranoid && !perfmon_capable()) {
+               drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n");
+               return -EACCES;
+       }
+
+       /* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
+       if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
+               drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Only support VM_READ, enforce MAP_PRIVATE by checking for
+        * VM_MAYSHARE, don't copy the vma on fork
+        */
+       if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_SHARED | VM_MAYSHARE)) {
+               drm_dbg(&stream->oa->xe->drm, "mmap must be read only\n");
+               return -EINVAL;
+       }
+       vm_flags_mod(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY,
+                    VM_MAYWRITE | VM_MAYEXEC);
+
+       xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages ==
+                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+       for (i = 0; i < bo->ttm.ttm->num_pages; i++) {
+               ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]),
+                                     PAGE_SIZE, vma->vm_page_prot);
+               if (ret)
+                       break;
+
+               start += PAGE_SIZE;
+       }
+
+       return ret;
+}
+
 static const struct file_operations xe_oa_fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
@@ -1132,6 +1177,7 @@ static const struct file_operations xe_oa_fops = {
        .poll           = xe_oa_poll,
        .read           = xe_oa_read,
        .unlocked_ioctl = xe_oa_ioctl,
+       .mmap           = xe_oa_mmap,
 };
 
 static bool engine_supports_mi_query(struct xe_hw_engine *hwe)