1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
4 #include "pvr_device.h"
9 #include "pvr_rogue_defs.h"
10 #include "pvr_rogue_fwif_client.h"
11 #include "pvr_rogue_fwif_shared.h"
14 #include <uapi/drm/pvr_drm.h>
16 #include <drm/drm_device.h>
17 #include <drm/drm_drv.h>
18 #include <drm/drm_file.h>
19 #include <drm/drm_gem.h>
20 #include <drm/drm_ioctl.h>
22 #include <linux/err.h>
23 #include <linux/export.h>
25 #include <linux/kernel.h>
26 #include <linux/mod_devicetable.h>
27 #include <linux/module.h>
28 #include <linux/moduleparam.h>
29 #include <linux/of_device.h>
30 #include <linux/of_platform.h>
31 #include <linux/platform_device.h>
/**
 * DOC: PowerVR (Series 6 and later) and IMG Graphics Driver
 *
 * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
 *
 * * AXE-1-16M (found in Texas Instruments AM62)
 */
/**
 * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
 * @drm_dev: [IN] Target DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_create_bo_args.
 * @file: [IN] DRM file-private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
 *
 * Return:
 * * 0 on success,
 * * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
 *   or wider than &typedef size_t,
 * * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
 *   reserved or undefined are set,
 * * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
 *   zero,
 * * Any error encountered while creating the object (see
 *   pvr_gem_object_create()), or
 * * Any error encountered while transferring ownership of the object into a
 *   userspace-accessible handle (see pvr_gem_object_into_handle()).
 */
pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
	struct drm_pvr_ioctl_create_bo_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_gem_object *pvr_obj;
	size_t sanitized_size;

	/* Fail fast if the device has been unplugged. */
	if (!drm_dev_enter(drm_dev, &idx))

	/* All padding fields must be zeroed. */
	if (args->_padding_c != 0) {
		goto err_drm_dev_exit;

	/*
	 * On 64-bit platforms (our primary target), size_t is a u64. However,
	 * on other architectures we have to check for overflow when casting
	 * down to size_t from u64.
	 *
	 * We also disallow zero-sized allocations, reserved (kernel-only)
	 * flags, and sizes that are not device-page aligned.
	 */
	if (args->size > SIZE_MAX || args->size == 0 || args->flags &
	    ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
		goto err_drm_dev_exit;

	sanitized_size = (size_t)args->size;

	/*
	 * Create a buffer object and transfer ownership to a userspace-
	 * accessible handle.
	 */
	pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
	if (IS_ERR(pvr_obj)) {
		err = PTR_ERR(pvr_obj);
		goto err_drm_dev_exit;

	/* This function will not modify &args->handle unless it succeeds. */
	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
		goto err_destroy_obj;

	/*
	 * GEM objects are refcounted, so there is no explicit destructor
	 * function. Instead, we release the singular reference we currently
	 * hold on the object and let GEM take care of the rest.
	 */
	pvr_gem_object_put(pvr_obj);
/**
 * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be
 * used when calling mmap() from userspace to map the given GEM buffer object
 * @drm_dev: [IN] DRM device (unused).
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_get_bo_mmap_offset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET.
 *
 * This IOCTL does *not* perform an mmap. See the docs on
 * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details.
 *
 * Return:
 * * 0 on success,
 * * -%ENOENT if the handle does not reference a valid GEM buffer object,
 * * -%EINVAL if any padding fields in &struct
 *   drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or
 * * Any error returned by drm_gem_create_mmap_offset().
 */
pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
	struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_gem_object *pvr_obj;
	struct drm_gem_object *gem_obj;

	/* Fail fast if the device has been unplugged. */
	if (!drm_dev_enter(drm_dev, &idx))

	/* All padding fields must be zeroed. */
	if (args->_padding_4 != 0) {
		goto err_drm_dev_exit;

	/*
	 * Obtain a kernel reference to the buffer object. This reference is
	 * counted and must be manually dropped before returning. If a buffer
	 * object cannot be found for the specified handle, return -%ENOENT (No
	 * such file or directory).
	 */
	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
		goto err_drm_dev_exit;

	gem_obj = gem_from_pvr_gem(pvr_obj);

	/*
	 * Allocate a fake offset which can be used in userspace calls to mmap
	 * on the DRM device file. If this fails, return the error code. This
	 * operation is idempotent.
	 */
	ret = drm_gem_create_mmap_offset(gem_obj);
		/* Drop our reference to the buffer object. */
		drm_gem_object_put(gem_obj);
		goto err_drm_dev_exit;

	/*
	 * Read out the fake offset allocated by the earlier call to
	 * drm_gem_create_mmap_offset.
	 */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	/* Drop our reference to the buffer object. */
	pvr_gem_object_put(pvr_obj);
216 static __always_inline u64
217 pvr_fw_version_packed(u32 major, u32 minor)
219 return ((u64)major << 32) | minor;
/*
 * Size (in dwords) of the common store space reserved for tile partitions.
 * On 16x16-tile cores this scales with usc_min_output_registers_per_pix;
 * otherwise a fixed 1024 dwords per partition is assumed.
 * NOTE(review): the feature-value reads are assumed to leave the local
 * defaults untouched on failure — confirm PVR_FEATURE_VALUE() semantics.
 */
rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev)
	u32 max_partitions = 0;

	PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x);
	PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y);
	PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions);

	if (tile_size_x == 16 && tile_size_y == 16) {
		u32 usc_min_output_registers_per_pix = 0;

		PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix,
				  &usc_min_output_registers_per_pix);

		return tile_size_x * tile_size_y * max_partitions *
		       usc_min_output_registers_per_pix;

	return max_partitions * 1024;
/*
 * Size (in dwords) of the common store region available for allocation:
 * total common store size minus a 256*4-dword reserved area and the tile
 * partition space. Cores with quirk 44079 are additionally capped at the
 * common store split point.
 */
rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev)
	u32 common_store_size_in_dwords = 512 * 4 * 4;
	u32 alloc_region_size;

	PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords);

	alloc_region_size = common_store_size_in_dwords - (256U * 4U) -
			    rogue_get_common_store_partition_space_size(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 44079)) {
		u32 common_store_split_point = (768U * 4U * 4U);

		return min(common_store_split_point - (256U * 4U), alloc_region_size);

	return alloc_region_size;
/*
 * Number of phantoms required to drive the reported cluster count;
 * defaults to a single cluster if the feature value is unavailable.
 */
rogue_get_num_phantoms(struct pvr_device *pvr_dev)
	u32 num_clusters = 1;

	PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters);

	return ROGUE_REQ_NUM_PHANTOMS(num_clusters);
/*
 * Maximum number of coefficient registers available for allocation: the
 * common store alloc region plus any quirk-mandated pending coefficient
 * allocation, minus the per-phantom pixel portion, the vertex/compute
 * additional portion and the pending shared-register allocation.
 */
rogue_get_max_coeffs(struct pvr_device *pvr_dev)
	u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS;
	u32 pending_allocation_shared_regs = 2U * 1024U;
	u32 pending_allocation_coeff_regs = 0U;
	u32 num_phantoms = rogue_get_num_phantoms(pvr_dev);
	u32 tiles_in_flight = 0;
	u32 max_coeff_pixel_portion;

	PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight);
	max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms);
	max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS;

	/*
	 * Compute tasks on cores with BRN48492 and without compute overlap may
	 * lock up without two additional lines of coeffs.
	 */
	if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
		pending_allocation_coeff_regs = 2U * 1024U;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748))
		pending_allocation_shared_regs = 0;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020))
		max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS;

	return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs -
		(max_coeff_pixel_portion + max_coeff_additional_portion +
		 pending_allocation_shared_regs);
/*
 * Maximum per-kernel local memory size (in registers) for the CDM: the
 * lesser of the available coefficient registers (minus two reserved CSRM
 * lines on quirky roguexe cores without compute overlap) and the hardware
 * per-kernel cap.
 */
rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev)
	u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) &&
	    !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) {
		/* Driver must not use the 2 reserved lines. */
		available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2;

	/*
	 * The maximum amount of local memory available to a kernel is the minimum
	 * of the total number of coefficient registers available and the max common
	 * store allocation size which can be made by the CDM.
	 *
	 * If any coeff lines are reserved for tessellation or pixel then we need to
	 * subtract those too.
	 */
	return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
/**
 * pvr_dev_query_gpu_info_get() - Copy GPU information to userspace.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_gpu_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Return:
 * * 0 on success, or if size is requested using a NULL pointer, or
 * * -%E2BIG if the indicated length of the allocation is less than is
 *   required to contain the copied data, or
 * * -%EFAULT if local memory could not be copied to userspace.
 */
pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
			   struct drm_pvr_ioctl_dev_query_args *args)
	struct drm_pvr_dev_query_gpu_info gpu_info = {0};

	/* NULL pointer: report the expected object size only. */
	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_gpu_info);

	/*
	 * NOTE(review): the packed BVNC value appears to be discarded here —
	 * should this be assigned to gpu_info.gpu_id? Confirm against callers.
	 */
	pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
	gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);

	/* Clamp the reported size to the kernel object size. */
	if (args->size > sizeof(gpu_info))
		args->size = sizeof(gpu_info);
/**
 * pvr_dev_query_runtime_info_get() - Copy runtime information to userspace.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_runtime_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Return:
 * * 0 on success, or if size is requested using a NULL pointer, or
 * * -%E2BIG if the indicated length of the allocation is less than is
 *   required to contain the copied data, or
 * * -%EFAULT if local memory could not be copied to userspace.
 */
pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
	struct drm_pvr_dev_query_runtime_info runtime_info = {0};

	/* NULL pointer: report the expected object size only. */
	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_runtime_info);

	runtime_info.free_list_min_pages = 0; /* FIXME */
	runtime_info.free_list_max_pages =
		ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
	runtime_info.common_store_alloc_region_size =
		rogue_get_common_store_alloc_region_size(pvr_dev);
	runtime_info.common_store_partition_space_size =
		rogue_get_common_store_partition_space_size(pvr_dev);
	runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
	runtime_info.cdm_max_local_mem_size_regs =
		rogue_get_cdm_max_local_mem_size_regs(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);

	/* Clamp the reported size to the kernel object size. */
	if (args->size > sizeof(runtime_info))
		args->size = sizeof(runtime_info);
/**
 * pvr_dev_query_quirks_get() - Unpack array of quirks at the address given
 * in a struct drm_pvr_dev_query_quirks, or gets the amount of space required
 * for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_query_quirks.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to that copied, or if either pointer is
 * NULL, that which would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Return:
 * * 0 on success, or if size/count is requested using a NULL pointer, or
 * * -%EINVAL if args contained non-zero reserved fields, or
 * * -%E2BIG if the indicated length of the allocation is less than is
 *   required to contain the copied data, or
 * * -%EFAULT if local memory could not be copied to userspace.
 */
pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
			 struct drm_pvr_ioctl_dev_query_args *args)
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location.
	 */
	static const u32 umd_quirks_musthave[] = {
	static const u32 umd_quirks[] = {
	struct drm_pvr_dev_query_quirks query;
	u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
	size_t out_musthave_count = 0;
	size_t out_count = 0;

	/* NULL pointer: report the expected object size only. */
	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_quirks);

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	/* Reserved padding field must be zero. */
	if (query._padding_c)

	/* Must-have quirks are emitted first and counted separately. */
	for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
			out[out_count++] = umd_quirks_musthave[i];
			out_musthave_count++;

	for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
			out[out_count++] = umd_quirks[i];

	/* Userspace buffer too short for the matched quirks. */
	if (query.count < out_count)

	if (copy_to_user(u64_to_user_ptr(query.quirks), out,
			 out_count * sizeof(u32))) {

	query.musthave_count = out_musthave_count;

	query.count = out_count;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);

	args->size = sizeof(query);
/**
 * pvr_dev_query_enhancements_get() - Unpack array of enhancements at the
 * address given in a struct drm_pvr_dev_query_enhancements, or gets the amount
 * of space required for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_enhancements.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to that copied, or if either pointer is
 * NULL, that which would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Return:
 * * 0 on success, or if size/count is requested using a NULL pointer, or
 * * -%EINVAL if args contained non-zero reserved fields, or
 * * -%E2BIG if the indicated length of the allocation is less than is
 *   required to contain the copied data, or
 * * -%EFAULT if local memory could not be copied to userspace.
 */
pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location.
	 */
	const u32 umd_enhancements[] = {
	struct drm_pvr_dev_query_enhancements query;
	u32 out[ARRAY_SIZE(umd_enhancements)];

	/* NULL pointer: report the expected object size only. */
	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_enhancements);

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	/* Both reserved padding fields must be zero. */
	if (query._padding_a)
	if (query._padding_c)

	for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
		if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
			out[out_idx++] = umd_enhancements[i];

	/* NULL output array: report counts only, copy nothing. */
	if (!query.enhancements)
	if (query.count < out_idx)

	if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
			 out_idx * sizeof(u32))) {

	query.count = out_idx;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);

	args->size = sizeof(query);
/**
 * pvr_ioctl_dev_query() - IOCTL to copy information about a device
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_dev_query_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DEV_QUERY.
 * If the given receiving struct pointer is NULL, or the indicated size is too
 * small, the expected size of the struct type will be returned in the size
 * field.
 *
 * Return:
 * * 0 on success or when fetching the size with args->pointer == NULL, or
 * * -%E2BIG if the indicated size of the receiving struct is less than is
 *   required to contain the copied data, or
 * * -%EINVAL if the indicated struct type is unknown, or
 * * -%ENOMEM if local memory could not be allocated, or
 * * -%EFAULT if local memory could not be copied to userspace.
 */
pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_dev_query_args *args = raw_args;

	/* Fail fast if the device has been unplugged. */
	if (!drm_dev_enter(drm_dev, &idx))

	/* Dispatch on query type; unknown types fall through to -EINVAL. */
	switch ((enum drm_pvr_dev_query)args->type) {
	case DRM_PVR_DEV_QUERY_GPU_INFO_GET:
		ret = pvr_dev_query_gpu_info_get(pvr_dev, args);

	case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET:
		ret = pvr_dev_query_runtime_info_get(pvr_dev, args);

	case DRM_PVR_DEV_QUERY_QUIRKS_GET:
		ret = pvr_dev_query_quirks_get(pvr_dev, args);

	case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET:
		ret = pvr_dev_query_enhancements_get(pvr_dev, args);

	case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
		ret = pvr_heap_info_get(pvr_dev, args);

	case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
		ret = pvr_static_data_areas_get(pvr_dev, args);
/**
 * pvr_ioctl_create_context() - IOCTL to create a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_create_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT.
 *
 * Return:
 * * 0 on success, or
 * * -%EINVAL if provided arguments are invalid, or
 * * -%EFAULT if arguments can't be copied from userspace, or
 * * Any error returned by pvr_create_render_context().
 */
pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args,
			 struct drm_file *file)
/**
 * pvr_ioctl_destroy_context() - IOCTL to destroy a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_destroy_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT.
 *
 * Return:
 * * 0 on success, or
 * * -%EINVAL if context not in context list.
 */
pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args,
			  struct drm_file *file)
/**
 * pvr_ioctl_create_free_list() - IOCTL to create a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_create_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST.
 *
 * Return:
 * * 0 on success, or
 * * Any error returned by pvr_free_list_create().
 */
pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args,
			   struct drm_file *file)
/**
 * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_destroy_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST.
 *
 * Return:
 * * 0 on success, or
 * * -%EINVAL if free list not in object list.
 */
pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
/**
 * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_create_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET.
 *
 * Return:
 * * 0 on success, or
 * * Any error returned by pvr_hwrt_dataset_create().
 */
pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			      struct drm_file *file)
/**
 * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_destroy_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET.
 *
 * Return:
 * * 0 on success, or
 * * -%EINVAL if HWRT dataset not in object list.
 */
pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			       struct drm_file *file)
/**
 * pvr_ioctl_create_vm_context() - IOCTL to create a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_create_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.
 *
 * Return:
 * * 0 on success, or
 * * Any error returned by pvr_vm_create_context().
 */
pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
	struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	/* Fail fast if the device has been unplugged. */
	if (!drm_dev_enter(drm_dev, &idx))

	/* All padding fields must be zeroed. */
	if (args->_padding_4) {
		goto err_drm_dev_exit;

	vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
	if (IS_ERR(vm_ctx)) {
		err = PTR_ERR(vm_ctx);
		goto err_drm_dev_exit;

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->vm_ctx_handles,

	/* Drop the creation reference; the handle now owns the context. */
	pvr_vm_context_put(vm_ctx);
/**
 * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_destroy_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
 *
 * Return:
 * * 0 on success, or
 * * -%EINVAL if object not in object list.
 */
pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
	struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	/* All padding fields must be zeroed. */
	if (args->_padding_4)

	/* Remove the handle; this transfers its reference to us. */
	vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);

	/* Drop the reference the handle held. */
	pvr_vm_context_put(vm_ctx);
/**
 * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_vm_map_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
 *
 * Return:
 * * 0 on success,
 * * -%EINVAL if &drm_pvr_ioctl_vm_op_map_args.flags is not zero,
 * * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_op_map_args.offset
 *   and &drm_pvr_ioctl_vm_op_map_args.size are not valid or do not fall
 *   within the buffer object specified by
 *   &drm_pvr_ioctl_vm_op_map_args.handle,
 * * -%EINVAL if the bounds specified by
 *   &drm_pvr_ioctl_vm_op_map_args.device_addr and
 *   &drm_pvr_ioctl_vm_op_map_args.size do not form a valid device-virtual
 *   address range which falls entirely within a single heap, or
 * * -%ENOENT if &drm_pvr_ioctl_vm_op_map_args.handle does not refer to a
 *   valid PowerVR buffer object.
 */
pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
		 struct drm_file *file)
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_vm_map_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	struct pvr_gem_object *pvr_obj;

	u64 offset_plus_size;

	/* Fail fast if the device has been unplugged. */
	if (!drm_dev_enter(drm_dev, &idx))

	/* Initial validation of args. */
	if (args->_padding_14) {
		goto err_drm_dev_exit;

	/*
	 * Reject reserved flags, offset+size overflow, and device-virtual
	 * ranges that do not fall entirely inside a single heap.
	 */
	if (args->flags != 0 ||
	    check_add_overflow(args->offset, args->size, &offset_plus_size) ||
	    !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
		goto err_drm_dev_exit;

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
		goto err_drm_dev_exit;

	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
		goto err_put_vm_context;

	pvr_obj_size = pvr_gem_object_size(pvr_obj);

	/*
	 * Validate offset and size args. The alignment of these will be
	 * checked when mapping; for now just check that they're within valid
	 * bounds of the buffer object.
	 */
	if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
		goto err_put_pvr_object;

	err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
			 args->device_addr, args->size);
		goto err_put_pvr_object;

	/*
	 * In order to set up the mapping, we needed a reference to &pvr_obj.
	 * However, pvr_vm_map() obtains and stores its own reference, so we
	 * must release ours before returning.
	 */
	pvr_gem_object_put(pvr_obj);

	pvr_vm_context_put(vm_ctx);
/**
 * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_vm_unmap_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
 *
 * Return:
 * * 0 on success,
 * * -%EINVAL if &drm_pvr_ioctl_vm_op_unmap_args.device_addr is not a valid
 *   device page-aligned device-virtual address, or
 * * -%ENOENT if there is currently no PowerVR buffer object mapped at
 *   &drm_pvr_ioctl_vm_op_unmap_args.device_addr.
 */
pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
		   struct drm_file *file)
	struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	/* Initial validation of args. */
	if (args->_padding_4)

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);

	err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);

	/* Drop the lookup reference on the VM context. */
	pvr_vm_context_put(vm_ctx);
/**
 * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_submit_job_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOB.
 *
 * Return:
 * * 0 on success, or
 * * -%EINVAL if arguments are invalid.
 */
pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args,
		      struct drm_file *file)
/*
 * Copy a single userspace object into @out, tolerating UAPI struct growth:
 * copy_struct_from_user() zero-fills when the user struct is smaller than
 * @obj_size and rejects non-zero trailing bytes when it is larger.
 * A stride below @min_stride is rejected with -EINVAL.
 */
pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out)
	if (usr_stride < min_stride)

	return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride);
/*
 * Copy a single kernel object back to userspace: writes at most
 * min(usr_stride, obj_size) bytes, then zeroes any userspace tail beyond
 * @obj_size so stale user memory is never left behind.
 * Returns -EINVAL on short stride, -EFAULT on a faulting user access.
 */
pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in)
	if (usr_stride < min_stride)

	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size)))

	/* Zero the user-visible tail when the user struct is larger. */
	if (usr_stride > obj_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) {
/*
 * Copy an array of userspace objects into a freshly-allocated kernel array
 * (returned via *out; caller is responsible for freeing it).
 * Fast path: matching stride allows one bulk copy_from_user(). Slow path:
 * per-element copy_struct_from_user() handles stride mismatches safely.
 */
pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out)
	if (in->stride < min_stride)

	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);

	if (obj_size == in->stride) {
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);

			/* Kernel side advances by obj_size, user side by its stride. */
			out_ptr += obj_size;
			in_ptr += in->stride;
1100 pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
1103 if (out->stride < min_stride)
1109 if (obj_size == out->stride) {
1110 if (copy_to_user(u64_to_user_ptr(out->array), in,
1111 (unsigned long)obj_size * out->count))
1114 u32 cpy_elem_size = min_t(u32, out->stride, obj_size);
1115 void __user *out_ptr = u64_to_user_ptr(out->array);
1116 const void *in_ptr = in;
1118 for (u32 i = 0; i < out->count; i++) {
1119 if (copy_to_user(out_ptr, in_ptr, cpy_elem_size))
1122 out_ptr += obj_size;
1123 in_ptr += out->stride;
1126 if (out->stride > obj_size &&
1127 clear_user(u64_to_user_ptr(out->array + obj_size),
1128 out->stride - obj_size)) {
/*
 * Convenience wrapper: expands to a DRM ioctl descriptor whose UAPI number
 * is DRM_IOCTL_PVR_<_name> and whose handler is pvr_ioctl_<_func>().
 */
#define DRM_PVR_IOCTL(_name, _func, _flags) \
	DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags)

/* clang-format off */

/* Table order must match the UAPI ioctl numbering; all are render-node safe. */
static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = {
	DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),

/* clang-format on */

#undef DRM_PVR_IOCTL
/**
 * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
 * @drm_dev: [IN] DRM device.
 * @file: [IN] DRM file private data.
 *
 * Allocates powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 *
 * Return:
 * * 0 on success,
 * * -%ENOMEM if the allocation of a &struct pvr_file fails, or
 * * Any error returned by pvr_memory_context_init().
 */
pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file;

	pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);

	/*
	 * Store reference to base DRM file private data for use by
	 * from_pvr_file.
	 */
	pvr_file->file = file;

	/*
	 * Store reference to powervr-specific outer device struct in file
	 * private data for convenient access.
	 */
	pvr_file->pvr_dev = pvr_dev;

	xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);

	/*
	 * Store reference to powervr-specific file private data in DRM file
	 * private data.
	 */
	file->driver_priv = pvr_file;
/**
 * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
 * drm_file is closed.
 * @drm_dev: [IN] DRM device (unused).
 * @file: [IN] DRM file private data.
 *
 * Frees powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 */
pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
			 struct drm_file *file)
	struct pvr_file *pvr_file = to_pvr_file(file);

	/* Drop references on any remaining objects. */
	pvr_destroy_vm_contexts_for_file(pvr_file);

	/* Clear the back-pointer before the pvr_file is freed. */
	file->driver_priv = NULL;
1232 DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);
/* DRM driver description: a GEM + GPUVA render-only device. */
static struct drm_driver pvr_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER,
	.open = pvr_drm_driver_open,
	.postclose = pvr_drm_driver_postclose,
	.ioctls = pvr_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls),
	.fops = &pvr_drm_driver_fops,

	.name = PVR_DRIVER_NAME,
	.desc = PVR_DRIVER_DESC,
	.date = PVR_DRIVER_DATE,
	.major = PVR_DRIVER_MAJOR,
	.minor = PVR_DRIVER_MINOR,
	.patchlevel = PVR_DRIVER_PATCHLEVEL,

	/* PRIME import goes through the shmem helper. */
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_create_object = pvr_gem_create_object,
/*
 * Platform driver probe: allocates the DRM/pvr device, enables runtime PM
 * (devres-managed) with a 50 ms autosuspend delay, initialises the watchdog
 * and device state, then registers the DRM device. Error paths unwind in
 * reverse order.
 */
pvr_probe(struct platform_device *plat_dev)
	struct pvr_device *pvr_dev;
	struct drm_device *drm_dev;

	pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
				     struct pvr_device, base);
	if (IS_ERR(pvr_dev))
		return PTR_ERR(pvr_dev);

	drm_dev = &pvr_dev->base;

	platform_set_drvdata(plat_dev, drm_dev);

	devm_pm_runtime_enable(&plat_dev->dev);
	pm_runtime_mark_last_busy(&plat_dev->dev);

	pm_runtime_set_autosuspend_delay(&plat_dev->dev, 50);
	pm_runtime_use_autosuspend(&plat_dev->dev);
	pvr_watchdog_init(pvr_dev);

	err = pvr_device_init(pvr_dev);
		goto err_watchdog_fini;

	err = drm_dev_register(drm_dev, 0);
		goto err_device_fini;

	pvr_device_fini(pvr_dev);

	pvr_watchdog_fini(pvr_dev);
/*
 * Platform driver remove callback — tears down the device created in
 * pvr_probe().
 *
 * NOTE(review): drm_dev_unplug() runs *after* pvr_device_fini() here.
 * Unplugging first would make drm_dev_enter() fail in concurrent ioctls
 * before device state is torn down; confirm this ordering is intentional.
 */
pvr_remove(struct platform_device *plat_dev)
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);

	pm_runtime_suspend(drm_dev->dev);
	pvr_device_fini(pvr_dev);
	drm_dev_unplug(drm_dev);
	pvr_watchdog_fini(pvr_dev);
/* Devicetree match table — currently only the IMG AXE family. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "img,img-axe", .data = NULL },
MODULE_DEVICE_TABLE(of, dt_match);

/* Runtime-PM callbacks; no system-sleep ops are provided. */
static const struct dev_pm_ops pvr_pm_ops = {
	RUNTIME_PM_OPS(pvr_power_device_suspend, pvr_power_device_resume, pvr_power_device_idle)

/* Platform driver glue binding probe/remove to the DT match table. */
static struct platform_driver pvr_driver = {
	.remove = pvr_remove,
		.name = PVR_DRIVER_NAME,
		.of_match_table = dt_match,

module_platform_driver(pvr_driver);
1330 MODULE_AUTHOR("Imagination Technologies Ltd.");
1331 MODULE_DESCRIPTION(PVR_DRIVER_DESC);
1332 MODULE_LICENSE("Dual MIT/GPL");
1333 MODULE_IMPORT_NS(DMA_BUF);
1334 MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");