drm/xe: Don't use __user error pointers
Author: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Wed, 17 Jan 2024 13:40:47 +0000 (14:40 +0100)
Committer: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Thu, 1 Feb 2024 10:26:50 +0000 (11:26 +0100)
The error pointer macros are not aware of __user pointers and as a
consequence sparse warns.

Have the copy_mask() function return an integer instead of a __user
pointer.

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240117134048.165425-5-thomas.hellstrom@linux.intel.com
(cherry picked from commit 78366eed6853aa6a5deccb2eb182f9334d2bd208)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
drivers/gpu/drm/xe/xe_query.c

index 9b35673b286c80c1c2332d6ece9add9f754f3ca8..7e924faeeea0b0f8ebc2f7fe89ade231412a3892 100644 (file)
@@ -459,21 +459,21 @@ static size_t calc_topo_query_size(struct xe_device *xe)
                 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
 }
 
-static void __user *copy_mask(void __user *ptr,
-                             struct drm_xe_query_topology_mask *topo,
-                             void *mask, size_t mask_size)
+static int copy_mask(void __user **ptr,
+                    struct drm_xe_query_topology_mask *topo,
+                    void *mask, size_t mask_size)
 {
        topo->num_bytes = mask_size;
 
-       if (copy_to_user(ptr, topo, sizeof(*topo)))
-               return ERR_PTR(-EFAULT);
-       ptr += sizeof(topo);
+       if (copy_to_user(*ptr, topo, sizeof(*topo)))
+               return -EFAULT;
+       *ptr += sizeof(topo);
 
-       if (copy_to_user(ptr, mask, mask_size))
-               return ERR_PTR(-EFAULT);
-       ptr += mask_size;
+       if (copy_to_user(*ptr, mask, mask_size))
+               return -EFAULT;
+       *ptr += mask_size;
 
-       return ptr;
+       return 0;
 }
 
 static int query_gt_topology(struct xe_device *xe,
@@ -493,28 +493,28 @@ static int query_gt_topology(struct xe_device *xe,
        }
 
        for_each_gt(gt, xe, id) {
+               int err;
+
                topo.gt_id = id;
 
                topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
-               query_ptr = copy_mask(query_ptr, &topo,
-                                     gt->fuse_topo.g_dss_mask,
-                                     sizeof(gt->fuse_topo.g_dss_mask));
-               if (IS_ERR(query_ptr))
-                       return PTR_ERR(query_ptr);
+               err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
+                               sizeof(gt->fuse_topo.g_dss_mask));
+               if (err)
+                       return err;
 
                topo.type = DRM_XE_TOPO_DSS_COMPUTE;
-               query_ptr = copy_mask(query_ptr, &topo,
-                                     gt->fuse_topo.c_dss_mask,
-                                     sizeof(gt->fuse_topo.c_dss_mask));
-               if (IS_ERR(query_ptr))
-                       return PTR_ERR(query_ptr);
+               err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
+                               sizeof(gt->fuse_topo.c_dss_mask));
+               if (err)
+                       return err;
 
                topo.type = DRM_XE_TOPO_EU_PER_DSS;
-               query_ptr = copy_mask(query_ptr, &topo,
-                                     gt->fuse_topo.eu_mask_per_dss,
-                                     sizeof(gt->fuse_topo.eu_mask_per_dss));
-               if (IS_ERR(query_ptr))
-                       return PTR_ERR(query_ptr);
+               err = copy_mask(&query_ptr, &topo,
+                               gt->fuse_topo.eu_mask_per_dss,
+                               sizeof(gt->fuse_topo.eu_mask_per_dss));
+               if (err)
+                       return err;
        }
 
        return 0;