drm/shmem-helper: Use refcount_t for vmap_use_count
author    Dmitry Osipenko <dmitry.osipenko@collabora.com>
          Sat, 22 Mar 2025 21:26:08 +0000 (00:26 +0300)
committer Dmitry Osipenko <dmitry.osipenko@collabora.com>
          Wed, 26 Mar 2025 20:00:22 +0000 (23:00 +0300)
Use refcount_t for vmap_use_count to make the refcounting consistent
with pages_use_count and pages_pin_count, which already use refcount_t.
This also lets vmapping benefit from refcount_t's overflow checks.
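
For reference, the acquire/release pattern that this conversion follows
looks roughly like the sketch below. It is illustrative only: demo_obj,
demo_vmap() and demo_vunmap() are made-up names, vmalloc()/vfree() stand
in for the real pin + vmap() / vunmap() + unpin steps, and the caller is
assumed to hold the object's lock (the reservation lock in the real
helpers):

    #include <linux/refcount.h>
    #include <linux/vmalloc.h>

    struct demo_obj {
            refcount_t vmap_use_count;      /* 0 means "not vmapped" */
            void *vaddr;
            size_t size;
    };

    static int demo_vmap(struct demo_obj *obj)
    {
            /*
             * Fast path: already mapped, so just take another reference.
             * Unlike a plain "count++", refcount_inc_not_zero() saturates
             * and warns on overflow instead of silently wrapping.
             */
            if (refcount_inc_not_zero(&obj->vmap_use_count))
                    return 0;

            obj->vaddr = vmalloc(obj->size);        /* stand-in for pin + vmap() */
            if (!obj->vaddr)
                    return -ENOMEM;

            /* First user: make the 0 -> 1 transition explicit. */
            refcount_set(&obj->vmap_use_count, 1);
            return 0;
    }

    static void demo_vunmap(struct demo_obj *obj)
    {
            /* Tear the mapping down only when the last user is gone. */
            if (refcount_dec_and_test(&obj->vmap_use_count)) {
                    vfree(obj->vaddr);              /* stand-in for vunmap() + unpin */
                    obj->vaddr = NULL;
            }
    }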

Acked-by: Maxime Ripard <mripard@kernel.org>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-11-dmitry.osipenko@collabora.com
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/tests/drm_gem_shmem_test.c
include/drm/drm_gem_shmem_helper.h

index 84a196bbe44fad76801aaacd8bbf9d643439dbc9..2d924d547a519fdd19211eaca4968bcbbb5db154 100644
@@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
        } else {
                dma_resv_lock(shmem->base.resv, NULL);
 
-               drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+               drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
 
                if (shmem->sgt) {
                        dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -355,23 +355,25 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 
                dma_resv_assert_held(shmem->base.resv);
 
-               if (shmem->vmap_use_count++ > 0) {
+               if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
                        iosys_map_set_vaddr(map, shmem->vaddr);
                        return 0;
                }
 
                ret = drm_gem_shmem_pin_locked(shmem);
                if (ret)
-                       goto err_zero_use;
+                       return ret;
 
                if (shmem->map_wc)
                        prot = pgprot_writecombine(prot);
                shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
                                    VM_MAP, prot);
-               if (!shmem->vaddr)
+               if (!shmem->vaddr) {
                        ret = -ENOMEM;
-               else
+               } else {
                        iosys_map_set_vaddr(map, shmem->vaddr);
+                       refcount_set(&shmem->vmap_use_count, 1);
+               }
        }
 
        if (ret) {
@@ -384,8 +386,6 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 err_put_pages:
        if (!drm_gem_is_imported(obj))
                drm_gem_shmem_unpin_locked(shmem);
-err_zero_use:
-       shmem->vmap_use_count = 0;
 
        return ret;
 }
@@ -413,14 +413,10 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
        } else {
                dma_resv_assert_held(shmem->base.resv);
 
-               if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
-                       return;
-
-               if (--shmem->vmap_use_count > 0)
-                       return;
-
-               vunmap(shmem->vaddr);
-               drm_gem_shmem_unpin_locked(shmem);
+               if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+                       vunmap(shmem->vaddr);
+                       drm_gem_shmem_unpin_locked(shmem);
+               }
        }
 
        shmem->vaddr = NULL;
@@ -672,7 +668,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 
        drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
        drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
-       drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+       drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
        drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
index 1459cdb0c41331ab10ee52f16f5f71d9d3d0540c..81cadaecdd4f6ed65945b56e9f33263af32d848f 100644
@@ -168,7 +168,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
        shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
        KUNIT_EXPECT_NULL(test, shmem->vaddr);
-       KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+       KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
 
        ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
        KUNIT_ASSERT_EQ(test, ret, 0);
@@ -177,7 +177,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
        KUNIT_ASSERT_EQ(test, ret, 0);
        KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
        KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
-       KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+       KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);
 
        iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
        for (i = 0; i < TEST_SIZE; i++)
@@ -185,7 +185,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
 
        drm_gem_shmem_vunmap_locked(shmem, &map);
        KUNIT_EXPECT_NULL(test, shmem->vaddr);
-       KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+       KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
 }
 
 /*
index 8b9bba87ae63d8d598f8c7e15e02ed4b545984ae..b4f993da3caec83ae17208be13f7c80b18d04e0d 100644
@@ -82,7 +82,7 @@ struct drm_gem_shmem_object {
         * Reference count on the virtual address.
         * The address are un-mapped when the count reaches zero.
         */
-       unsigned int vmap_use_count;
+       refcount_t vmap_use_count;
 
        /**
         * @pages_mark_dirty_on_put: