mempool: do not use ksize() for poisoning
authorKees Cook <keescook@chromium.org>
Fri, 28 Oct 2022 15:53:01 +0000 (08:53 -0700)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 30 Nov 2022 23:58:41 +0000 (15:58 -0800)
Nothing appears to be using ksize() within the kmalloc-backed mempools
except the mempool poisoning logic.  Use the actual pool size instead of
ksize() to avoid the special handling of the memory that would otherwise
be needed by KASAN, UBSAN_BOUNDS, or FORTIFY_SOURCE.
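
For reference, a rough sketch of where pool_data comes from, paraphrased
from include/linux/mempool.h and mm/mempool.c (simplified, not the
verbatim kernel source): kmalloc-backed pools stash the element size
directly in pool_data, while slab-backed pools store the kmem_cache
pointer, whose object size kmem_cache_size() reports.

	/* kmalloc-backed: pool_data carries the requested size... */
	static inline mempool_t *
	mempool_create_kmalloc_pool(int min_nr, size_t size)
	{
		return mempool_create(min_nr, mempool_kmalloc,
				      mempool_kfree,
				      (void *)(unsigned long)size);
	}

	/* ...which mempool_kmalloc() recovers at allocation time. */
	void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
	{
		size_t size = (size_t)pool_data;
		return kmalloc(size, gfp_mask);
	}

	/* slab-backed: pool_data is the kmem_cache itself. */
	static inline mempool_t *
	mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
	{
		return mempool_create(min_nr, mempool_alloc_slab,
				      mempool_free_slab, (void *)kc);
	}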

[vbabka@suse.cz: for slab mempools, pool_data is not the object size]
Link: https://lkml.kernel.org/r/13c4bd6e-09d3-efce-43a5-5a99be8bc96b@suse.cz
Link: https://lkml.kernel.org/r/20221028154823.you.615-kees@kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/lkml/f4fc52c4-7c18-1d76-0c7a-4058ea2486b9@suse.cz/
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reported-by: Anders Roxell <anders.roxell@linaro.org>
Link: https://lore.kernel.org/all/20221031105514.GB69385@mutt/
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mempool.c

index 96488b13a1ef15fd99bec80b88f86cc1ad6f4cca..734bcf5afbb78325ee44432326e48daf6986fa44 100644
@@ -57,8 +57,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
 static void check_element(mempool_t *pool, void *element)
 {
        /* Mempools backed by slab allocator */
-       if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
-               __check_element(pool, element, ksize(element));
+       if (pool->free == mempool_kfree) {
+               __check_element(pool, element, (size_t)pool->pool_data);
+       } else if (pool->free == mempool_free_slab) {
+               __check_element(pool, element, kmem_cache_size(pool->pool_data));
        } else if (pool->free == mempool_free_pages) {
                /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
@@ -80,8 +82,10 @@ static void __poison_element(void *element, size_t size)
 static void poison_element(mempool_t *pool, void *element)
 {
        /* Mempools backed by slab allocator */
-       if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
-               __poison_element(element, ksize(element));
+       if (pool->alloc == mempool_kmalloc) {
+               __poison_element(element, (size_t)pool->pool_data);
+       } else if (pool->alloc == mempool_alloc_slab) {
+               __poison_element(element, kmem_cache_size(pool->pool_data));
        } else if (pool->alloc == mempool_alloc_pages) {
                /* Mempools backed by page allocator */
                int order = (int)(long)pool->pool_data;
@@ -111,8 +115,10 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
-       if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-               kasan_unpoison_range(element, __ksize(element));
+       if (pool->alloc == mempool_kmalloc)
+               kasan_unpoison_range(element, (size_t)pool->pool_data);
+       else if (pool->alloc == mempool_alloc_slab)
+               kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
        else if (pool->alloc == mempool_alloc_pages)
                kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
                                     false);
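
A hypothetical illustration of the failure mode this avoids (the sizes
are made up for the example, not taken from a real report):

	/* Hypothetical example; sizes chosen for illustration only. */
	mempool_t *pool = mempool_create_kmalloc_pool(2, 100);
	/*
	 * Elements come from the kmalloc-128 bucket, so ksize() reports
	 * 128 even though only 100 bytes were requested.  Poisoning all
	 * 128 bytes touches the 28 trailing bytes that KASAN,
	 * UBSAN_BOUNDS, or FORTIFY_SOURCE may treat as out of bounds;
	 * poisoning (size_t)pool->pool_data == 100 bytes stays within
	 * the requested size.
	 */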