page_pool: disable direct recycling based on pool->cpuid on destroy
author    Alexander Lobakin <aleksander.lobakin@intel.com>
          Thu, 15 Feb 2024 11:39:05 +0000 (12:39 +0100)
committer Jakub Kicinski <kuba@kernel.org>
          Mon, 19 Feb 2024 19:48:00 +0000 (11:48 -0800)
Now that direct recycling is performed based on pool->cpuid when it is
set, memory leaks are possible:

1. A pool is destroyed.
2. The alloc cache is emptied (this is done only once).
3. pool->cpuid is still set.
4. napi_pp_put_page() does direct recycling based on pool->cpuid.
5. The alloc cache is non-empty again, but it will never be freed
   (see the interleaving sketched below).
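
The interleaving, roughly (an illustrative sketch, not verbatim kernel
code; the names are the fields and functions touched by this patch):

  CPU A: page_pool_destroy()       CPU B: napi_pp_put_page()
  --------------------------       -------------------------
  empty the alloc cache (once)
                                   pp->cpuid == smp_processor_id(),
                                   so the page is recycled directly
                                   into pool->alloc.cache
  nobody empties the alloc cache
  again; its pages are never freed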

To avoid this, set pool->cpuid to -1 when unlinking the NAPI, so that
no direct recycling is possible once the cache has been emptied. This
adds a bit of overhead, as pool->cpuid must now be read via READ_ONCE()
to avoid torn reads.
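
A minimal sketch of the resulting WRITE_ONCE()/READ_ONCE() pairing
(condensed from the hunks below):

	/* destroy path, page_pool_disable_direct_recycling() */
	WRITE_ONCE(pool->cpuid, -1);	/* no CPU id ever matches -1 */

	/* hot path, napi_pp_put_page() */
	allow_direct |= READ_ONCE(pp->cpuid) == cpuid;
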
Rename page_pool_unlink_napi() -> page_pool_disable_direct_recycling()
to reflect what it actually does and unexport it.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/20240215113905.96817-1-aleksander.lobakin@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/page_pool/types.h
net/core/page_pool.c
net/core/skbuff.c

diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 3828396ae60c2e55001b8d95d495d9d1ca78ad68..3590fbe6e3f15477b63bc2362ec360bdc93dd8db 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -210,17 +210,12 @@ struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
 struct xdp_mem_info;
 
 #ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
                           struct xdp_mem_info *mem);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                             int count);
 #else
-static inline void page_pool_unlink_napi(struct page_pool *pool)
-{
-}
-
 static inline void page_pool_destroy(struct page_pool *pool)
 {
 }
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 89c835fcf094481d3b5d89f84a60db92aea69b6f..e8b9399d8e32494c9d093b80c421646e666af832 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -949,8 +949,13 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
        pool->xdp_mem_id = mem->id;
 }
 
-void page_pool_unlink_napi(struct page_pool *pool)
+static void page_pool_disable_direct_recycling(struct page_pool *pool)
 {
+       /* Disable direct recycling based on pool->cpuid.
+        * Paired with READ_ONCE() in napi_pp_put_page().
+        */
+       WRITE_ONCE(pool->cpuid, -1);
+
        if (!pool->p.napi)
                return;
 
@@ -962,7 +967,6 @@ void page_pool_unlink_napi(struct page_pool *pool)
 
        WRITE_ONCE(pool->p.napi, NULL);
 }
-EXPORT_SYMBOL(page_pool_unlink_napi);
 
 void page_pool_destroy(struct page_pool *pool)
 {
@@ -972,7 +976,7 @@ void page_pool_destroy(struct page_pool *pool)
        if (!page_pool_put(pool))
                return;
 
-       page_pool_unlink_napi(pool);
+       page_pool_disable_direct_recycling(pool);
        page_pool_free_frag(pool);
 
        if (!page_pool_release(pool))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0d9a489e6ae1805c397ea70e0596e4fb7c990aa7..b41856585c24ec60180ac5660608a8af38c1a4df 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1018,7 +1018,7 @@ bool napi_pp_put_page(struct page *page, bool napi_safe)
                unsigned int cpuid = smp_processor_id();
 
                allow_direct = napi && READ_ONCE(napi->list_owner) == cpuid;
-               allow_direct |= (pp->cpuid == cpuid);
+               allow_direct |= READ_ONCE(pp->cpuid) == cpuid;
        }
 
        /* Driver set this to memory recycling info. Reset it on recycle.