mm: page_frag: avoid caller accessing 'page_frag_cache' directly
author Yunsheng Lin <linyunsheng@huawei.com>
Mon, 28 Oct 2024 11:53:39 +0000 (19:53 +0800)
committer Jakub Kicinski <kuba@kernel.org>
Mon, 11 Nov 2024 18:56:27 +0000 (10:56 -0800)
Use the appropriate page_frag API instead of having callers access
'page_frag_cache' internals directly.
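
For reference, a minimal caller-side sketch of the helpers the hunks below
convert to. Only the page_frag_* calls are part of the API touched by this
series; 'foo_dev' and the foo_*() functions are hypothetical, purely for
illustration:

	#include <linux/gfp.h>
	#include <linux/page_frag_cache.h>

	/* Hypothetical consumer that embeds a page_frag_cache. */
	struct foo_dev {
		struct page_frag_cache pf_cache;
	};

	static int foo_open(struct foo_dev *fd)
	{
		/* Instead of open-coding fd->pf_cache.va = NULL */
		page_frag_cache_init(&fd->pf_cache);
		return 0;
	}

	static void *foo_alloc(struct foo_dev *fd, unsigned int len,
			       gfp_t gfp, bool *pfmemalloc)
	{
		void *data = page_frag_alloc(&fd->pf_cache, len, gfp);

		/* Instead of reading fd->pf_cache.pfmemalloc directly */
		if (data)
			*pfmemalloc = page_frag_cache_is_pfmemalloc(&fd->pf_cache);
		return data;
	}

	static void foo_close(struct foo_dev *fd)
	{
		/* Instead of open-coding virt_to_page() +
		 * __page_frag_cache_drain()
		 */
		page_frag_cache_drain(&fd->pf_cache);
	}

The conversions in vhost, skbuff, rxrpc, sunrpc and the page_frag selftest
below follow this same pattern.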

CC: Andrew Morton <akpm@linux-foundation.org>
CC: Linux-MM <linux-mm@kvack.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Acked-by: Chuck Lever <chuck.lever@oracle.com>
Link: https://patch.msgid.link/20241028115343.3405838-5-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/vhost/net.c
include/linux/page_frag_cache.h
net/core/skbuff.c
net/rxrpc/conn_object.c
net/rxrpc/local_object.c
net/sunrpc/svcsock.c
tools/testing/selftests/mm/page_frag/page_frag_test.c

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f16279351db56e8a55db21c12c27742877ae0e71..9ad37c0121890ee5b756ca71c31ec00d8f097707 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1325,7 +1325,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
                        vqs[VHOST_NET_VQ_RX]);
 
        f->private_data = n;
-       n->pf_cache.va = NULL;
+       page_frag_cache_init(&n->pf_cache);
 
        return 0;
 }
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 67ac8626ed9bba1dbc151c9bf023ad7c39553020..0a52f7a179c8f9c1400fb92990aa07142b5b0548 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -7,6 +7,16 @@
 #include <linux/mm_types_task.h>
 #include <linux/types.h>
 
+static inline void page_frag_cache_init(struct page_frag_cache *nc)
+{
+       nc->va = NULL;
+}
+
+static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
+{
+       return !!nc->pfmemalloc;
+}
+
 void page_frag_cache_drain(struct page_frag_cache *nc);
 void __page_frag_cache_drain(struct page *page, unsigned int count);
 void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 00afeb90c23a9714043c2f720128676732f50ea8..6841e61a6bd0b66e7b1df0545697604479c6b7a1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -753,14 +753,14 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
        if (in_hardirq() || irqs_disabled()) {
                nc = this_cpu_ptr(&netdev_alloc_cache);
                data = page_frag_alloc(nc, len, gfp_mask);
-               pfmemalloc = nc->pfmemalloc;
+               pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
        } else {
                local_bh_disable();
                local_lock_nested_bh(&napi_alloc_cache.bh_lock);
 
                nc = this_cpu_ptr(&napi_alloc_cache.page);
                data = page_frag_alloc(nc, len, gfp_mask);
-               pfmemalloc = nc->pfmemalloc;
+               pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
 
                local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
                local_bh_enable();
@@ -850,7 +850,7 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
                len = SKB_HEAD_ALIGN(len);
 
                data = page_frag_alloc(&nc->page, len, gfp_mask);
-               pfmemalloc = nc->page.pfmemalloc;
+               pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
        }
        local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 1539d315afe74ace265dd7ac32ffc7031d0b2323..694c4df7a1a31e8f0c005be9625c412c6f441be7 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -337,9 +337,7 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
         */
        rxrpc_purge_queue(&conn->rx_queue);
 
-       if (conn->tx_data_alloc.va)
-               __page_frag_cache_drain(virt_to_page(conn->tx_data_alloc.va),
-                                       conn->tx_data_alloc.pagecnt_bias);
+       page_frag_cache_drain(&conn->tx_data_alloc);
        call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
 }
 
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index f9623ace22016f622ce2b4a5ccfac0e5112cf741..2792d230460532625cfb6c810d09d51f353d9f3c 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -452,9 +452,7 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
 #endif
        rxrpc_purge_queue(&local->rx_queue);
        rxrpc_purge_client_connections(local);
-       if (local->tx_alloc.va)
-               __page_frag_cache_drain(virt_to_page(local->tx_alloc.va),
-                                       local->tx_alloc.pagecnt_bias);
+       page_frag_cache_drain(&local->tx_alloc);
 }
 
 /*
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 825ec53576912ae4c9f69c031f6dd171f087d413..b785425c33151445252510fd3a428dd0bf7976aa 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1608,7 +1608,6 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
 static void svc_sock_free(struct svc_xprt *xprt)
 {
        struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
-       struct page_frag_cache *pfc = &svsk->sk_frag_cache;
        struct socket *sock = svsk->sk_sock;
 
        trace_svcsock_free(svsk, sock);
@@ -1618,8 +1617,7 @@ static void svc_sock_free(struct svc_xprt *xprt)
                sockfd_put(sock);
        else
                sock_release(sock);
-       if (pfc->va)
-               __page_frag_cache_drain(virt_to_head_page(pfc->va),
-                                       pfc->pagecnt_bias);
+
+       page_frag_cache_drain(&svsk->sk_frag_cache);
        kfree(svsk);
 }
diff --git a/tools/testing/selftests/mm/page_frag/page_frag_test.c b/tools/testing/selftests/mm/page_frag/page_frag_test.c
index 13c44133e009bfccf36165525a0622f83d6e212b..e806c1866e36661f8e9cdcfc2e7f8c40130f8c65 100644
--- a/tools/testing/selftests/mm/page_frag/page_frag_test.c
+++ b/tools/testing/selftests/mm/page_frag/page_frag_test.c
@@ -126,7 +126,7 @@ static int __init page_frag_test_init(void)
        u64 duration;
        int ret;
 
-       test_nc.va = NULL;
+       page_frag_cache_init(&test_nc);
        atomic_set(&nthreads, 2);
        init_completion(&wait);