/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _NET_PAGE_POOL_TYPES_H
#define _NET_PAGE_POOL_TYPES_H

#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set, all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* device driver responsibility
					*/
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case:
 * XDP_DROP allows recycling objects directly into this array, as it
 * shares the same softirq/NAPI protection. If the cache is already
 * full (or partly full), the XDP_DROP recycles would have to take a
 * slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};
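
/* Example (illustrative sketch, not from this file): in NAPI context,
 * e.g. under XDP_DROP, a driver can recycle a page straight into this
 * cache with page_pool_recycle_direct() from <net/page_pool/helpers.h>;
 * "pool" and "page" below are placeholders.
 *
 *	case XDP_DROP:
 *		page_pool_recycle_direct(pool, page);
 *		break;
 */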

/**
 * struct page_pool_params - page pool parameters
 * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
 * @order:	2^order pages on allocation
 * @pool_size:	size of the ptr_ring
 * @nid:	NUMA node id to allocate pages from
 * @dev:	device, for DMA pre-mapping purposes
 * @netdev:	netdev this pool will serve (leave as NULL if none or multiple)
 * @napi:	NAPI which is the sole consumer of pages, otherwise NULL
 * @dma_dir:	DMA mapping direction
 * @max_len:	max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
 * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
 */
struct page_pool_params {
	struct_group_tagged(page_pool_params_fast, fast,
		unsigned int	flags;
		unsigned int	order;
		unsigned int	pool_size;
		int		nid;
		struct device	*dev;
		struct napi_struct *napi;
		enum dma_data_direction dma_dir;
		unsigned int	max_len;
		unsigned int	offset;
	);
	struct_group_tagged(page_pool_params_slow, slow,
		struct net_device *netdev;
/* private: used by test code only */
		void (*init_callback)(struct page *page, void *arg);
		void *init_arg;
	);
};
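
/* Example (illustrative sketch, not part of this header): a driver
 * typically fills the fields documented above and creates one pool
 * per RX queue; "priv", "DESC_NUM" and the surrounding error handling
 * are placeholders.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= DESC_NUM,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= priv->dev,
 *		.napi		= &priv->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */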

#ifdef CONFIG_PAGE_POOL_STATS
/**
 * struct page_pool_alloc_stats - allocation statistics
 * @fast:	successful fast path allocations
 * @slow:	slow path order-0 allocations
 * @slow_high_order: slow path high order allocations
 * @empty:	ptr ring is empty, so a slow path allocation was forced
 * @refill:	an allocation which triggered a refill of the cache
 * @waive:	pages obtained from the ptr ring that cannot be added to
 *		the cache due to a NUMA mismatch
 */
struct page_pool_alloc_stats {
	u64 fast;
	u64 slow;
	u64 slow_high_order;
	u64 empty;
	u64 refill;
	u64 waive;
};

/**
 * struct page_pool_recycle_stats - recycling (freeing) statistics
 * @cached:	recycling placed page in the page pool cache
 * @cache_full:	page pool cache was full
 * @ring:	page placed into the ptr ring
 * @ring_full:	page released from page pool because the ptr ring was full
 * @released_refcnt: page released (and not recycled) because refcnt > 1
 */
struct page_pool_recycle_stats {
	u64 cached;
	u64 cache_full;
	u64 ring;
	u64 ring_full;
	u64 released_refcnt;
};

/**
 * struct page_pool_stats - combined page pool use statistics
 * @alloc_stats:	see struct page_pool_alloc_stats
 * @recycle_stats:	see struct page_pool_recycle_stats
 *
 * Wrapper struct for combining page pool stats with different storage
 * requirements.
 */
struct page_pool_stats {
	struct page_pool_alloc_stats alloc_stats;
	struct page_pool_recycle_stats recycle_stats;
};
#endif
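
/* Example (illustrative sketch): with CONFIG_PAGE_POOL_STATS enabled,
 * a driver can fill this wrapper via page_pool_get_stats() (declared
 * in <net/page_pool/helpers.h>) and fold the result into its ethtool
 * stats; "pool" is a placeholder.
 *
 *	struct page_pool_stats stats = { };
 *
 *	if (page_pool_get_stats(pool, &stats))
 *		pr_info("fast path allocs: %llu\n", stats.alloc_stats.fast);
 */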

struct page_pool {
	struct page_pool_params_fast p;

	bool has_init_callback;

	long frag_users;
	struct page *frag_page;
	unsigned int frag_offset;
	u32 pages_state_hold_cnt;

	struct delayed_work release_dw;
	void (*disconnect)(void *pool);
	unsigned long defer_start;
	unsigned long defer_warn;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for allocation side
	 *
	 * The driver's allocation side usually already performs some
	 * kind of resource protection. Piggyback on this protection,
	 * and require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, since the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule; NAPI scheduling
	 * guarantees that a single napi_struct will only run on a
	 * single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache. This
	 * refcnt serves to simplify the driver's error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;

	/* Slow/Control-path information follows */
	struct page_pool_params_slow slow;
	/* User-facing fields, protected by page_pools_lock */
	struct {
		struct hlist_node list;
		u64 detach_time;
		u32 napi_id;
		u32 id;
	} user;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
					  int cpuid);

struct xdp_mem_info;
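
/* Example (illustrative sketch): the basic allocation/release cycle.
 * Allocation happens in NAPI context; release may also go through the
 * helpers in <net/page_pool/helpers.h>. "pool" is a placeholder.
 *
 *	struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	page_pool_put_full_page(pool, page, false);
 */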

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}
static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}
static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size,
				bool allow_direct);

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
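
/* Example (illustrative sketch): an RX poll loop can keep allocations
 * local to the CPU's NUMA node; "pool" is a placeholder and the check
 * pattern is an assumption, not mandated by this header.
 *
 *	if (pool->p.nid != numa_mem_id())
 *		page_pool_update_nid(pool, numa_mem_id());
 */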

#endif /* _NET_PAGE_POOL_TYPES_H */