/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it also has fallbacks that act like
 * the regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() in place of dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to deallocate the page_pool object.  Thus, API
 * users must make sure to call page_pool_release_page() when a page
 * is "leaving" the page_pool, or call page_pool_put_page() where
 * appropriate, in order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in the case of an elevated refcnt,
 * release the DMA mapping and the in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
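/*
 * Illustrative sketch (not part of the original header): a driver RX
 * path might use the pool roughly as below; the names my_rxq,
 * my_rx_refill() and my_rx_free() are hypothetical.
 *
 *	static int my_rx_refill(struct my_rxq *rxq)
 *	{
 *		struct page *page = page_pool_dev_alloc_pages(rxq->pp);
 *
 *		if (!page)
 *			return -ENOMEM;
 *		rxq->bufs[rxq->prod++ & rxq->mask] = page;
 *		return 0;
 *	}
 *
 *	static void my_rx_free(struct my_rxq *rxq, struct page *page)
 *	{
 *		page_pool_put_full_page(rxq->pp, page, false);
 *	}
 */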
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* the device driver's responsibility.
					*/
#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV |\
				 PP_FLAG_PAGE_FRAG)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case, as
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, since it shares the same softirq/NAPI protection.  If
 * the cache is already full (or partly full), then the XDP_DROP
 * recycles would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
	void (*init_callback)(struct page *page, void *arg);
	void *init_arg;
};

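/*
 * Illustrative sketch (not part of the original header): a NIC driver
 * might set up one pool per RX-queue along these lines; the names
 * rxq, MY_RX_RING_SIZE and MY_RX_HEADROOM are hypothetical.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= MY_RX_RING_SIZE,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= rxq->netdev->dev.parent,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= MY_RX_HEADROOM,
 *	};
 *
 *	rxq->page_pool = page_pool_create(&pp_params);
 *	if (IS_ERR(rxq->page_pool))
 *		return PTR_ERR(rxq->page_pool);
 */
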
#ifdef CONFIG_PAGE_POOL_STATS
struct page_pool_alloc_stats {
	u64 fast; /* fast path allocations */
	u64 slow; /* slow-path order 0 allocations */
	u64 slow_high_order; /* slow-path high order allocations */
	u64 empty; /* failed refills due to empty ptr ring, forcing
		    * slow path allocation
		    */
	u64 refill; /* allocations via successful refill */
	u64 waive;  /* failed refills due to numa zone mismatch */
};
#endif

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;
	unsigned int frag_offset;
	struct page *frag_page;
	long frag_users;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for the allocation side
	 *
	 * The drivers' allocation side usually already performs some
	 * kind of resource protection.  Piggyback on this protection,
	 * and require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule.  The NAPI schedule
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue, being
	 * protected by NAPI, due to the above pp_alloc_cache.  This
	 * refcnt serves to simplify the driver's error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}

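/*
 * Illustrative sketch (not part of the original header): with
 * PP_FLAG_PAGE_FRAG set in the pool flags, a driver can carve smaller
 * buffers out of a shared page; rx_buf and MY_RX_BUF_SIZE are
 * hypothetical.
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
 *					MY_RX_BUF_SIZE);
 *	if (!page)
 *		return -ENOMEM;
 *	rx_buf->page = page;
 *	rx_buf->offset = offset;
 */
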
/* Get the stored DMA direction.  A driver might decide to store this
 * locally and avoid the extra cache-line access into the page_pool
 * just to determine the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}

bool page_pool_return_skb_page(struct page *page);

struct page_pool *page_pool_create(const struct page_pool_params *params);

struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size,
				  bool allow_direct);

static inline void page_pool_fragment_page(struct page *page, long nr)
{
	atomic_long_set(&page->pp_frag_count, nr);
}

static inline long page_pool_defrag_page(struct page *page, long nr)
{
	long ret;

	/* If nr == pp_frag_count then we have cleared all remaining
	 * references to the page.  No need to actually overwrite it;
	 * instead we can leave this to be overwritten by the calling
	 * function.
	 *
	 * The main advantage of doing this is that an atomic_read is
	 * generally a much cheaper operation than an atomic update,
	 * especially when dealing with a page that may be partitioned
	 * into only 2 or 3 pieces.
	 */
	if (atomic_long_read(&page->pp_frag_count) == nr)
		return 0;

	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
	WARN_ON(ret < 0);
	return ret;
}

static inline bool page_pool_is_last_frag(struct page_pool *pool,
					  struct page *page)
{
	/* If fragments aren't enabled or count is 0 we were the last user */
	return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
	       (page_pool_defrag_page(page, 1) == 0);
}

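/*
 * Illustrative sketch (not part of the original header): a driver that
 * splits one page into several receive buffers itself can take the
 * references up front and then drop one per buffer as each is released
 * via page_pool_put_page() below; MY_BUFS_PER_PAGE is hypothetical.
 *
 *	page_pool_fragment_page(page, MY_BUFS_PER_PAGE);
 *
 *	... later, once per buffer that is done with the page:
 *
 *	page_pool_put_page(pool, page, -1, false);
 */
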
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_is_last_frag(pool, page))
		return;

	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
#endif
}

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_page(pool, page, -1, allow_direct);
}

/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

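/*
 * Illustrative sketch (not part of the original header): from NAPI
 * context, e.g. when an XDP program verdict is XDP_DROP, the page can
 * be recycled straight into the lock-free alloc cache.
 *
 *	case XDP_DROP:
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *		break;
 */
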
#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

	return ret;
}

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr = addr;
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		page->dma_addr_upper = upper_32_bits(addr);
}

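/*
 * Illustrative sketch (not part of the original header): a driver that
 * set PP_FLAG_DMA_MAP can fetch the pre-mapped address and do the
 * CPU-side sync itself before touching packet data; rxq and pkt_len
 * are hypothetical.
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_for_cpu(rxq->dev, dma + rxq->rx_offset, pkt_len,
 *				page_pool_get_dma_dir(rxq->page_pool));
 */
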
static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}

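/*
 * Illustrative sketch (not part of the original header): calling this
 * from the driver's NAPI poll routine keeps the pool allocating from
 * the CPU-local NUMA node; my_napi_poll() and struct my_rxq are
 * hypothetical.
 *
 *	static int my_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_rxq *rxq = container_of(napi, struct my_rxq, napi);
 *
 *		page_pool_nid_changed(rxq->page_pool, numa_mem_id());
 *		...
 *	}
 */
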
static inline void page_pool_ring_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);
}

static inline void page_pool_ring_unlock(struct page_pool *pool)
	__releases(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

#endif /* _NET_PAGE_POOL_H */