/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX	LONG_MAX

#ifdef CONFIG_PAGE_POOL_STATS
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)						\
	do {									\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);					\
	} while (0)

#define recycle_stat_add(pool, __stat, val)					\
	do {									\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);					\
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};

bool page_pool_get_stats(struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu = 0;

	if (!stats)
		return false;

	/* The caller is responsible for initializing stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);

u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);

u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);

#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
#define recycle_stat_add(pool, __stat, val)
#endif

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA
	 * sending, which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
	    pool->p.flags & PP_FLAG_PAGE_FRAG)
		return -EINVAL;

#ifdef CONFIG_PAGE_POOL_STATS
	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
	if (!pool->recycle_stats)
		return -ENOMEM;
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* Drivers that call page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);

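/* Illustrative usage sketch (not part of this file; ring_size, pdev and the
 * chosen headroom are hypothetical): a driver typically creates one pool per
 * RX queue and lets the pool handle DMA mapping and device sync:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.offset		= XDP_PACKET_HEADROOM,
 *		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */
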
static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return NULL;
	}

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable. This
	 * assumes the CPU refilling the driver RX-ring will also run RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fallthrough to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			alloc_stat_inc(pool, waive);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (e.g. a 32-bit CPU with 64-bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it leaves
	 * the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
						  DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	page_pool_set_dma_addr(page, dma);

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

	return true;
}

static void page_pool_set_pp_info(struct page_pool *pool,
				  struct page *page)
{
	page->pp = pool;
	page->pp_magic |= PP_SIGNATURE;
	if (pool->p.init_callback)
		pool->p.init_callback(page, pool->p.init_arg);
}

static void page_pool_clear_pp_info(struct page *page)
{
	page->pp_magic = 0;
	page->pp = NULL;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
	    unlikely(!page_pool_dma_map(pool, page))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page);

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_flags = pool->p.flags;
	unsigned int pp_order = pool->p.order;
	struct page *page;
	int i, nr_pages;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return __page_pool_alloc_page_order(pool, gfp);

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
					       pool->alloc.cache);
	if (unlikely(!nr_pages))
		return NULL;

	/* Pages have been filled into the alloc.cache array, but count is zero
	 * and the pages have not yet been (possibly) DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pool->alloc.cache[i];
		if ((pp_flags & PP_FLAG_DMA_MAP) &&
		    unlikely(!page_pool_dma_map(pool, page))) {
			put_page(page);
			continue;
		}

		page_pool_set_pp_info(pool, page);
		pool->alloc.cache[pool->alloc.count++] = page;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, page,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		page = NULL;
	}

	/* A page that was just allocated should/must have refcnt == 1. */
	return page;
}

/* For using page_pool to replace alloc_pages() API calls, but providing a
 * synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

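/* Illustrative call site (not from this file; rxq and the refill loop are
 * hypothetical): in NAPI context a driver refills its RX ring with atomic
 * allocations, served from the alloc cache / ptr_ring before hitting the
 * page allocator:
 *
 *	page = page_pool_alloc_pages(rxq->pool, GFP_ATOMIC | __GFP_NOWARN);
 *	if (!page)
 *		break;
 *	dma = page_pool_get_dma_addr(page) + rxq->pool->p.offset;
 */
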
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

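/* Worked example of the serial-number arithmetic above (values are
 * illustrative): after a u32 wrap, hold_cnt = 5 and release_cnt = 0xfffffffe
 * still give _distance(5, 0xfffffffe) = (s32)(5 - 0xfffffffe) = 7, i.e. seven
 * pages in flight, as long as the true distance stays below 2^31.
 */
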
/* Disconnects a page (from a page_pool). API users can have a need
 * to disconnect a page (from a page_pool), to allow it to be used as
 * a regular page (that will eventually be returned to the normal
 * page-allocator via put_page).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page_pool_get_dma_addr(page);

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
	page_pool_clear_pp_info(page);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;
	/* BH protection not needed if current is softirq */
	if (in_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	if (!ret) {
		recycle_stat_inc(pool, ring);
		return true;
	}

	return false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	recycle_stat_inc(pool, cached);
	return true;
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 *
	 * A page is NOT reusable when it was allocated while the system
	 * was under memory pressure (page_is_pfmemalloc).
	 */
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}
	/* Fallback/non-XDP mode: API users have an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	recycle_stat_inc(pool, released_refcnt);
	/* Do not replace this with page_pool_return_page() */
	page_pool_release_page(pool, page);
	put_page(page);

	return NULL;
}

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_defragged_page);

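/* Illustrative call site (hypothetical driver code, assuming the
 * page_pool_put_full_page() wrapper declared in the page_pool header):
 * on XDP_DROP inside NAPI, the page can be recycled directly into the
 * alloc-side cache:
 *
 *	case XDP_DROP:
 *		page_pool_put_full_page(rxq->pool, page, true);
 *		break;
 */
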
/* Caller must not use data area after call, as this function overwrites it */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		/* It is not the last user for the page frag case */
		if (!page_pool_is_last_frag(pool, page))
			continue;

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	page_pool_ring_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}
	recycle_stat_add(pool, ring, i);
	page_pool_ring_unlock(pool);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);

static struct page *page_pool_drain_frag(struct page_pool *pool,
					 struct page *page)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_defrag_page(page, drain_count)))
		return NULL;

	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page, -1);

		return page;
	}

	page_pool_return_page(pool, page);
	return NULL;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	struct page *page = pool->frag_page;

	pool->frag_page = NULL;

	if (!page || page_pool_defrag_page(page, drain_count))
		return;

	page_pool_return_page(pool, page);
}

struct page *page_pool_alloc_frag(struct page_pool *pool,
				  unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page = pool->frag_page;

	if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
		    size > max_size))
		return NULL;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (page && *offset + size > max_size) {
		page = page_pool_drain_frag(pool, page);
		if (page) {
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!page) {
		page = page_pool_alloc_pages(pool, gfp);
		if (unlikely(!page)) {
			pool->frag_page = NULL;
			return NULL;
		}

		pool->frag_page = page;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_page(page, BIAS_MAX);
		return page;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	alloc_stat_inc(pool, fast);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_frag);

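/* Illustrative usage sketch (hypothetical driver; requires the pool to have
 * been created with PP_FLAG_PAGE_FRAG): several small RX buffers can share
 * one pool page, with the pool tracking the per-page user count:
 *
 *	unsigned int offset;
 *	struct page *page;
 *	void *buf;
 *
 *	page = page_pool_alloc_frag(rxq->pool, &offset, 2048,
 *				    GFP_ATOMIC | __GFP_NOWARN);
 *	if (!page)
 *		return -ENOMEM;
 *	buf = page_address(page) + offset;
 */
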
static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

#ifdef CONFIG_PAGE_POOL_STATS
	free_percpu(pool->recycle_stats);
#endif
	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}

void page_pool_unlink_napi(struct page_pool *pool)
{
	if (!pool->p.napi)
		return;

	/* To avoid races with recycling and additional barriers make sure
	 * pool and NAPI are unlinked when NAPI is disabled.
	 */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
		READ_ONCE(pool->p.napi->list_owner) != -1);

	WRITE_ONCE(pool->p.napi, NULL);
}
EXPORT_SYMBOL(page_pool_unlink_napi);

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_unlink_napi(pool);
	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

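/* Illustrative teardown order (hypothetical driver, assuming it registered
 * the pool as an XDP memory model via xdp_rxq_info_reg_mem_model()): stop
 * RX/NAPI first so no new pages are handed out, then unregister the rxq
 * info and destroy the pool; in-flight pages keep the pool alive until
 * they are returned:
 *
 *	napi_disable(&rxq->napi);
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *	page_pool_destroy(rxq->pool);
 */
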
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);

bool page_pool_return_skb_page(struct page *page, bool napi_safe)
{
	struct napi_struct *napi;
	struct page_pool *pp;
	bool allow_direct;

	page = compound_head(page);

	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
	 * in order to preserve any existing bits, such as bit 0 for the
	 * head page of compound page and bit 1 for pfmemalloc page, so
	 * mask those bits for freeing side when doing below checking,
	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
	 * to avoid recycling the pfmemalloc page.
	 */
	if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
		return false;

	pp = page->pp;

	/* Allow direct recycle if we have reasons to believe that we are
	 * in the same context as the consumer would run, so there's
	 * no possible race.
	 */
	napi = READ_ONCE(pp->p.napi);
	allow_direct = napi_safe && napi &&
		READ_ONCE(napi->list_owner) == smp_processor_id();

	/* Driver sets this to memory recycling info. Reset it on recycle.
	 * This will *not* work for NIC using a split-page memory model.
	 * The page will be returned to the pool here regardless of the
	 * 'flipped' fragment being in use or not.
	 */
	page_pool_put_full_page(pp, page, allow_direct);

	return true;
}
EXPORT_SYMBOL(page_pool_return_skb_page);