/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
 * Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX LONG_MAX

#ifdef CONFIG_PAGE_POOL_STATS
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat) \
	do { \
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat); \
	} while (0)

#define recycle_stat_add(pool, __stat, val) \
	do { \
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val); \
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};

/**
 * page_pool_get_stats() - fetch page pool stats
 * @pool: pool from which page was allocated
 * @stats: struct page_pool_stats to fill in
 *
 * Retrieve statistics about the page_pool. This API is only available
 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 * A pointer to a caller-allocated struct page_pool_stats structure
 * is passed to this API, which fills it in. The caller can then report
 * those stats to the user (perhaps via ethtool, debugfs, etc.).
 */
bool page_pool_get_stats(struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu = 0;

	if (!stats)
		return false;

	/* The caller is responsible for initializing stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);
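
/* Illustrative sketch (not part of this file): fetching the aggregated
 * stats from a driver. Only page_pool_get_stats() and struct
 * page_pool_stats are real; the pr_info() reporting is an assumption.
 *
 *	struct page_pool_stats stats = { };
 *
 *	if (page_pool_get_stats(pool, &stats))
 *		pr_info("fast allocs: %llu, ring recycles: %llu\n",
 *			stats.alloc_stats.fast, stats.recycle_stats.ring);
 */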

u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);

u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);
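
/* Illustrative sketch: wiring the three helpers above into a driver's
 * ethtool ops. The foo_* names and priv are hypothetical; the ethtool
 * callback signatures are the standard ones.
 *
 *	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			data = page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static int foo_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		return sset == ETH_SS_STATS ?
 *		       page_pool_ethtool_stats_get_count() : -EOPNOTSUPP;
 *	}
 *
 *	static void foo_get_ethtool_stats(struct net_device *dev,
 *					  struct ethtool_stats *e, u64 *data)
 *	{
 *		struct page_pool_stats stats = { };
 *
 *		page_pool_get_stats(priv->page_pool, &stats);
 *		data = page_pool_ethtool_stats_get(data, &stats);
 *	}
 */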

#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
#define recycle_stat_add(pool, __stat, val)
#endif

static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	bool in_softirq = in_softirq();

	if (in_softirq)
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);

	return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
				      bool in_softirq)
	__releases(&pool->ring.producer_lock)
{
	if (in_softirq)
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL additionally allows a page to be used for DMA
	 * transmission, which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

#ifdef CONFIG_PAGE_POOL_STATS
	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
	if (!pool->recycle_stats)
		return -ENOMEM;
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
		free_percpu(pool->recycle_stats);
#endif
		return -ENOMEM;
	}

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver calling page_pool_create() must also call
	 * page_pool_destroy().
	 */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

/**
 * page_pool_create() - create a page pool.
 * @params: parameters, see struct page_pool_params
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);
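
/* Illustrative sketch: creating a pool for one RX queue. The concrete
 * values and the pdev pointer are assumptions; see struct
 * page_pool_params for all fields.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,		// sizes the ptr_ring
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,	// DMA-sync length on recycle
 *		.offset		= 0,		// where HW starts writing
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */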

static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return NULL;
	}

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable.
	 * This assumes the CPU refilling the driver RX-ring will also run
	 * the RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			alloc_stat_inc(pool, waive);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always
	 * fit into page private data (i.e. 32bit cpu with 64bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it
	 * leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
						  DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	if (page_pool_set_dma_addr(page, dma))
		goto unmap_failed;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

	return true;

unmap_failed:
	WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return false;
}

static void page_pool_set_pp_info(struct page_pool *pool,
				  struct page *page)
{
	page->pp = pool;
	page->pp_magic |= PP_SIGNATURE;

	/* Ensuring all pages have been split into one fragment initially:
	 * page_pool_set_pp_info() is only called once for every page when it
	 * is allocated from the page allocator and page_pool_fragment_page()
	 * is dirtying the same cache line as the page->pp_magic above, so
	 * the overhead is negligible.
	 */
	page_pool_fragment_page(page, 1);
	if (pool->p.init_callback)
		pool->p.init_callback(page, pool->p.init_arg);
}

static void page_pool_clear_pp_info(struct page *page)
{
	page->pp_magic = 0;
	page->pp = NULL;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
	    unlikely(!page_pool_dma_map(pool, page))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page);

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_flags = pool->p.flags;
	unsigned int pp_order = pool->p.order;
	struct page *page;
	int i, nr_pages;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return __page_pool_alloc_page_order(pool, gfp);

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
					       pool->alloc.cache);
	if (unlikely(!nr_pages))
		return NULL;

	/* Pages have been filled into the alloc.cache array, but the count
	 * is zero and the page elements have not been (possibly) DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pool->alloc.cache[i];
		if ((pp_flags & PP_FLAG_DMA_MAP) &&
		    unlikely(!page_pool_dma_map(pool, page))) {
			put_page(page);
			continue;
		}

		page_pool_set_pp_info(pool, page);
		pool->alloc.cache[pool->alloc.count++] = page;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, page,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		page = NULL;
	}

	/* A page that was just allocated should/must have refcnt 1. */
	return page;
}

/* For using page_pool to replace alloc_pages() API calls, while providing
 * a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
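
/* Illustrative sketch: posting a pool page to a hypothetical RX
 * descriptor ring. rxq and its desc[] layout are assumptions; the DMA
 * address helper is real when PP_FLAG_DMA_MAP is set.
 *
 *	struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);
 *
 *	if (unlikely(!page))
 *		return -ENOMEM;
 *	rxq->desc[idx].addr = page_pool_get_dma_addr(page) + pool->p.offset;
 */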

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b) (s32)((a) - (b))

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}
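
/* Worked example of the wraparound-safe arithmetic above: after the u32
 * counters wrap, hold_cnt == 3 and release_cnt == 0xfffffffe gives
 * _distance(3, 0xfffffffe) == (s32)(3 - 0xfffffffe) == 5, i.e. five
 * pages are still inflight even though hold_cnt < release_cnt as plain
 * integers.
 */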

/* Disconnects a page (from a page_pool). API users can have a need
 * to disconnect a page (from a page_pool), to allow it to be used as
 * a regular page (that will eventually be returned to the normal
 * page-allocator via put_page).
 */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page_pool_get_dma_addr(page);

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
	page_pool_clear_pp_info(page);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;
	/* BH protection not needed if current is softirq */
	if (in_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	if (!ret) {
		recycle_stat_inc(pool, ring);
		return true;
	}

	return false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	recycle_stat_inc(pool, cached);
	return true;
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	lockdep_assert_no_hardirq();

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 *
	 * A page is NOT reusable when allocated while the system is
	 * under some pressure. (page_is_pfmemalloc)
	 */
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}
	/* Fallback/non-XDP mode: API users have an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	recycle_stat_inc(pool, released_refcnt);
	page_pool_return_page(pool, page);

	return NULL;
}

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Ring full, fallback to freeing pages */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_defragged_page);
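
/* Illustrative sketch: the usual driver-facing entry points wrap the
 * function above. page_pool_put_full_page() (page_pool/helpers.h) passes
 * dma_sync_size == -1 to sync the whole max_len area; allow_direct may
 * only be true from a safe (NAPI/softirq) context.
 *
 *	page_pool_put_full_page(pool, page, true);	// from NAPI poll
 *	page_pool_put_full_page(pool, page, false);	// anywhere else
 */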

/**
 * page_pool_put_page_bulk() - release references on multiple pages
 * @pool: pool from which pages were allocated
 * @data: array holding page pointers
 * @count: number of pages in @data
 *
 * Tries to refill a number of pages into the ptr_ring cache holding the
 * ptr_ring producer lock. If the ptr_ring is full, page_pool_put_page_bulk()
 * will release leftover pages to the page allocator.
 * page_pool_put_page_bulk() is suitable to be run inside the driver NAPI tx
 * completion loop for the XDP_REDIRECT use case.
 *
 * Please note the caller must not use the data area after running
 * page_pool_put_page_bulk(), as this function overwrites it.
 */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;
	bool in_softirq;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		/* It is not the last user for the page frag case */
		if (!page_pool_is_last_frag(page))
			continue;

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	in_softirq = page_pool_producer_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}
	recycle_stat_add(pool, ring, i);
	page_pool_producer_unlock(pool, in_softirq);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
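
/* Illustrative sketch: bulk-recycling buffers from a TX completion loop,
 * e.g. for XDP_REDIRECT. In-tree, xdp_return_frame_bulk() drives this
 * path; the bufs[] array of page_pool-backed virt addresses and the
 * foo_pull_completed_buf() helper below are hypothetical.
 *
 *	void *bufs[16];
 *	int n = 0;
 *
 *	while (n < 16 && (bufs[n] = foo_pull_completed_buf(txq)))
 *		n++;
 *	if (n)
 *		page_pool_put_page_bulk(pool, bufs, n);
 *	// bufs[] contents are overwritten by the call; do not reuse them.
 */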

static struct page *page_pool_drain_frag(struct page_pool *pool,
					 struct page *page)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_defrag_page(page, drain_count)))
		return NULL;

	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page, -1);

		return page;
	}

	page_pool_return_page(pool, page);
	return NULL;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	struct page *page = pool->frag_page;

	pool->frag_page = NULL;

	if (!page || page_pool_defrag_page(page, drain_count))
		return;

	page_pool_return_page(pool, page);
}

struct page *page_pool_alloc_frag(struct page_pool *pool,
				  unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page = pool->frag_page;

	if (WARN_ON(size > max_size))
		return NULL;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (page && *offset + size > max_size) {
		page = page_pool_drain_frag(pool, page);
		if (page) {
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!page) {
		page = page_pool_alloc_pages(pool, gfp);
		if (unlikely(!page)) {
			pool->frag_page = NULL;
			return NULL;
		}

		pool->frag_page = page;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_page(page, BIAS_MAX);
		return page;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	alloc_stat_inc(pool, fast);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_frag);
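
/* Illustrative sketch: carving two RX buffers out of one order-0 page.
 * The 2048-byte size is an assumption; @offset is filled in by the API.
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_alloc_frag(pool, &offset, 2048, GFP_ATOMIC);
 *	// First call on a fresh page: offset == 0. A second 2048-byte
 *	// call returns the same page with offset == 2048 and bumps
 *	// pool->frag_users.
 */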

static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void __page_pool_destroy(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

#ifdef CONFIG_PAGE_POOL_STATS
	free_percpu(pool->recycle_stats);
#endif
	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		__page_pool_destroy(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}

void page_pool_unlink_napi(struct page_pool *pool)
{
	if (!pool->p.napi)
		return;

	/* To avoid races with recycling and additional barriers make sure
	 * pool and NAPI are unlinked when NAPI is disabled.
	 */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
		READ_ONCE(pool->p.napi->list_owner) != -1);

	WRITE_ONCE(pool->p.napi, NULL);
}
EXPORT_SYMBOL(page_pool_unlink_napi);

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_unlink_napi(pool);
	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
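
/* Illustrative sketch: shutdown ordering in a hypothetical driver. The
 * pool may only be destroyed once no new allocations can happen; pages
 * still inflight keep the pool alive via the deferred work above.
 *
 *	napi_disable(&priv->napi);		// hypothetical driver state
 *	foo_free_rx_ring(priv);			// returns the ring's pages
 *	page_pool_destroy(priv->page_pool);	// defers if pages inflight
 */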

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);
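
/* Illustrative sketch: keeping allocations NUMA-local after IRQ affinity
 * changes, called from a driver's NAPI poll loop (a safe context). The
 * page_pool_nid_changed() helper from page_pool/helpers.h wraps exactly
 * this check; priv->page_pool is an assumption.
 *
 *	page_pool_nid_changed(priv->page_pool, numa_mem_id());
 */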