net: mvneta: rely on page_pool_recycle_direct in mvneta_run_xdp
[linux-2.6-block.git] net/core/page_pool.c
/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA
	 * sending, which is the XDP_TX use-case.
	 */
	if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
	    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
		return -EINVAL;

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* Driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);

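/* Example (illustrative sketch only, not part of this file): a minimal way a
 * driver might create one pool per RX queue. The field values and the 'dev'
 * name are assumptions, not taken from a specific driver. With
 * PP_FLAG_DMA_MAP the pool maps each page on allocation and keeps the
 * mapping until the page leaves the pool:
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.flags		= PP_FLAG_DMA_MAP,
 *		.pool_size	= 256,
 *		.nid		= dev_to_node(dev),
 *		.dev		= dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *	struct page_pool *pp;
 *
 *	pp = page_pool_create(&pp_params);
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 */
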
/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	bool refill = false;
	struct page *page;

	/* Test for safe-context, caller should provide this guarantee */
	if (likely(in_serving_softirq())) {
		if (likely(pool->alloc.count)) {
			/* Fast-path */
			page = pool->alloc.cache[--pool->alloc.count];
			return page;
		}
		refill = true;
	}

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Slow-path: Get page from locked ring queue,
	 * refill alloc array if requested.
	 */
	spin_lock(&r->consumer_lock);
	page = __ptr_ring_consume(r);
	if (refill)
		pool->alloc.count = __ptr_ring_consume_batched(r,
							pool->alloc.cache,
							PP_ALLOC_CACHE_REFILL);
	spin_unlock(&r->consumer_lock);
	return page;
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * Current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance. This code
	 * needs bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e. a 32-bit CPU with 64-bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it leaves
	 * the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* A page that was just alloc'ed should/must have refcnt 1. */
	return page;
}

/* For using page_pool to replace alloc_pages() API calls, while providing
 * a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

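/* Example (illustrative sketch; 'pp', 'page' and 'dma' are assumed names):
 * refilling an RX descriptor from the pool inside NAPI context. With
 * PP_FLAG_DMA_MAP the pool already holds the DMA mapping in page->dma_addr,
 * so the driver only reads it back:
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_alloc_pages(pp, GFP_ATOMIC | __GFP_NOWARN);
 *	if (!page)
 *		return -ENOMEM;
 *	dma = page->dma_addr;
 *
 * and then writes dma into the RX descriptor.
 */
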
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

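/* Worked example (illustrative): serial number arithmetic keeps the count
 * correct across u32 wrap-around. If hold_cnt has wrapped to 3 while
 * release_cnt is still 0xfffffffd, then
 *
 *	_distance(3, 0xfffffffd) = (s32)(3 - 0xfffffffd) = 6
 *
 * i.e. six pages are still outstanding, even though hold_cnt < release_cnt
 * as plain unsigned values. The result is only valid while the true distance
 * stays below 2^(31).
 */
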
/* Cleanup page_pool state from page */
static void __page_pool_clean_page(struct page_pool *pool,
				   struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_unmap;

	dma = page->dma_addr;
	/* DMA unmap */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page->dma_addr = 0;
skip_dma_unmap:
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}

/* unmap the page and clean our state */
void page_pool_unmap_page(struct page_pool *pool, struct page *page)
{
	/* When page is unmapped, this implies page will not be
	 * returned to page_pool.
	 */
	__page_pool_clean_page(pool, page);
}
EXPORT_SYMBOL(page_pool_unmap_page);

/* Return a page to the page allocator, cleaning up our state */
static void __page_pool_return_page(struct page_pool *pool, struct page *page)
{
	__page_pool_clean_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool __page_pool_recycle_into_ring(struct page_pool *pool,
					  struct page *page)
{
	int ret;
	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool __page_pool_recycle_direct(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* page is NOT reusable when:
 * 1) allocated when system is under some pressure. (page_is_pfmemalloc)
 * 2) belongs to a different NUMA node than pool->p.nid.
 *
 * To update pool->p.nid users must call page_pool_update_nid.
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
	return !page_is_pfmemalloc(page) && page_to_nid(page) == pool->p.nid;
}

void __page_pool_put_page(struct page_pool *pool,
			  struct page *page, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1 &&
		   pool_page_reusable(pool, page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (allow_direct && in_serving_softirq())
			if (__page_pool_recycle_direct(page, pool))
				return;

		if (!__page_pool_recycle_into_ring(pool, page)) {
			/* Ring full, fallback to freeing pages */
			__page_pool_return_page(pool, page);
		}
		return;
	}
	/* Fallback/non-XDP mode: API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page().
	 */
	__page_pool_clean_page(pool, page);
	put_page(page);
}
EXPORT_SYMBOL(__page_pool_put_page);

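/* Example (illustrative sketch; 'pp' and 'page' are assumed names, and the
 * wrappers referenced are assumed to be the ones declared in
 * include/net/page_pool.h at this point in time): the two return paths a
 * driver typically uses.
 *
 * From RX-NAPI softirq context, e.g. on XDP_DROP, recycle directly into
 * the alloc-side cache:
 *
 *	page_pool_recycle_direct(pp, page);
 *
 * From other contexts, let the page go through the ptr_ring, falling back
 * to the page allocator if the ring is full:
 *
 *	page_pool_put_page(pp, page, false);
 */
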
static void __page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		__page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		__page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	__page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

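/* Example (illustrative sketch; 'rxq' and 'pp' are assumed names, and the
 * assumption is that the usual XDP memory-model registration path in
 * net/core/xdp.c is used): a driver that feeds these pages into XDP
 * registers the pool as the RX queue's memory model, which is what ends up
 * calling page_pool_use_xdp_mem() and setting ->disconnect, letting
 * page_pool_destroy() defer the final free until in-flight pages drain:
 *
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, pp);
 *	if (err)
 *		page_pool_destroy(pp);
 */
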
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;
}
EXPORT_SYMBOL(page_pool_update_nid);