/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>

#include <trace/events/page_pool.h>

#define DEFER_TIME		(msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL	(60 * HZ)

#define BIAS_MAX	LONG_MAX

#ifdef CONFIG_PAGE_POOL_STATS
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)					\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);				\
	} while (0)

#define recycle_stat_add(pool, __stat, val)				\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);				\
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};

bool page_pool_get_stats(struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu;

	if (!stats)
		return false;

	/* The caller is responsible for initializing stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);

u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);

u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);

#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
#define recycle_stat_add(pool, __stat, val)
#endif
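
/* Illustrative sketch (not part of this file): a driver that keeps a
 * struct page_pool_stats per RX queue would typically wire the helpers
 * above into its ethtool ops roughly like this; the function and field
 * names below are hypothetical:
 *
 *	u64 *my_rxq_fill_pp_stats(struct my_rxq *rxq, u64 *data)
 *	{
 *		struct page_pool_stats pp_stats = { };
 *
 *		if (page_pool_get_stats(rxq->page_pool, &pp_stats))
 *			data = page_pool_ethtool_stats_get(data, &pp_stats);
 *		return data;
 *	}
 */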

static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	bool in_softirq = in_softirq();

	if (in_softirq)
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);

	return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
				      bool in_softirq)
	__releases(&pool->ring.producer_lock)
{
	if (in_softirq)
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}
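
/* The producer side of the ptr_ring can be entered both from softirq
 * context (NAPI) and from process context (e.g. bulk-free paths), which
 * is why both lock variants above are needed.
 */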

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL also allows the page to be used for DMA transmit,
	 * which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
	    pool->p.flags & PP_FLAG_PAGE_FRAG)
		return -EINVAL;

#ifdef CONFIG_PAGE_POOL_STATS
	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
	if (!pool->recycle_stats)
		return -ENOMEM;
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* The driver calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);
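
/* Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a driver typically creates one pool per RX queue, roughly:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pp = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 *
 * Here 'ring_size' and 'pdev' stand in for the driver's own RX ring size
 * and device.
 */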

static void page_pool_return_page(struct page_pool *pool, struct page *page);

static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return NULL;
	}

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable. This
	 * assumes the CPU refilling the driver RX-ring will also run RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch:
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fallthrough to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			alloc_stat_inc(pool, waive);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return page;
}

static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	/* Setup DMA mapping: use the 'struct page' area for storing the
	 * DMA-addr, since dma_addr_t can be either 32 or 64 bits and does not
	 * always fit into the page private data (i.e. a 32-bit CPU with
	 * 64-bit DMA capabilities).
	 * This mapping is kept for the lifetime of the page, until it leaves
	 * the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	page_pool_set_dma_addr(page, dma);

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

	return true;
}

static void page_pool_set_pp_info(struct page_pool *pool,
				  struct page *page)
{
	page->pp = pool;
	page->pp_magic |= PP_SIGNATURE;
	if (pool->p.init_callback)
		pool->p.init_callback(page, pool->p.init_arg);
}

static void page_pool_clear_pp_info(struct page *page)
{
	page->pp_magic = 0;
	page->pp = NULL;
}

static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
	    unlikely(!page_pool_dma_map(pool, page))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page);

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
	return page;
}

static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_flags = pool->p.flags;
	unsigned int pp_order = pool->p.order;
	struct page *page;
	int i, nr_pages;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return __page_pool_alloc_page_order(pool, gfp);

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
					       pool->alloc.cache);
	if (unlikely(!nr_pages))
		return NULL;

	/* Pages have been filled into the alloc.cache array, but the count is
	 * zero and the page elements have not been (possibly) DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pool->alloc.cache[i];
		if ((pp_flags & PP_FLAG_DMA_MAP) &&
		    unlikely(!page_pool_dma_map(pool, page))) {
			put_page(page);
			continue;
		}

		page_pool_set_pp_info(pool, page);
		pool->alloc.cache[pool->alloc.count++] = page;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, page,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		page = NULL;
	}

	/* A page that has just been allocated should/must have refcnt 1. */
	return page;
}

/* When using page_pool, this replaces alloc_pages() API calls, but the
 * caller must provide the synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
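
/* Illustrative sketch (hypothetical driver code, not part of this file):
 * the typical RX-ring refill from NAPI/softirq context looks roughly like:
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
 *	if (!page)
 *		return -ENOMEM;
 *	dma = page_pool_get_dma_addr(page) + headroom;
 *	(program 'dma' into the RX descriptor)
 *
 * 'rxq' and 'headroom' are placeholders; page_pool_get_dma_addr() is only
 * meaningful when the pool was created with PP_FLAG_DMA_MAP.
 */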

/* Calculate the distance between two u32 values, valid if the distance is
 * below 2^(31).
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}
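
/* Worked example of the serial-number arithmetic above: if hold_cnt has
 * wrapped around to 5 while release_cnt is still U32_MAX - 2, then
 * _distance(5, U32_MAX - 2) = (s32)8, which is the correct number of
 * in-flight pages as long as the real distance stays below 2^31.
 */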

/* Disconnects a page (from a page_pool). API users may need to disconnect
 * a page, to allow it to be used as a regular page (that will eventually
 * be returned to the normal page-allocator via put_page()).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page_pool_get_dma_addr(page);

	/* When the page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
	page_pool_clear_pp_info(page);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;

	/* BH protection not needed if current is softirq */
	if (in_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	if (!ret) {
		recycle_stat_inc(pool, ring);
		return true;
	}

	return false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	recycle_stat_inc(pool, cached);
	return true;
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 *
	 * The page is NOT reusable when it was allocated while the system
	 * was under memory pressure (page_is_pfmemalloc()).
	 */
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}

	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	recycle_stat_inc(pool, released_refcnt);
	/* Do not replace this with page_pool_return_page() */
	page_pool_release_page(pool, page);
	put_page(page);

	return NULL;
}

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_defragged_page);
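
/* Summary of the recycle path above: a page being put back first tries the
 * lockless alloc-side cache (only when allow_direct and running in softirq),
 * then the ptr_ring, and finally falls back to the page allocator via
 * page_pool_return_page().
 */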

/* Caller must not use data area after call, as this function overwrites it */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;
	bool in_softirq;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		/* It is not the last user for the page frag case */
		if (!page_pool_is_last_frag(pool, page))
			continue;

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	in_softirq = page_pool_producer_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}
	recycle_stat_add(pool, ring, i);
	page_pool_producer_unlock(pool, in_softirq);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);

static struct page *page_pool_drain_frag(struct page_pool *pool,
					 struct page *page)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_defrag_page(page, drain_count)))
		return NULL;

	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page, -1);

		return page;
	}

	page_pool_return_page(pool, page);
	return NULL;
}

static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	struct page *page = pool->frag_page;

	pool->frag_page = NULL;

	if (!page || page_pool_defrag_page(page, drain_count))
		return;

	page_pool_return_page(pool, page);
}

struct page *page_pool_alloc_frag(struct page_pool *pool,
				  unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page = pool->frag_page;

	if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
		    size > max_size))
		return NULL;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (page && *offset + size > max_size) {
		page = page_pool_drain_frag(pool, page);
		if (page) {
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!page) {
		page = page_pool_alloc_pages(pool, gfp);
		if (unlikely(!page)) {
			pool->frag_page = NULL;
			return NULL;
		}

		pool->frag_page = page;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_page(page, BIAS_MAX);
		return page;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	alloc_stat_inc(pool, fast);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_frag);
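
/* Illustrative sketch (hypothetical driver code, not part of this file):
 * sub-page allocation with the frag API, for a pool created with
 * PP_FLAG_PAGE_FRAG, looks roughly like:
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_alloc_frag(rxq->page_pool, &offset, frag_size,
 *				    GFP_ATOMIC);
 *	if (!page)
 *		return -ENOMEM;
 *	dma = page_pool_get_dma_addr(page) + offset;
 *
 * 'rxq', 'frag_size' and 'dma' are placeholders for the driver's own state.
 */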

static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

#ifdef CONFIG_PAGE_POOL_STATS
	free_percpu(pool->recycle_stats);
#endif
	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty the alloc cache; assume the caller made sure this is
	 * no longer in use, and that page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
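
/* Note on the destroy path above: the pool is only freed once all in-flight
 * pages have been returned; until then page_pool_release_retry() re-checks
 * the in-flight count every DEFER_TIME and warns every DEFER_WARN_INTERVAL.
 */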

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);

bool page_pool_return_skb_page(struct page *page)
{
	struct page_pool *pp;

	page = compound_head(page);

	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
	 * in order to preserve any existing bits, such as bit 0 for the
	 * head page of compound page and bit 1 for pfmemalloc page, so
	 * mask those bits for freeing side when doing below checking,
	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
	 * to avoid recycling the pfmemalloc page.
	 */
	if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
		return false;

	pp = page->pp;

	/* Driver set this to memory recycling info. Reset it on recycle.
	 * This will *not* work for NIC using a split-page memory model.
	 * The page will be returned to the pool here regardless of the
	 * 'flipped' fragment being in use or not.
	 */
	page_pool_put_full_page(pp, page, false);

	return true;
}
EXPORT_SYMBOL(page_pool_return_skb_page);