page_pool: introduce page_pool_alloc() API
author Yunsheng Lin <linyunsheng@huawei.com>
Fri, 20 Oct 2023 09:59:50 +0000 (17:59 +0800)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 24 Oct 2023 02:14:48 +0000 (19:14 -0700)
Currently, page pool supports the below use cases:
use case 1: allocate a page without page splitting using the
            page_pool_alloc_pages() API, if the driver knows
            that the memory it needs is always bigger than
            half of the page allocated from the page pool.
use case 2: allocate a page frag with page splitting using the
            page_pool_alloc_frag() API, if the driver knows
            that the memory it needs is always smaller than
            or equal to half of the page allocated from the
            page pool.
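For reference, a minimal sketch of how a driver might use the two
existing APIs; 'pool' and 'rx_buf_len' are hypothetical and not part
of this commit:

	/* use case 1: always need more than half a page, take a whole page */
	struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);

	/* use case 2: always fit in half a page or less, take a page fragment */
	unsigned int offset;
	struct page *frag = page_pool_alloc_frag(pool, &offset, rx_buf_len,
						 GFP_ATOMIC | __GFP_NOWARN);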

There are emerging use cases [1] & [2] that are a mix of the
above two cases: the driver doesn't know the size of the memory it
needs beforehand, so it may use something like the below to
allocate memory with the least memory waste and performance
penalty:

if (size << 1 > max_size)
	page = page_pool_alloc_pages();
else
	page = page_pool_alloc_frag();

To avoid the driver doing something like the above, add a
page_pool_alloc() API to support this use case, and report the
size of the memory that is actually allocated by updating
'*size' back to the driver, in order to avoid exacerbating the
truesize underestimation problem.
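A minimal sketch of how a caller might use the new API; 'pool',
'rx_buf_len' and the truesize accounting are illustrative
assumptions, not part of this commit:

	unsigned int offset, size = rx_buf_len;	/* requested size */
	struct page *page;

	/* the pool picks between a whole page and a fragment; on return
	 * 'size' holds the space actually reserved for this buffer
	 */
	page = page_pool_alloc(pool, &offset, &size, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page))
		return -ENOMEM;

	/* account truesize with the returned 'size', not the requested one,
	 * so the underestimation does not get worse
	 */
	skb->truesize += size;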

Rename page_pool_free(), which is used in the destroy process, to
__page_pool_destroy() to avoid confusion with the newly added
API.

1. https://lore.kernel.org/all/d3ae6bd3537fbce379382ac6a42f67e22f27ece2.1683896626.git.lorenzo@kernel.org/
2. https://lore.kernel.org/all/20230526054621.18371-3-liangchen.linux@gmail.com/

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
CC: Liang Chen <liangchen.linux@gmail.com>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20231020095952.11055-4-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/page_pool/helpers.h
net/core/page_pool.c

index 759489c037c72c268bc399872c8fbd934037afc7..1b76e05dc4d2c3069fa7d13cd48c9530f0a74455 100644 (file)
@@ -82,6 +82,66 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
        return page_pool_alloc_frag(pool, offset, size, gfp);
 }
 
+static inline struct page *page_pool_alloc(struct page_pool *pool,
+                                          unsigned int *offset,
+                                          unsigned int *size, gfp_t gfp)
+{
+       unsigned int max_size = PAGE_SIZE << pool->p.order;
+       struct page *page;
+
+       if ((*size << 1) > max_size) {
+               *size = max_size;
+               *offset = 0;
+               return page_pool_alloc_pages(pool, gfp);
+       }
+
+       page = page_pool_alloc_frag(pool, offset, *size, gfp);
+       if (unlikely(!page))
+               return NULL;
+
+       /* There is very likely not enough space for another fragment, so append
+        * the remaining size to the current fragment to avoid truesize
+        * underestimate problem.
+        */
+       if (pool->frag_offset + *size > max_size) {
+               *size = max_size - *offset;
+               pool->frag_offset = max_size;
+       }
+
+       return page;
+}
+
+static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
+                                              unsigned int *offset,
+                                              unsigned int *size)
+{
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       return page_pool_alloc(pool, offset, size, gfp);
+}
+
+static inline void *page_pool_alloc_va(struct page_pool *pool,
+                                      unsigned int *size, gfp_t gfp)
+{
+       unsigned int offset;
+       struct page *page;
+
+       /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
+       page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
+       if (unlikely(!page))
+               return NULL;
+
+       return page_address(page) + offset;
+}
+
+static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
+                                          unsigned int *size)
+{
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       return page_pool_alloc_va(pool, size, gfp);
+}
+
 /**
  * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
  * @pool:      pool from which page was allocated
@@ -221,6 +281,12 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 #define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA    \
                (sizeof(dma_addr_t) > sizeof(unsigned long))
 
+static inline void page_pool_free_va(struct page_pool *pool, void *va,
+                                    bool allow_direct)
+{
+       page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
+}
+
 /**
  * page_pool_get_dma_addr() - Retrieve the stored DMA address.
  * @page:      page allocated from a page pool
index 2a3671c97ca7e34e26547ec163eaf83d0e3f2f32..5e409b98aba0f0a1fd6fb88db3d1dde4291f1cd7 100644 (file)
@@ -809,7 +809,7 @@ static void page_pool_empty_ring(struct page_pool *pool)
        }
 }
 
-static void page_pool_free(struct page_pool *pool)
+static void __page_pool_destroy(struct page_pool *pool)
 {
        if (pool->disconnect)
                pool->disconnect(pool);
@@ -860,7 +860,7 @@ static int page_pool_release(struct page_pool *pool)
        page_pool_scrub(pool);
        inflight = page_pool_inflight(pool);
        if (!inflight)
-               page_pool_free(pool);
+               __page_pool_destroy(pool);
 
        return inflight;
 }
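
As a usage note, the new virtual-address helpers added above pair up as
below; the buffer size and the recycling context are illustrative
assumptions, not taken from the patch:

	unsigned int size = 2048;
	void *va;

	/* allocate a buffer and get its kernel virtual address directly;
	 * __GFP_HIGHMEM is masked off internally so page_address() is safe
	 */
	va = page_pool_dev_alloc_va(pool, &size);
	if (!va)
		return -ENOMEM;

	/* ... fill and use the buffer ... */

	/* hand the buffer back; 'true' allows direct recycling when called
	 * from the same NAPI/softirq context that allocated it
	 */
	page_pool_free_va(pool, va, true);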