]> git.itanic.dy.fi Git - linux-stable/commitdiff
page_pool: keep pp info as long as page pool owns the page
author: Yunsheng Lin <linyunsheng@huawei.com>
Fri, 6 Aug 2021 02:46:19 +0000 (10:46 +0800)
committer: Jakub Kicinski <kuba@kernel.org>
Mon, 9 Aug 2021 22:49:00 +0000 (15:49 -0700)
Currently, page->pp is cleared and set every time the page
is recycled, which is unnecessary.

So only set the page->pp when the page is added to the page
pool and only clear it when the page is released from the
page pool.

This is also a preparation to support allocating frag page
in page pool.

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_new.c
include/linux/skbuff.h
include/net/page_pool.h
net/core/page_pool.c

index ff8db311963c3ad006e9792c5e317ac4b8c3a6bc..5d1007e1b5c981963bca910b940a7cbf13547d8f 100644 (file)
@@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
+       skb_mark_for_recycle(skb);
 
        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        skb_put(skb, xdp->data_end - xdp->data);
@@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                skb_frag_page(frag), skb_frag_off(frag),
                                skb_frag_size(frag), PAGE_SIZE);
-               /* We don't need to reset pp_recycle here. It's already set, so
-                * just mark fragments for recycling.
-                */
-               page_pool_store_mem_info(skb_frag_page(frag), pool);
        }
 
        return skb;
index 99bd8b8aa0e29c42ac5c2070dd92aeca0f8cce09..744f58f41ecccf2e4f2cbf644b8545436d0539da 100644 (file)
@@ -3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
                }
 
                if (pp)
-                       skb_mark_for_recycle(skb, page, pp);
+                       skb_mark_for_recycle(skb);
                else
                        dma_unmap_single_attrs(dev->dev.parent, dma_addr,
                                               bm_pool->buf_size, DMA_FROM_DEVICE,
index 8e1e582a10c8c154441d5f57f0d6d2e1a5aa1e19..9f70e40779f6a75989bb2b453c0286e2872f0cb0 100644 (file)
@@ -431,7 +431,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
        skb->protocol = eth_type_trans(skb, ndev);
 
        /* mark skb for recycling */
-       skb_mark_for_recycle(skb, page, pool);
+       skb_mark_for_recycle(skb);
        netif_receive_skb(skb);
 
        ndev->stats.rx_bytes += len;
index 192394fe4c1c6f9a46ac0e06b3b7d0fd63692972..ff3a96b084ee57817310cd5d11fac6480ed64f41 100644 (file)
@@ -375,7 +375,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
        skb->protocol = eth_type_trans(skb, ndev);
 
        /* mark skb for recycling */
-       skb_mark_for_recycle(skb, page, pool);
+       skb_mark_for_recycle(skb);
        netif_receive_skb(skb);
 
        ndev->stats.rx_bytes += len;
index 783cc2368bb119da0ad6b803a9ecb8d3b3314d12..6bdb0db3e8258ad2745705a9b046eb1c93e05840 100644 (file)
@@ -4712,11 +4712,9 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
 }
 
 #ifdef CONFIG_PAGE_POOL
-static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
-                                       struct page_pool *pp)
+static inline void skb_mark_for_recycle(struct sk_buff *skb)
 {
        skb->pp_recycle = 1;
-       page_pool_store_mem_info(page, pp);
 }
 #endif
 
index 3dd62dd73027d53c348783f9ee30c030ebaec3b9..8d7744d1c7c1040e1b3bd2ce4107f3d0170e570b 100644 (file)
@@ -253,11 +253,4 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
                spin_unlock_bh(&pool->ring.producer_lock);
 }
 
-/* Store mem_info on struct page and use it while recycling skb frags */
-static inline
-void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
-{
-       page->pp = pp;
-}
-
 #endif /* _NET_PAGE_POOL_H */
index 5e4eb45b139c7138ad3fe2d223dc1ac1d405cdd1..78838c6fe0078b3cd632d597bf3958ebd29b48da 100644 (file)
@@ -206,6 +206,19 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
        return true;
 }
 
+static void page_pool_set_pp_info(struct page_pool *pool,
+                                 struct page *page)
+{
+       page->pp = pool;
+       page->pp_magic |= PP_SIGNATURE;
+}
+
+static void page_pool_clear_pp_info(struct page *page)
+{
+       page->pp_magic = 0;
+       page->pp = NULL;
+}
+
 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
                                                 gfp_t gfp)
 {
@@ -222,7 +235,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
                return NULL;
        }
 
-       page->pp_magic |= PP_SIGNATURE;
+       page_pool_set_pp_info(pool, page);
 
        /* Track how many pages are held 'in-flight' */
        pool->pages_state_hold_cnt++;
@@ -266,7 +279,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
                        put_page(page);
                        continue;
                }
-               page->pp_magic |= PP_SIGNATURE;
+
+               page_pool_set_pp_info(pool, page);
                pool->alloc.cache[pool->alloc.count++] = page;
                /* Track how many pages are held 'in-flight' */
                pool->pages_state_hold_cnt++;
@@ -345,7 +359,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
                             DMA_ATTR_SKIP_CPU_SYNC);
        page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
-       page->pp_magic = 0;
+       page_pool_clear_pp_info(page);
 
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.
@@ -644,7 +658,6 @@ bool page_pool_return_skb_page(struct page *page)
         * The page will be returned to the pool here regardless of the
         * 'flipped' fragment being in use or not.
         */
-       page->pp = NULL;
        page_pool_put_full_page(pp, page, false);
 
        return true;