page_pool: remove PP_FLAG_PAGE_FRAG
author     Yunsheng Lin <linyunsheng@huawei.com>
           Fri, 20 Oct 2023 09:59:49 +0000 (17:59 +0800)
committer  Jakub Kicinski <kuba@kernel.org>
           Tue, 24 Oct 2023 02:14:48 +0000 (19:14 -0700)
PP_FLAG_PAGE_FRAG is no longer needed now that pp_frag_count
handling has been unified and page_pool_alloc_frag() is supported
on 32-bit arches with 64-bit DMA, so remove it.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
CC: Liang Chen <liangchen.linux@gmail.com>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20231020095952.11055-3-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/wireless/mediatek/mt76/mac80211.c
include/net/page_pool/types.h
net/core/page_pool.c
net/core/skbuff.c

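For context, a minimal sketch of the driver-side pattern after this change: the fragment API needs no opt-in flag any more, so only the DMA-related flags remain meaningful. Everything below (the helper names, pool size, GFP flags) is illustrative and is not taken from any driver touched by this commit.

#include <net/page_pool/helpers.h>

/* Illustrative pool setup: no PP_FLAG_PAGE_FRAG, yet the frag
 * allocation below still works after this commit.
 */
static struct page_pool *example_create_pool(struct device *dev)
{
        struct page_pool_params pp = {
                .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order     = 0,
                .pool_size = 256,
                .nid       = NUMA_NO_NODE,
                .dev       = dev,
                .dma_dir   = DMA_FROM_DEVICE,
                .max_len   = PAGE_SIZE,
        };

        return page_pool_create(&pp);
}

static struct page *example_alloc_rx_buf(struct page_pool *pool,
                                         unsigned int size,
                                         unsigned int *offset)
{
        /* Succeeds whenever size fits in PAGE_SIZE << pool->p.order;
         * no flag check remains on this path.
         */
        return page_pool_alloc_frag(pool, offset, size, GFP_ATOMIC);
}
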
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5d7a29f9940164d403d454721fcf63e734b2c1cc..d0359b569afeb17e3ef1340bac5b3645636af237 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3302,8 +3302,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
        pp.dma_dir = bp->rx_dir;
        pp.max_len = PAGE_SIZE;
        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
-               pp.flags |= PP_FLAG_PAGE_FRAG;
 
        rxr->page_pool = page_pool_create(&pp);
        if (IS_ERR(rxr->page_pool)) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index cf50368441b783df43cb308745d49fa58a69674c..06117502001f922271f67cc9103f896e9122f2e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4940,8 +4940,7 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
 {
        struct page_pool_params pp_params = {
-               .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
-                               PP_FLAG_DMA_SYNC_DEV,
+               .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order = hns3_page_order(ring),
                .pool_size = ring->desc_num * hns3_buf_size(ring) /
                                (PAGE_SIZE << hns3_page_order(ring)),
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 58c5412d317361345a210bd6c6d4c75f883325a5..5e1ef70d54fe4147a42e5a3263b73cd3e6316679 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -595,9 +595,6 @@ static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
                .offset         = 0,
        };
 
-       if (rxbufq->rx_buf_size == IDPF_RX_BUF_2048)
-               pp.flags |= PP_FLAG_PAGE_FRAG;
-
        return page_pool_create(&pp);
 }
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 818ce76185b2f3bab47bc59696f7afe2868d3ec7..1a42bfded8722ac7c494552ee98739a95d87089c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1404,7 +1404,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
        }
 
        pp_params.order = get_order(buf_size);
-       pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+       pp_params.flags = PP_FLAG_DMA_MAP;
        pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
        pp_params.nid = NUMA_NO_NODE;
        pp_params.dev = pfvf->dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9325b8f00af0658c06c1efc7631b788bbdd4ef10..ea58c691743302bfc0ccc4420f15ff4ff06e3cae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -897,7 +897,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                struct page_pool_params pp_params = { 0 };
 
                pp_params.order     = 0;
-               pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+               pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
                pp_params.pool_size = pool_size;
                pp_params.nid       = node;
                pp_params.dev       = rq->pdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index cb76053973aa84e26a5669c3e397e36d3f1cd311..51a767121b0da78f393c5f44f1227dd97e1bdb9e 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -570,7 +570,7 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
 {
        struct page_pool_params pp_params = {
                .order = 0,
-               .flags = PP_FLAG_PAGE_FRAG,
+               .flags = 0,
                .nid = NUMA_NO_NODE,
                .dev = dev->dma_dev,
        };
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 887e7946a597494d714f12f9f7fb2e53e32557d1..6fc5134095ed19503bcecd3e223116cf49350566 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
                                        * Please note DMA-sync-for-CPU is still
                                        * device driver responsibility
                                        */
-#define PP_FLAG_PAGE_FRAG      BIT(2) /* for page frag feature */
 #define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
-                                PP_FLAG_DMA_SYNC_DEV |\
-                                PP_FLAG_PAGE_FRAG)
+                                PP_FLAG_DMA_SYNC_DEV)
 
 /*
  * Fast allocation side cache array/stack
@@ -45,7 +43,7 @@ struct pp_alloc_cache {
 
 /**
  * struct page_pool_params - page pool parameters
- * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
  * @order:     2^order pages on allocation
  * @pool_size: size of the ptr_ring
  * @nid:       NUMA node id to allocate from pages from
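
With BIT(2) retired, PP_FLAG_ALL shrinks accordingly. The core rejects any flag bit outside this mask at pool-creation time, so a stale PP_FLAG_PAGE_FRAG user now fails with -EINVAL rather than silently changing behaviour. A condensed sketch of that validation (page_pool_init() performs the real check):

/* Sketch only: creation-time flag validation. Any bit outside
 * PP_FLAG_ALL -- including the former BIT(2) -- is rejected.
 */
static int example_validate_flags(const struct page_pool_params *params)
{
        if (params->flags & ~PP_FLAG_ALL)
                return -EINVAL;

        return 0;
}
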
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 953535cab08161abedf36d06b4c215a479df8197..2a3671c97ca7e34e26547ec163eaf83d0e3f2f32 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -756,8 +756,7 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
        unsigned int max_size = PAGE_SIZE << pool->p.order;
        struct page *page = pool->frag_page;
 
-       if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
-                   size > max_size))
+       if (WARN_ON(size > max_size))
                return NULL;
 
        size = ALIGN(size, dma_get_cache_alignment());
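
The surviving guard only bounds the request by the pool's page size; the ALIGN() that follows rounds the request up to the DMA cache-line size. As a worked example, assuming a 64-byte cache line and 4 KiB pages (both values are platform-dependent assumptions), a 1500-byte request becomes 1536 bytes, so one order-0 page yields two such fragments:

/* Mirror of the kernel's ALIGN() macro, for illustration only. */
#define EXAMPLE_ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

static unsigned int example_frags_per_page(unsigned int size)
{
        size = EXAMPLE_ALIGN(size, 64); /* dma_get_cache_alignment() assumed 64 */
        return 4096 / size;             /* PAGE_SIZE assumed 4096: 1500 -> 1536 -> 2 */
}
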
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 975c9a6ffb4ad1584ab0500e8eeea979e7407acb..c52ddd6891d9d392cd5108b67d2c6d4685146c37 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5765,7 +5765,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
        /* In general, avoid mixing page_pool and non-page_pool allocated
         * pages within the same SKB. Additionally avoid dealing with clones
         * with page_pool pages, in case the SKB is using page_pool fragment
-        * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+        * references (page_pool_alloc_frag()). Since we only take full page
         * references for cloned SKBs at the moment that would result in
         * inconsistent reference counts.
         * In theory we could take full references if @from is cloned and
         * !@to->pp_recycle but its tricky (due to potential race with
         * the clone disappearing) and rare, so not worth dealing with.
         */
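
The guard this comment documents boils down to two conditions; roughly (a condensed sketch of the check that follows in skb_try_coalesce(), not a verbatim quote):

/* Refuse to coalesce when the two skbs use different page reference
 * models, or when @from is a clone carrying page_pool pages (which
 * may hold fragment references).
 */
if (to->pp_recycle != from->pp_recycle ||
    (from->pp_recycle && skb_cloned(from)))
        return false;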