page_pool: split types and declarations from page_pool.h
author Yunsheng Lin <linyunsheng@huawei.com>
Fri, 4 Aug 2023 18:05:24 +0000 (20:05 +0200)
committer Jakub Kicinski <kuba@kernel.org>
Mon, 7 Aug 2023 20:05:19 +0000 (13:05 -0700)
Split types and pure function declarations out of page_pool.h
and add them to page_pool/types.h, so that C sources can
include page_pool.h while headers should generally only include
page_pool/types.h, as suggested by Jakub.
Rename page_pool.h to page_pool/helpers.h to have both in
one place.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Link: https://lore.kernel.org/r/20230804180529.2483231-2-aleksander.lobakin@intel.com
[Jakub: change microsoft/mana, fix kdoc paths in Documentation]
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
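
As an illustration of the split described above (not part of the patch;
the foo_* names are hypothetical): a driver's private header now only
needs the type definitions from page_pool/types.h, while the .c file
that actually calls the inline helpers includes page_pool/helpers.h.

    /* foo.h - hypothetical driver header: struct layouts only */
    #include <net/page_pool/types.h>

    struct foo_rx_ring {
            struct page_pool *page_pool;    /* pointer member, no helper calls here */
    };

    /* foo_main.c - hypothetical driver source: uses the inline helpers */
    #include <net/page_pool/helpers.h>

    static struct page *foo_get_rx_page(struct foo_rx_ring *ring)
    {
            return page_pool_dev_alloc_pages(ring->page_pool);
    }
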
43 files changed:
Documentation/networking/page_pool.rst
MAINTAINERS
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/engleder/tsnep_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
drivers/net/ethernet/microsoft/mana/mana_en.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_new.c
drivers/net/ethernet/ti/cpsw_priv.c
drivers/net/ethernet/wangxun/libwx/wx_lib.c
drivers/net/veth.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/xen-netfront.c
include/linux/skbuff.h
include/net/page_pool/helpers.h [moved from include/net/page_pool.h with 51% similarity]
include/net/page_pool/types.h [new file with mode: 0644]
include/trace/events/page_pool.h
net/bpf/test_run.c
net/core/page_pool.c
net/core/skbuff.c
net/core/xdp.c

index 53b5448cc0f11adc995f8106ccb48f5172ec20ac..68b82cea13e438fcf71620cc9fed35fe68103d56 100644 (file)
@@ -67,10 +67,10 @@ a page will cause no race conditions is enough.
 .. kernel-doc:: net/core/page_pool.c
    :identifiers: page_pool_create
 
-.. kernel-doc:: include/net/page_pool.h
+.. kernel-doc:: include/net/page_pool/types.h
    :identifiers: struct page_pool_params
 
-.. kernel-doc:: include/net/page_pool.h
+.. kernel-doc:: include/net/page_pool/helpers.h
    :identifiers: page_pool_put_page page_pool_put_full_page
                 page_pool_recycle_direct page_pool_dev_alloc_pages
                 page_pool_get_dma_addr page_pool_get_dma_dir
@@ -122,7 +122,7 @@ page_pool_stats allocated by the caller.
 The API will fill in the provided struct page_pool_stats with
 statistics about the page_pool.
 
-.. kernel-doc:: include/net/page_pool.h
+.. kernel-doc:: include/net/page_pool/types.h
    :identifiers: struct page_pool_recycle_stats
                 struct page_pool_alloc_stats
                 struct page_pool_stats
index 5e2bb1059ab67b4230be886cd2e6d134ea4bde37..08bcf3a7c482dc01278fed62635428e3a2299423 100644 (file)
@@ -16020,7 +16020,7 @@ M:      Ilias Apalodimas <ilias.apalodimas@linaro.org>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/page_pool.rst
-F:     include/net/page_pool.h
+F:     include/net/page_pool/
 F:     include/trace/events/page_pool.h
 F:     net/core/page_pool.c
 
index 6a643aae78028887e2fb68572588aa4d06a31e6d..eb168ca983b753663ee99fbda1db6639a93f2fa7 100644 (file)
@@ -54,7 +54,7 @@
 #include <net/pkt_cls.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <linux/align.h>
 #include <net/netdev_queues.h>
 
index 2ce46d7affe4a09529e9febf37b0dc439d92f6c2..96f5ca778c67d609ffa3530fa5508704e0b8e98f 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/filter.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include "bnxt_hsi.h"
 #include "bnxt.h"
 #include "bnxt_xdp.h"
index 079f9f6ae21aebe9689d40a90a6cef57d38c4ed5..f61bd89734c588e7c5314859f7245c7e081bba87 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/iopoll.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
 #include <net/xdp_sock_drv.h>
 
 #define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
index 43f14cec91e9335488f9c2bcf80e62181f081f1f..3bd0bf03aedbf0cca160676d527e2497c737adc3 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/page_pool/helpers.h>
 #include <net/selftests.h>
 #include <net/tso.h>
 #include <linux/tcp.h>
index 9f6890059666e50bde32445c801369417c882796..e5e37a33fd819777e16222f6baa1c6d584551d3b 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/gre.h>
 #include <net/gro.h>
 #include <net/ip6_checksum.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_cls.h>
 #include <net/pkt_sched.h>
 #include <net/tcp.h>
index 88af34bbee34bad0be2aa1e28678fb00c37c35a3..acd756b0c7c9a4134a0d7c47a50b8112dc881ee1 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <linux/dim.h>
 #include <linux/if_vlan.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <asm/barrier.h>
 
 #include "hnae3.h"
index acf4f6ba73a6f97b74b3c2a4e6b2581146d0f651..d483b8c00ec0e2dacfd5ac616b3c282e09d4fd37 100644 (file)
@@ -37,7 +37,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/tso.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_sched.h>
 #include <linux/bpf_trace.h>
 
index 11e603686a276661ebbf6e3c1ea7545253213eab..e809f91c08fb9daac8b7c4a89bd279c671036fb7 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/phy.h>
 #include <linux/phylink.h>
 #include <net/flow_offload.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <linux/bpf.h>
 #include <net/xdp.h>
 
index 9e1b596c8f088846d4b27e9d58f182f0bfdcef93..eb74ccddb4409762cbc01cf24b9bb53a95dd250d 100644 (file)
@@ -35,6 +35,7 @@
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
 #include <net/tso.h>
 #include <linux/bpf_trace.h>
 
index 8cdd92dd97628f7f81cd9f7cee1d9132aa61181e..8336cea16aff01661f424fd0535d18692ce9d9d1 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <net/page_pool/helpers.h>
 #include <net/tso.h>
 #include <linux/bitfield.h>
 
index 61f62a6ec6627cd006e34df8ac37feedc4dd8853..70b9065f7d10188180b8aa16e2b37c85d1ff7176 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/bitfield.h>
+#include <net/page_pool/types.h>
 
 #include "otx2_reg.h"
 #include "otx2_common.h"
index 1b89f800f6dff7de04addde31648f50305e5f6a5..fe05c90202699ff5e8af1adcc8461ad83dea94a6 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/bitfield.h>
 #include <net/dsa.h>
 #include <net/dst_metadata.h>
+#include <net/page_pool/helpers.h>
 
 #include "mtk_eth_soc.h"
 #include "mtk_wed.h"
index 80d17729e557846b3ff027427757992e36e199e0..4a2470fbad2cf012579178521935b71ddcb055b7 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/rhashtable.h>
 #include <linux/dim.h>
 #include <linux/bitfield.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <linux/bpf_trace.h>
 #include "mtk_ppe.h"
 
index 5ce28ff7685fcedd66994c3cc98340eda7eef513..e097f336e1c4a0ff1543bab027a4700aef8a6be2 100644 (file)
@@ -6,6 +6,7 @@
 #include "en/port.h"
 #include "en_accel/en_accel.h"
 #include "en_accel/ipsec.h"
+#include <net/page_pool/types.h>
 #include <net/xdp_sock_drv.h>
 
 static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
index 201ac7dd338f06181f778000a3e43e363af6484d..698647cc8c0f92dab81697880df065019b8b1db8 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2020 Mellanox Technologies */
 
-#include <net/page_pool.h>
 #include "en/txrx.h"
 #include "en/params.h"
 #include "en/trap.h"
index 40589cebb773002739746987f3d1089d76de2ffb..12f56d0db0af2f904866b75d159a00a05a9e2f7a 100644 (file)
@@ -35,6 +35,7 @@
 #include "en/xdp.h"
 #include "en/params.h"
 #include <linux/bitfield.h>
+#include <net/page_pool/helpers.h>
 
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 {
index 1c820119e438f0cd119f891080e16628313971cb..c8ec6467d4d16d80e1ec370012d7528d6051cb06 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/debugfs.h>
 #include <linux/if_bridge.h>
 #include <linux/filter.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/pkt_sched.h>
 #include <net/xdp_sock_drv.h>
 #include "eswitch.h"
index f7bb5f4aaaca0170b6d94536d5f53cfbe1539a39..3fd11b0761e09e4af026bd82a44ee3f812261fd0 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/bitmap.h>
 #include <linux/filter.h>
 #include <net/ip6_checksum.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/inet_ecn.h>
 #include <net/gro.h>
 #include <net/udp.h>
index 4d77055abd4be3659672663e97c6b4269435119c..07b84d668fcc02ff046f2076bfa7faccb796e228 100644 (file)
@@ -38,7 +38,7 @@
 #include "en/port.h"
 
 #ifdef CONFIG_PAGE_POOL_STATS
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #endif
 
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
index bd72fbc2220f3010afd8b90f3704e261b9d0a98f..3960534ac2ad812b7bb41fa359b9d20d6e5197e3 100644 (file)
@@ -2,6 +2,7 @@
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <net/page_pool/helpers.h>
 
 #include "lan966x_main.h"
 
index aebc9154693af14a9d8336cc33a5c2740a2f7abb..caa9e0533c96b3c31d956ee3f01eb7e59bab591c 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/phy.h>
 #include <linux/phylink.h>
 #include <linux/ptp_clock_kernel.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/pkt_cls.h>
 #include <net/pkt_sched.h>
 #include <net/switchdev.h>
index a08023c57e25ef9f7082c82857e8e198b55c555f..31e2f2c74e15a96cefec375ae6fe9cf023f56a38 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
+#include <net/page_pool/helpers.h>
 #include <net/xdp.h>
 
 #include <net/mana/mana.h>
index 0dcd6a568b061242f510dd460dde5798895ab1a5..f358ea003193698cb48ed0362be9e9f96e90d6db 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/bpf_trace.h>
 
 #include <net/tcp.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/ip6_checksum.h>
 
 #define NETSEC_REG_SOFT_RST                    0x104
index a6d034968654d2f492661d585b0947ed42dbf4e5..3401e888a9f6860b835a115a80102d01ea44ebff 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/net_tstamp.h>
 #include <linux/reset.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/xdp.h>
 #include <uapi/linux/bpf.h>
 
index 99aa5360b3ff91bb3816a1f8d7431c07e2f7ad3e..fcab363d8dfa245269d6c3729951aa7fc4de37e1 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/phylink.h>
 #include <linux/udp.h>
 #include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_cls.h>
 #include <net/xdp_sock_drv.h>
 #include "stmmac_ptp.h"
index f9cd566d1c9b588e5cd547ca4109567dc50114e4..ca4d4548f85e300721a7608066b778f2a2335aca 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/if_vlan.h>
 #include <linux/kmemleak.h>
 #include <linux/sys_soc.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 
index c61e4e44a78f06ddbd28e94ce65e2af410291fe7..0e4f526b17532e94d0634ad8611371934dad7717 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/sys_soc.h>
 
 #include <net/switchdev.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_cls.h>
 #include <net/devlink.h>
 
index ae52cdbcf8cc55f1846b154ffc833080d5005617..0ec85635dfd60a2f169f2c6aab5222842f2d3ae5 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/skbuff.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_cls.h>
 #include <net/pkt_sched.h>
 
index 2c3f08be8c37432b569834152064c56d2a138a5c..e04d4a5eed7ba0c06ecb7c4040e3fb4f837c2134 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <linux/etherdevice.h>
 #include <net/ip6_checksum.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/inet_ecn.h>
 #include <linux/iopoll.h>
 #include <linux/sctp.h>
index 614f3e3efab0950fa4bbe1ee897ba008fc5e3369..953f6d8f8db04d5341fb7ac5de27ffa45e761fcf 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/ptr_ring.h>
 #include <linux/bpf_trace.h>
 #include <linux/net_tstamp.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 
 #define DRV_NAME       "veth"
 #define DRV_VERSION    "1.0"
index c0ff36a98bed3e36a3462483d410d3815236c21d..d158320bc15dbea865cdd891655eb608d999c2c0 100644 (file)
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 #define CHAN2G(_idx, _freq) {                  \
index 878087257ea7e45c56c42d8ea20bb77a1aba193e..e8757865a3d068f1a4b7bd88d85dff8827a2c1aa 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/average.h>
 #include <linux/soc/mediatek/mtk_wed.h>
 #include <net/mac80211.h>
+#include <net/page_pool/helpers.h>
 #include "util.h"
 #include "testmode.h"
 
index 47d54d8ea59d17a94a9fa08d50d06eee45743bfe..ad29f370034e4f080ca7350e8114093446f798e6 100644 (file)
@@ -45,7 +45,7 @@
 #include <linux/slab.h>
 #include <net/ip.h>
 #include <linux/bpf.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <linux/bpf_trace.h>
 
 #include <xen/xen.h>
index 16a49ba534e4aa3fbb6c4ebe29aa8e9ef7bb4c76..888e3d7e74c1facd2e164f94385e482cc11c7b40 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/if_packet.h>
 #include <linux/llist.h>
 #include <net/flow.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <linux/netfilter/nf_conntrack_common.h>
 #endif
similarity index 51%
rename from include/net/page_pool.h
rename to include/net/page_pool/helpers.h
index 73d4f786418d55496f3809d6560ef49dec7e513c..78df91804c8788f7bd7e7f8c5bfca908a7dd7ada 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0
  *
- * page_pool.h
+ * page_pool/helpers.h
  *     Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
  *     Copyright (C) 2016 Red Hat, Inc.
  */
  * will release the DMA mapping and in-flight state accounting.  We
  * hope to lift this requirement in the future.
  */
-#ifndef _NET_PAGE_POOL_H
-#define _NET_PAGE_POOL_H
+#ifndef _NET_PAGE_POOL_HELPERS_H
+#define _NET_PAGE_POOL_HELPERS_H
 
-#include <linux/mm.h> /* Needed by ptr_ring */
-#include <linux/ptr_ring.h>
-#include <linux/dma-direction.h>
-
-#define PP_FLAG_DMA_MAP                BIT(0) /* Should page_pool do the DMA
-                                       * map/unmap
-                                       */
-#define PP_FLAG_DMA_SYNC_DEV   BIT(1) /* If set all pages that the driver gets
-                                       * from page_pool will be
-                                       * DMA-synced-for-device according to
-                                       * the length provided by the device
-                                       * driver.
-                                       * Please note DMA-sync-for-CPU is still
-                                       * device driver responsibility
-                                       */
-#define PP_FLAG_PAGE_FRAG      BIT(2) /* for page frag feature */
-#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
-                                PP_FLAG_DMA_SYNC_DEV |\
-                                PP_FLAG_PAGE_FRAG)
-
-/*
- * Fast allocation side cache array/stack
- *
- * The cache size and refill watermark is related to the network
- * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
- * ring is usually refilled and the max consumed elements will be 64,
- * thus a natural max size of objects needed in the cache.
- *
- * Keeping room for more objects, is due to XDP_DROP use-case.  As
- * XDP_DROP allows the opportunity to recycle objects directly into
- * this array, as it shares the same softirq/NAPI protection.  If
- * cache is already full (or partly full) then the XDP_DROP recycles
- * would have to take a slower code path.
- */
-#define PP_ALLOC_CACHE_SIZE    128
-#define PP_ALLOC_CACHE_REFILL  64
-struct pp_alloc_cache {
-       u32 count;
-       struct page *cache[PP_ALLOC_CACHE_SIZE];
-};
-
-/**
- * struct page_pool_params - page pool parameters
- * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
- * @order:     2^order pages on allocation
- * @pool_size: size of the ptr_ring
- * @nid:       NUMA node id to allocate from pages from
- * @dev:       device, for DMA pre-mapping purposes
- * @napi:      NAPI which is the sole consumer of pages, otherwise NULL
- * @dma_dir:   DMA mapping direction
- * @max_len:   max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
- * @offset:    DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
- */
-struct page_pool_params {
-       unsigned int    flags;
-       unsigned int    order;
-       unsigned int    pool_size;
-       int             nid;
-       struct device   *dev;
-       struct napi_struct *napi;
-       enum dma_data_direction dma_dir;
-       unsigned int    max_len;
-       unsigned int    offset;
-/* private: used by test code only */
-       void (*init_callback)(struct page *page, void *arg);
-       void *init_arg;
-};
+#include <net/page_pool/types.h>
 
 #ifdef CONFIG_PAGE_POOL_STATS
-/**
- * struct page_pool_alloc_stats - allocation statistics
- * @fast:      successful fast path allocations
- * @slow:      slow path order-0 allocations
- * @slow_high_order: slow path high order allocations
- * @empty:     ptr ring is empty, so a slow path allocation was forced
- * @refill:    an allocation which triggered a refill of the cache
- * @waive:     pages obtained from the ptr ring that cannot be added to
- *             the cache due to a NUMA mismatch
- */
-struct page_pool_alloc_stats {
-       u64 fast;
-       u64 slow;
-       u64 slow_high_order;
-       u64 empty;
-       u64 refill;
-       u64 waive;
-};
-
-/**
- * struct page_pool_recycle_stats - recycling (freeing) statistics
- * @cached:    recycling placed page in the page pool cache
- * @cache_full:        page pool cache was full
- * @ring:      page placed into the ptr ring
- * @ring_full: page released from page pool because the ptr ring was full
- * @released_refcnt:   page released (and not recycled) because refcnt > 1
- */
-struct page_pool_recycle_stats {
-       u64 cached;
-       u64 cache_full;
-       u64 ring;
-       u64 ring_full;
-       u64 released_refcnt;
-};
-
-/**
- * struct page_pool_stats - combined page pool use statistics
- * @alloc_stats:       see struct page_pool_alloc_stats
- * @recycle_stats:     see struct page_pool_recycle_stats
- *
- * Wrapper struct for combining page pool stats with different storage
- * requirements.
- */
-struct page_pool_stats {
-       struct page_pool_alloc_stats alloc_stats;
-       struct page_pool_recycle_stats recycle_stats;
-};
-
 int page_pool_ethtool_stats_get_count(void);
 u8 *page_pool_ethtool_stats_get_strings(u8 *data);
 u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
@@ -158,7 +44,6 @@ u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
 bool page_pool_get_stats(struct page_pool *pool,
                         struct page_pool_stats *stats);
 #else
-
 static inline int page_pool_ethtool_stats_get_count(void)
 {
        return 0;
@@ -173,72 +58,7 @@ static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
 {
        return data;
 }
-
-#endif
-
-struct page_pool {
-       struct page_pool_params p;
-
-       struct delayed_work release_dw;
-       void (*disconnect)(void *);
-       unsigned long defer_start;
-       unsigned long defer_warn;
-
-       u32 pages_state_hold_cnt;
-       unsigned int frag_offset;
-       struct page *frag_page;
-       long frag_users;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-       /* these stats are incremented while in softirq context */
-       struct page_pool_alloc_stats alloc_stats;
 #endif
-       u32 xdp_mem_id;
-
-       /*
-        * Data structure for allocation side
-        *
-        * Drivers allocation side usually already perform some kind
-        * of resource protection.  Piggyback on this protection, and
-        * require driver to protect allocation side.
-        *
-        * For NIC drivers this means, allocate a page_pool per
-        * RX-queue. As the RX-queue is already protected by
-        * Softirq/BH scheduling and napi_schedule. NAPI schedule
-        * guarantee that a single napi_struct will only be scheduled
-        * on a single CPU (see napi_schedule).
-        */
-       struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
-
-       /* Data structure for storing recycled pages.
-        *
-        * Returning/freeing pages is more complicated synchronization
-        * wise, because free's can happen on remote CPUs, with no
-        * association with allocation resource.
-        *
-        * Use ptr_ring, as it separates consumer and producer
-        * effeciently, it a way that doesn't bounce cache-lines.
-        *
-        * TODO: Implement bulk return pages into this structure.
-        */
-       struct ptr_ring ring;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-       /* recycle stats are per-cpu to avoid locking */
-       struct page_pool_recycle_stats __percpu *recycle_stats;
-#endif
-       atomic_t pages_state_release_cnt;
-
-       /* A page_pool is strictly tied to a single RX-queue being
-        * protected by NAPI, due to above pp_alloc_cache. This
-        * refcnt serves purpose is to simplify drivers error handling.
-        */
-       refcount_t user_cnt;
-
-       u64 destroy_cnt;
-};
-
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
 
 /**
  * page_pool_dev_alloc_pages() - allocate a page.
@@ -253,9 +73,6 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
        return page_pool_alloc_pages(pool, gfp);
 }
 
-struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
-                                 unsigned int size, gfp_t gfp);
-
 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
                                                    unsigned int *offset,
                                                    unsigned int size)
@@ -278,44 +95,6 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
        return pool->p.dma_dir;
 }
 
-bool page_pool_return_skb_page(struct page *page, bool napi_safe);
-
-struct page_pool *page_pool_create(const struct page_pool_params *params);
-
-struct xdp_mem_info;
-
-#ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
-void page_pool_destroy(struct page_pool *pool);
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
-                          struct xdp_mem_info *mem);
-void page_pool_put_page_bulk(struct page_pool *pool, void **data,
-                            int count);
-#else
-static inline void page_pool_unlink_napi(struct page_pool *pool)
-{
-}
-
-static inline void page_pool_destroy(struct page_pool *pool)
-{
-}
-
-static inline void page_pool_use_xdp_mem(struct page_pool *pool,
-                                        void (*disconnect)(void *),
-                                        struct xdp_mem_info *mem)
-{
-}
-
-static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
-                                          int count)
-{
-}
-#endif
-
-void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
-                                 unsigned int dma_sync_size,
-                                 bool allow_direct);
-
 /* pp_frag_count represents the number of writers who can update the page
  * either by updating skb->data or via DMA mappings for the device.
  * We can't rely on the page refcnt for that as we don't know who might be
@@ -445,26 +224,15 @@ static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
                page->dma_addr_upper = upper_32_bits(addr);
 }
 
-static inline bool is_page_pool_compiled_in(void)
-{
-#ifdef CONFIG_PAGE_POOL
-       return true;
-#else
-       return false;
-#endif
-}
-
 static inline bool page_pool_put(struct page_pool *pool)
 {
        return refcount_dec_and_test(&pool->user_cnt);
 }
 
-/* Caller must provide appropriate safe context, e.g. NAPI. */
-void page_pool_update_nid(struct page_pool *pool, int new_nid);
 static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
 {
        if (unlikely(pool->p.nid != new_nid))
                page_pool_update_nid(pool, new_nid);
 }
 
-#endif /* _NET_PAGE_POOL_H */
+#endif /* _NET_PAGE_POOL_HELPERS_H */
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
new file mode 100644 (file)
index 0000000..9ac3919
--- /dev/null
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NET_PAGE_POOL_TYPES_H
+#define _NET_PAGE_POOL_TYPES_H
+
+#include <linux/dma-direction.h>
+#include <linux/ptr_ring.h>
+
+#define PP_FLAG_DMA_MAP                BIT(0) /* Should page_pool do the DMA
+                                       * map/unmap
+                                       */
+#define PP_FLAG_DMA_SYNC_DEV   BIT(1) /* If set all pages that the driver gets
+                                       * from page_pool will be
+                                       * DMA-synced-for-device according to
+                                       * the length provided by the device
+                                       * driver.
+                                       * Please note DMA-sync-for-CPU is still
+                                       * device driver responsibility
+                                       */
+#define PP_FLAG_PAGE_FRAG      BIT(2) /* for page frag feature */
+#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
+                                PP_FLAG_DMA_SYNC_DEV |\
+                                PP_FLAG_PAGE_FRAG)
+
+/*
+ * Fast allocation side cache array/stack
+ *
+ * The cache size and refill watermark is related to the network
+ * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
+ * ring is usually refilled and the max consumed elements will be 64,
+ * thus a natural max size of objects needed in the cache.
+ *
+ * Keeping room for more objects, is due to XDP_DROP use-case.  As
+ * XDP_DROP allows the opportunity to recycle objects directly into
+ * this array, as it shares the same softirq/NAPI protection.  If
+ * cache is already full (or partly full) then the XDP_DROP recycles
+ * would have to take a slower code path.
+ */
+#define PP_ALLOC_CACHE_SIZE    128
+#define PP_ALLOC_CACHE_REFILL  64
+struct pp_alloc_cache {
+       u32 count;
+       struct page *cache[PP_ALLOC_CACHE_SIZE];
+};
+
+/**
+ * struct page_pool_params - page pool parameters
+ * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @order:     2^order pages on allocation
+ * @pool_size: size of the ptr_ring
+ * @nid:       NUMA node id to allocate from pages from
+ * @dev:       device, for DMA pre-mapping purposes
+ * @napi:      NAPI which is the sole consumer of pages, otherwise NULL
+ * @dma_dir:   DMA mapping direction
+ * @max_len:   max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
+ * @offset:    DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
+ */
+struct page_pool_params {
+       unsigned int    flags;
+       unsigned int    order;
+       unsigned int    pool_size;
+       int             nid;
+       struct device   *dev;
+       struct napi_struct *napi;
+       enum dma_data_direction dma_dir;
+       unsigned int    max_len;
+       unsigned int    offset;
+/* private: used by test code only */
+       void (*init_callback)(struct page *page, void *arg);
+       void *init_arg;
+};
+
+#ifdef CONFIG_PAGE_POOL_STATS
+/**
+ * struct page_pool_alloc_stats - allocation statistics
+ * @fast:      successful fast path allocations
+ * @slow:      slow path order-0 allocations
+ * @slow_high_order: slow path high order allocations
+ * @empty:     ptr ring is empty, so a slow path allocation was forced
+ * @refill:    an allocation which triggered a refill of the cache
+ * @waive:     pages obtained from the ptr ring that cannot be added to
+ *             the cache due to a NUMA mismatch
+ */
+struct page_pool_alloc_stats {
+       u64 fast;
+       u64 slow;
+       u64 slow_high_order;
+       u64 empty;
+       u64 refill;
+       u64 waive;
+};
+
+/**
+ * struct page_pool_recycle_stats - recycling (freeing) statistics
+ * @cached:    recycling placed page in the page pool cache
+ * @cache_full:        page pool cache was full
+ * @ring:      page placed into the ptr ring
+ * @ring_full: page released from page pool because the ptr ring was full
+ * @released_refcnt:   page released (and not recycled) because refcnt > 1
+ */
+struct page_pool_recycle_stats {
+       u64 cached;
+       u64 cache_full;
+       u64 ring;
+       u64 ring_full;
+       u64 released_refcnt;
+};
+
+/**
+ * struct page_pool_stats - combined page pool use statistics
+ * @alloc_stats:       see struct page_pool_alloc_stats
+ * @recycle_stats:     see struct page_pool_recycle_stats
+ *
+ * Wrapper struct for combining page pool stats with different storage
+ * requirements.
+ */
+struct page_pool_stats {
+       struct page_pool_alloc_stats alloc_stats;
+       struct page_pool_recycle_stats recycle_stats;
+};
+#endif
+
+struct page_pool {
+       struct page_pool_params p;
+
+       struct delayed_work release_dw;
+       void (*disconnect)(void *pool);
+       unsigned long defer_start;
+       unsigned long defer_warn;
+
+       u32 pages_state_hold_cnt;
+       unsigned int frag_offset;
+       struct page *frag_page;
+       long frag_users;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+       /* these stats are incremented while in softirq context */
+       struct page_pool_alloc_stats alloc_stats;
+#endif
+       u32 xdp_mem_id;
+
+       /*
+        * Data structure for allocation side
+        *
+        * Drivers allocation side usually already perform some kind
+        * of resource protection.  Piggyback on this protection, and
+        * require driver to protect allocation side.
+        *
+        * For NIC drivers this means, allocate a page_pool per
+        * RX-queue. As the RX-queue is already protected by
+        * Softirq/BH scheduling and napi_schedule. NAPI schedule
+        * guarantee that a single napi_struct will only be scheduled
+        * on a single CPU (see napi_schedule).
+        */
+       struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
+
+       /* Data structure for storing recycled pages.
+        *
+        * Returning/freeing pages is more complicated synchronization
+        * wise, because free's can happen on remote CPUs, with no
+        * association with allocation resource.
+        *
+        * Use ptr_ring, as it separates consumer and producer
+        * efficiently, it a way that doesn't bounce cache-lines.
+        *
+        * TODO: Implement bulk return pages into this structure.
+        */
+       struct ptr_ring ring;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+       /* recycle stats are per-cpu to avoid locking */
+       struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
+       atomic_t pages_state_release_cnt;
+
+       /* A page_pool is strictly tied to a single RX-queue being
+        * protected by NAPI, due to above pp_alloc_cache. This
+        * refcnt serves purpose is to simplify drivers error handling.
+        */
+       refcount_t user_cnt;
+
+       u64 destroy_cnt;
+};
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+                                 unsigned int size, gfp_t gfp);
+bool page_pool_return_skb_page(struct page *page, bool napi_safe);
+
+struct page_pool *page_pool_create(const struct page_pool_params *params);
+
+struct xdp_mem_info;
+
+#ifdef CONFIG_PAGE_POOL
+void page_pool_unlink_napi(struct page_pool *pool);
+void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+                          struct xdp_mem_info *mem);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+                            int count);
+#else
+static inline void page_pool_unlink_napi(struct page_pool *pool)
+{
+}
+
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+}
+
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+                                        void (*disconnect)(void *),
+                                        struct xdp_mem_info *mem)
+{
+}
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+                                          int count)
+{
+}
+#endif
+
+void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
+                                 unsigned int dma_sync_size,
+                                 bool allow_direct);
+
+static inline bool is_page_pool_compiled_in(void)
+{
+#ifdef CONFIG_PAGE_POOL
+       return true;
+#else
+       return false;
+#endif
+}
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+
+#endif /* _NET_PAGE_POOL_H */
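
For context, a rough sketch of how a driver might use the page_pool API
with the split headers; the foo_* names, the ring_size argument and the
flag choices are illustrative assumptions, not taken from this patch:

    #include <net/page_pool/helpers.h>      /* also pulls in page_pool/types.h */

    /* Hypothetical per-RX-queue setup: page_pool performs the DMA mapping
     * itself because PP_FLAG_DMA_MAP is set.
     */
    static struct page_pool *foo_create_rx_pool(struct device *dev,
                                                unsigned int ring_size)
    {
            struct page_pool_params pp_params = {
                    .flags          = PP_FLAG_DMA_MAP,
                    .order          = 0,
                    .pool_size      = ring_size,
                    .nid            = NUMA_NO_NODE,
                    .dev            = dev,
                    .dma_dir        = DMA_FROM_DEVICE,
            };

            return page_pool_create(&pp_params);    /* ERR_PTR() on failure */
    }

    static void foo_rx_pool_example(struct page_pool *pool)
    {
            struct page *page;

            page = page_pool_dev_alloc_pages(pool); /* fast-path allocation */
            if (page)
                    /* outside NAPI context here, so allow_direct = false */
                    page_pool_put_full_page(pool, page, false);

            page_pool_destroy(pool);                /* tear the pool down */
    }
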
index ca534501158b7ee2bc6f4e16921218da11ee21c5..6834356b2d2aea55127d377601f2c56106a01a0d 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/tracepoint.h>
 
 #include <trace/events/mmflags.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 
 TRACE_EVENT(page_pool_release,
 
index 0aac76c13fd4abfcd496f0743bd6e5e3b7394664..57a7a64b84ede57b842c04a4adef34725858d5d2 100644 (file)
@@ -15,7 +15,7 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <net/net_namespace.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <linux/error-injection.h>
 #include <linux/smp.h>
 #include <linux/sock_diag.h>
index 5d615a1697187341f14e0b09510acd72cfe8ca25..cd28c1f14002b24610093d287f0a852ac15d70db 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/xdp.h>
 
 #include <linux/dma-direction.h>
index c6f98245582cd4dd01a7c4f5708163122500a4f0..d3bed964123c9c61b553c6347d3b789bc120436c 100644 (file)
@@ -73,7 +73,7 @@
 #include <net/mpls.h>
 #include <net/mptcp.h>
 #include <net/mctp.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/dropreason.h>
 
 #include <linux/uaccess.h>
index 8362130bf085d53d3c5f18bc00763718db8cad5d..a70670fe9a2dc8df69d304f7e2535f32dacf2d79 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/idr.h>
 #include <linux/rhashtable.h>
 #include <linux/bug.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 
 #include <net/xdp.h>
 #include <net/xdp_priv.h> /* struct xdp_mem_allocator */