git.itanic.dy.fi Git - linux-stable/commitdiff
xsk: Inherit need_wakeup flag for shared sockets
authorJalal Mostafa <jalal.a.mostapha@gmail.com>
Wed, 21 Sep 2022 13:57:01 +0000 (13:57 +0000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 12 Oct 2022 07:51:23 +0000 (09:51 +0200)
commit 60240bc26114543fcbfcd8a28466e67e77b20388 upstream.

The flag for need_wakeup is not set for xsks with `XDP_SHARED_UMEM`
flag and of different queue ids and/or devices. They should inherit
the flag from the first socket buffer pool since no flags can be
specified once `XDP_SHARED_UMEM` is specified.

Fixes: b5aea28dca13 ("xsk: Add shared umem support between queue ids")
Signed-off-by: Jalal Mostafa <jalal.a.mostapha@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20220921135701.10199-1-jalal.a.mostapha@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/net/xsk_buff_pool.h
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c

index 647722e847b4131bd3cde64e15b2a834ef04a664..f787c3f524b0358d9394d93bdd1c1521f9d8ee35 100644 (file)
@@ -95,7 +95,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem);
 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                  u16 queue_id, u16 flags);
-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
                         struct net_device *dev, u16 queue_id);
 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_destroy(struct xsk_buff_pool *pool);
index 09002387987ea7cf22c18889e92fd62a75e90f9e..7e311420aab9fc2bf2c2685a91d50713d8fc52ee 100644 (file)
@@ -951,8 +951,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                                goto out_unlock;
                        }
 
-                       err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
-                                                  dev, qid);
+                       err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
+                                                  qid);
                        if (err) {
                                xp_destroy(xs->pool);
                                xs->pool = NULL;
index a71a8c6edf55342f49aabb7b4e2c6e7609abe886..ed6c71826d31f9212cc1ca3eb65afa58e69e4759 100644 (file)
@@ -212,17 +212,18 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
        return err;
 }
 
-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
                         struct net_device *dev, u16 queue_id)
 {
        u16 flags;
+       struct xdp_umem *umem = umem_xs->umem;
 
        /* One fill and completion ring required for each queue id. */
        if (!pool->fq || !pool->cq)
                return -EINVAL;
 
        flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
-       if (pool->uses_need_wakeup)
+       if (umem_xs->pool->uses_need_wakeup)
                flags |= XDP_USE_NEED_WAKEUP;
 
        return xp_assign_dev(pool, dev, queue_id, flags);