git.itanic.dy.fi Git - linux-stable/commitdiff
xsk: Inherit need_wakeup flag for shared sockets
authorJalal Mostafa <jalal.a.mostapha@gmail.com>
Wed, 21 Sep 2022 13:57:01 +0000 (13:57 +0000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 12 Oct 2022 07:53:26 +0000 (09:53 +0200)
commit 60240bc26114543fcbfcd8a28466e67e77b20388 upstream.

The flag for need_wakeup is not set for xsks with `XDP_SHARED_UMEM`
flag and of different queue ids and/or devices. They should inherit
the flag from the first socket buffer pool since no flags can be
specified once `XDP_SHARED_UMEM` is specified.

Fixes: b5aea28dca134 ("xsk: Add shared umem support between queue ids")
Signed-off-by: Jalal Mostafa <jalal.a.mostapha@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20220921135701.10199-1-jalal.a.mostapha@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/net/xsk_buff_pool.h
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c

index f9869d9ce57dade681c9a4efb7bf4ad6312c1157..7517f4faf6b32f7a8028b83a0c923d3c715ada5a 100644 (file)
@@ -87,7 +87,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem);
 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                  u16 queue_id, u16 flags);
-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
                         struct net_device *dev, u16 queue_id);
 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_destroy(struct xsk_buff_pool *pool);
index 9b55ca27cccf2a6954d03484cb23ebf91e4b3834..10c302f9c6d782101364a025d701f17c09290acd 100644 (file)
@@ -968,8 +968,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                                goto out_unlock;
                        }
 
-                       err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
-                                                  dev, qid);
+                       err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
+                                                  qid);
                        if (err) {
                                xp_destroy(xs->pool);
                                xs->pool = NULL;
index ccedbbd27692e66454dbfafc88c63fb5a79d55a6..2aa559f1c185628e75989ca363c78d5e7ec3fcea 100644 (file)
@@ -206,17 +206,18 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
        return err;
 }
 
-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
                         struct net_device *dev, u16 queue_id)
 {
        u16 flags;
+       struct xdp_umem *umem = umem_xs->umem;
 
        /* One fill and completion ring required for each queue id. */
        if (!pool->fq || !pool->cq)
                return -EINVAL;
 
        flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
-       if (pool->uses_need_wakeup)
+       if (umem_xs->pool->uses_need_wakeup)
                flags |= XDP_USE_NEED_WAKEUP;
 
        return xp_assign_dev(pool, dev, queue_id, flags);