tcp: Fix NEW_SYN_RECV handling in inet_twsk_purge()
author     Eric Dumazet <edumazet@google.com>
           Fri, 8 Mar 2024 20:01:21 +0000 (12:01 -0800)
committer  Sasha Levin <sashal@kernel.org>
           Tue, 26 Mar 2024 22:17:31 +0000 (18:17 -0400)
[ Upstream commit 1c4e97dd2d3c9a3e84f7e26346aa39bc426d3249 ]

inet_twsk_purge() uses rcu to find TIME_WAIT and NEW_SYN_RECV
objects to purge.

These objects use SLAB_TYPESAFE_BY_RCU semantic and need special
care. We need to use refcount_inc_not_zero(&sk->sk_refcnt).

Reuse the existing correct logic I wrote for TIME_WAIT,
because both structures have common locations for
sk_state, sk_family, and the netns pointer.

If, after the refcount_inc_not_zero(), the object fields no longer
match the keys, use sock_gen_put(sk) to release the refcount.

Then we can call inet_twsk_deschedule_put() for TIME_WAIT sockets,
or inet_csk_reqsk_queue_drop_and_put() for NEW_SYN_RECV sockets,
with BH disabled.

Then we need to restart the loop because we had to drop rcu_read_lock().

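For reference, the reader-side discipline that SLAB_TYPESAFE_BY_RCU imposes,
and that this patch applies to the purge loop, can be sketched roughly as
below. This is only an illustrative sketch of the generic lookup pattern, not
the patched function; lookup_keys_match() and typesafe_lookup() are
hypothetical names, and the real purge loop additionally dispatches on
sk_state and calls the put functions with BH disabled, as the diff shows.

/*
 * Illustrative sketch only (not part of the patch): the generic pattern
 * required when walking objects allocated from a SLAB_TYPESAFE_BY_RCU
 * cache.  Memory seen under rcu_read_lock() can be freed and immediately
 * reused for a new object of the same type, so a reader must take a
 * reference with refcount_inc_not_zero() and then re-validate the
 * identity keys.  lookup_keys_match() and typesafe_lookup() are
 * hypothetical helpers standing in for the sk_family/netns checks done
 * by inet_twsk_purge().
 */
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <net/sock.h>

static bool lookup_keys_match(const struct sock *sk, int family,
			      const struct net *net)
{
	/* Hypothetical: compare the fields that identify the object. */
	return sk->sk_family == family && sock_net(sk) == net;
}

static struct sock *typesafe_lookup(struct hlist_nulls_head *chain,
				    int family, const struct net *net)
{
	struct hlist_nulls_node *node;
	struct sock *sk;

	rcu_read_lock();
restart:
	sk_nulls_for_each_rcu(sk, node, chain) {
		if (!lookup_keys_match(sk, family, net))
			continue;
		/* The object may be half-way through being recycled;
		 * only proceed if its refcount is provably non-zero.
		 */
		if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
			continue;
		/* We now hold a reference, but possibly to a reused
		 * object: re-check the keys, drop the reference on
		 * mismatch, and rescan the chain.
		 */
		if (unlikely(!lookup_keys_match(sk, family, net))) {
			sock_gen_put(sk);
			goto restart;
		}
		rcu_read_unlock();
		return sk;	/* caller owns the reference */
	}
	rcu_read_unlock();
	return NULL;
}

In the purge path the same before/after check already existed for TIME_WAIT;
what changes with this patch is that NEW_SYN_RECV entries go through the
identical sequence instead of being dropped without taking a reference.
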
Fixes: 740ea3c4a0b2 ("tcp: Clean up kernel listener's reqsk in inet_twsk_purge()")
Link: https://lore.kernel.org/netdev/CANn89iLvFuuihCtt9PME2uS1WJATnf5fKjDToa1WzVnRzHnPfg@mail.gmail.com/T/#u
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20240308200122.64357-2-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 5befa4de5b2416281ad2795713a70d0fd847b0b2..e8de45d34d56a507a4bdcceaccbd5707692b6c0a 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -263,12 +263,12 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
+/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
 void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
 {
-       struct inet_timewait_sock *tw;
-       struct sock *sk;
        struct hlist_nulls_node *node;
        unsigned int slot;
+       struct sock *sk;
 
        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
@@ -277,38 +277,35 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
                rcu_read_lock();
 restart:
                sk_nulls_for_each_rcu(sk, node, &head->chain) {
-                       if (sk->sk_state != TCP_TIME_WAIT) {
-                               /* A kernel listener socket might not hold refcnt for net,
-                                * so reqsk_timer_handler() could be fired after net is
-                                * freed.  Userspace listener and reqsk never exist here.
-                                */
-                               if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
-                                            hashinfo->pernet)) {
-                                       struct request_sock *req = inet_reqsk(sk);
-
-                                       inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
-                               }
+                       int state = inet_sk_state_load(sk);
 
+                       if ((1 << state) & ~(TCPF_TIME_WAIT |
+                                            TCPF_NEW_SYN_RECV))
                                continue;
-                       }
 
-                       tw = inet_twsk(sk);
-                       if ((tw->tw_family != family) ||
-                               refcount_read(&twsk_net(tw)->ns.count))
+                       if (sk->sk_family != family ||
+                           refcount_read(&sock_net(sk)->ns.count))
                                continue;
 
-                       if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
+                       if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
                                continue;
 
-                       if (unlikely((tw->tw_family != family) ||
-                                    refcount_read(&twsk_net(tw)->ns.count))) {
-                               inet_twsk_put(tw);
+                       if (unlikely(sk->sk_family != family ||
+                                    refcount_read(&sock_net(sk)->ns.count))) {
+                               sock_gen_put(sk);
                                goto restart;
                        }
 
                        rcu_read_unlock();
                        local_bh_disable();
-                       inet_twsk_deschedule_put(tw);
+                       if (state == TCP_TIME_WAIT) {
+                               inet_twsk_deschedule_put(inet_twsk(sk));
+                       } else {
+                               struct request_sock *req = inet_reqsk(sk);
+
+                               inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+                                                                 req);
+                       }
                        local_bh_enable();
                        goto restart_rcu;
                }