socket, bpf: fix possible use after free
author    Eric Dumazet <edumazet@google.com>
          Mon, 2 Oct 2017 19:20:51 +0000 (12:20 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 12 Oct 2017 09:51:23 +0000 (11:51 +0200)
[ Upstream commit eefca20eb20c66b06cf5ed09b49b1a7caaa27b7b ]

Starting with linux-4.4, the TCP three-way handshake (3WHS) no longer takes the listener lock.

Since then, we might hit a use-after-free in sk_filter_charge() if the
filter we picked up in the memcpy() of the listener content happens to be
replaced concurrently by a thread changing the listener's BPF filter.

To fix this, we need to make sure the filter refcount is not already
zero before incrementing it; see the standalone sketch after the
changed-file list below.

Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
net/core/filter.c
net/core/sock.c
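
The core of the fix is the "take a reference only if the object is still
live" pattern.  Below is a minimal standalone sketch of that pattern using
C11 atomics rather than the kernel's atomic_t API; the struct and helper
names (obj, obj_get_unless_zero) are illustrative, not taken from the patch.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
            atomic_int refcnt;      /* 0 means the object is being freed */
    };

    /* Mirrors the kernel's atomic_inc_not_zero(): loop with a
     * compare-exchange, refusing to resurrect an object whose refcount
     * has already dropped to zero.
     */
    static bool obj_get_unless_zero(struct obj *o)
    {
            int old = atomic_load(&o->refcnt);

            while (old != 0) {
                    /* on failure, 'old' is reloaded with the current value */
                    if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
                            return true;    /* reference taken */
            }
            return false;   /* object already dying; do not touch it */
    }

    int main(void)
    {
            struct obj o = { .refcnt = 1 };

            bool live = obj_get_unless_zero(&o);   /* true, refcnt 1 -> 2 */
            atomic_store(&o.refcnt, 0);            /* simulate teardown */
            bool dead = obj_get_unless_zero(&o);   /* false, no resurrection */

            return (live && !dead) ? 0 : 1;
    }

The old sk_filter_charge() did a plain atomic_inc(), which happily turns a
refcount of 0 into 1 and hands the caller a filter that is being (or has
already been) freed.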

diff --git a/net/core/filter.c b/net/core/filter.c
index 4eb4ce0aeef4929d56bb876281e205ab055fb2fe..bfeedbbde2143fd5ba9b2ffe202ff995138bf515 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -937,20 +937,31 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 /* try to charge the socket memory if there is space available
  * return true on success
  */
-bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
        u32 filter_size = bpf_prog_size(fp->prog->len);
 
        /* same check as in sock_kmalloc() */
        if (filter_size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
-               atomic_inc(&fp->refcnt);
                atomic_add(filter_size, &sk->sk_omem_alloc);
                return true;
        }
        return false;
 }
 
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+{
+       if (!atomic_inc_not_zero(&fp->refcnt))
+               return false;
+
+       if (!__sk_filter_charge(sk, fp)) {
+               sk_filter_release(fp);
+               return false;
+       }
+       return true;
+}
+
 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 {
        struct sock_filter *old_prog;
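
With the split above, sk_filter_charge() both takes the reference and
charges the memory, and a false return now means the caller holds no
reference at all (on a failed charge it drops the just-taken reference
itself via sk_filter_release()).  A hedged, caller-side sketch of that
contract -- the caller shown here is hypothetical; only sk_filter_charge(),
sk_filter_release() and rcu_assign_pointer() are real kernel functions:

    if (sk_filter_charge(sk, fp)) {
            /* we own a reference and sk was charged for the filter */
            rcu_assign_pointer(sk->sk_filter, fp);
    } else {
            /* either fp was already dying (refcnt hit zero) or the
             * optmem charge failed; in both cases no reference is
             * held, so do NOT call sk_filter_release(fp) here.
             */
    }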
diff --git a/net/core/sock.c b/net/core/sock.c
index 231c38d91855bcf52c0a709b2ebec695d846dc1c..2a77cc50f021c2cccc47005b3fa32fd783e49164 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1528,13 +1528,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                sock_reset_flag(newsk, SOCK_DONE);
                skb_queue_head_init(&newsk->sk_error_queue);
 
-               filter = rcu_dereference_protected(newsk->sk_filter, 1);
+               rcu_read_lock();
+               filter = rcu_dereference(sk->sk_filter);
                if (filter != NULL)
                        /* though it's an empty new sock, the charging may fail
                         * if sysctl_optmem_max was changed between creation of
                         * original socket and cloning
                         */
                        is_charged = sk_filter_charge(newsk, filter);
+               RCU_INIT_POINTER(newsk->sk_filter, filter);
+               rcu_read_unlock();
 
                if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
                        /* We need to make sure that we don't uncharge the new
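
For context, the sock.c hunk above stops trusting the sk_filter pointer
that was bit-copied from the parent and instead re-reads it under
rcu_read_lock(), so the filter cannot be freed while sk_filter_charge()
decides whether the clone may keep it.  A simplified timeline of the race
the patch closes (illustration only, not code from the tree):

    /*
     *   CPU 0: sk_clone_lock()           CPU 1: replace listener filter
     *   (lockless 3WHS since 4.4)        (e.g. SO_ATTACH_FILTER)
     *   ----------------------------     ------------------------------
     *   memcpy(newsk, sk, ...)
     *     newsk->sk_filter == old
     *                                    rcu_assign_pointer(sk->sk_filter,
     *                                                       new)
     *                                    sk_filter_uncharge(sk, old)
     *                                      -> last ref gone, old freed
     *   sk_filter_charge(newsk, old)
     *     old code: atomic_inc() on freed memory (use-after-free)
     *     new code: atomic_inc_not_zero() sees 0 and bails out,
     *               while rcu_read_lock() keeps the re-read filter
     *               alive for the duration of the charge
     */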