netfilter: nf_queue: handle socket prefetch
author     Florian Westphal <fw@strlen.de>
           Mon, 28 Feb 2022 23:46:19 +0000 (00:46 +0100)
committer  Florian Westphal <fw@strlen.de>
           Tue, 1 Mar 2022 10:51:15 +0000 (11:51 +0100)
In case someone combines bpf socket assign and nf_queue, we will queue
an skb that references a struct sock whose reference count was never
incremented.
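
For illustration only (not part of this commit): a minimal TC BPF
sketch of the socket-assign scenario, with a made-up program name and
the tuple setup elided; bpf_sk_lookup_tcp(), bpf_sk_assign() and
bpf_sk_release() are the helpers added by cf7fbe660f2d.

    /* Hypothetical TC classifier that preselects ("prefetches") a TCP
     * socket for the skb.  For RCU-freed sockets such as TCP listeners,
     * bpf_sk_assign() does not take a reference on behalf of the skb.
     */
    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int sk_assign_example(struct __sk_buff *skb)
    {
            struct bpf_sock_tuple tuple = {};
            struct bpf_sock *sk;

            /* ... fill tuple.ipv4 from the parsed packet headers ... */

            sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                                   BPF_F_CURRENT_NETNS, 0);
            if (!sk)
                    return TC_ACT_OK;

            bpf_sk_assign(skb, sk, 0);  /* skb->sk = sk, destructor = sock_pfree */
            bpf_sk_release(sk);         /* drop the lookup reference */

            return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";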

Once we leave RCU protection, there is no guarantee that skb->sk is
still valid.

For the refcount-less skb->sk case, try to increment the reference
count and then override the destructor.

In case of failure we have two choices: orphan the skb (which 'deletes'
the socket preselection) or let nf_queue() drop the packet.

Do the latter; this should not happen during normal operation.

Fixes: cf7fbe660f2d ("bpf: Add socket assign support")
Acked-by: Joe Stringer <joe@cilium.io>
Signed-off-by: Florian Westphal <fw@strlen.de>

diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index e39549c559455d9cc5dc6253e47526427d3f4728..63d1516816b1fdaa570288c725cc5f721cde694d 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -180,6 +180,18 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                break;
        }
 
+       if (skb_sk_is_prefetched(skb)) {
+               struct sock *sk = skb->sk;
+
+               if (!sk_is_refcounted(sk)) {
+                       if (!refcount_inc_not_zero(&sk->sk_refcnt))
+                               return -ENOTCONN;
+
+                       /* drop refcount on skb_orphan */
+                       skb->destructor = sock_edemux;
+               }
+       }
+
        entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;
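
For context (paraphrased from include/net/sock.h of this era, not part
of the patch): a "prefetched" socket is recognised by the sock_pfree
destructor that bpf_sk_assign() installs, and sk_is_refcounted() is
false for request/time-wait sockets and for full sockets marked
SOCK_RCU_FREE, such as TCP listeners.  Roughly:

    static inline bool skb_sk_is_prefetched(struct sk_buff *skb)
    {
    #ifdef CONFIG_INET
            return skb->destructor == sock_pfree;
    #else
            return false;
    #endif
    }

    static inline bool sk_is_refcounted(struct sock *sk)
    {
            /* Only full sockets have sk->sk_flags. */
            return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
    }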