git.baikalelectronics.ru Git - kernel.git/commitdiff
net_sched: fix an OOB access in cls_tcindex
author: Cong Wang <xiyou.wangcong@gmail.com>
Mon, 3 Feb 2020 05:14:35 +0000 (21:14 -0800)
committer: David S. Miller <davem@davemloft.net>
Tue, 4 Feb 2020 10:41:36 +0000 (11:41 +0100)
As Eric noticed, tcindex_alloc_perfect_hash() uses cp->hash
to compute the size of memory allocation, but cp->hash is
set again after the allocation, this caused an out-of-bound
access.

So we have to move all cp->hash initialization and computation
before the memory allocation. Move cp->mask and cp->shift together
as cp->hash may need them for computation too.

Reported-and-tested-by: syzbot+35d4dea36c387813ed31@syzkaller.appspotmail.com
Fixes: a4b67f07dc6f ("net: sched: RCU cls_tcindex")
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: Jiri Pirko <jiri@resnulli.us>
Cc: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/cls_tcindex.c

index 3d4a1280352f35f404ef88cf73491552b7feffdd..0323aee03de7efbb99c7943be078765c74dfdf2e 100644 (file)
@@ -333,12 +333,31 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        cp->fall_through = p->fall_through;
        cp->tp = tp;
 
+       if (tb[TCA_TCINDEX_HASH])
+               cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+
+       if (tb[TCA_TCINDEX_MASK])
+               cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
+
+       if (tb[TCA_TCINDEX_SHIFT])
+               cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
+
+       if (!cp->hash) {
+               /* Hash not specified, use perfect hash if the upper limit
+                * of the hashing index is below the threshold.
+                */
+               if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
+                       cp->hash = (cp->mask >> cp->shift) + 1;
+               else
+                       cp->hash = DEFAULT_HASH_SIZE;
+       }
+
        if (p->perfect) {
                int i;
 
                if (tcindex_alloc_perfect_hash(net, cp) < 0)
                        goto errout;
-               for (i = 0; i < cp->hash; i++)
+               for (i = 0; i < min(cp->hash, p->hash); i++)
                        cp->perfect[i].res = p->perfect[i].res;
                balloc = 1;
        }
@@ -350,15 +369,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        if (old_r)
                cr = r->res;
 
-       if (tb[TCA_TCINDEX_HASH])
-               cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
-
-       if (tb[TCA_TCINDEX_MASK])
-               cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
-
-       if (tb[TCA_TCINDEX_SHIFT])
-               cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
        err = -EBUSY;
 
        /* Hash already allocated, make sure that we still meet the
@@ -376,16 +386,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        if (tb[TCA_TCINDEX_FALL_THROUGH])
                cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
 
-       if (!cp->hash) {
-               /* Hash not specified, use perfect hash if the upper limit
-                * of the hashing index is below the threshold.
-                */
-               if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
-                       cp->hash = (cp->mask >> cp->shift) + 1;
-               else
-                       cp->hash = DEFAULT_HASH_SIZE;
-       }
-
        if (!cp->perfect && !cp->h)
                cp->alloc_hash = cp->hash;