git.baikalelectronics.ru Git - kernel.git/commitdiff
netfilter: nat: include zone id in nat table hash again
author Florian Westphal <fw@strlen.de>
Wed, 8 Sep 2021 12:28:37 +0000 (14:28 +0200)
committer Pablo Neira Ayuso <pablo@netfilter.org>
Tue, 21 Sep 2021 01:46:55 +0000 (03:46 +0200)
Similar to the conntrack change, also use the zone id for the nat source
lists if the zone id is valid in both directions.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
net/netfilter/nf_nat_core.c

index 7008961f5cb08b828530bcc23601694362d5a57c..273117683922858eca82f7192f022533422de590 100644 (file)
@@ -150,13 +150,16 @@ static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static unsigned int
-hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net,
+           const struct nf_conntrack_zone *zone,
+           const struct nf_conntrack_tuple *tuple)
 {
        unsigned int hash;
        struct {
                struct nf_conntrack_man src;
                u32 net_mix;
                u32 protonum;
+               u32 zone;
        } __aligned(SIPHASH_ALIGNMENT) combined;
 
        get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
@@ -165,9 +168,13 @@ hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
 
        /* Original src, to ensure we map it consistently if poss. */
        combined.src = tuple->src;
-       combined.net_mix = net_hash_mix(n);
+       combined.net_mix = net_hash_mix(net);
        combined.protonum = tuple->dst.protonum;
 
+       /* Zone ID can be used provided its valid for both directions */
+       if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)
+               combined.zone = zone->id;
+
        hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);
 
        return reciprocal_scale(hash, nf_nat_htable_size);
@@ -272,7 +279,7 @@ find_appropriate_src(struct net *net,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range2 *range)
 {
-       unsigned int h = hash_by_src(net, tuple);
+       unsigned int h = hash_by_src(net, zone, tuple);
        const struct nf_conn *ct;
 
        hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
@@ -619,7 +626,7 @@ nf_nat_setup_info(struct nf_conn *ct,
                unsigned int srchash;
                spinlock_t *lock;
 
-               srchash = hash_by_src(net,
+               srchash = hash_by_src(net, nf_ct_zone(ct),
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
                spin_lock_bh(lock);
@@ -788,7 +795,7 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
 {
        unsigned int h;
 
-       h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+       h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
        hlist_del_rcu(&ct->nat_bysource);
        spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);