git.baikalelectronics.ru Git - kernel.git/commitdiff
tcp: sk_forced_mem_schedule() optimization
authorEric Dumazet <edumazet@google.com>
Sat, 11 Jun 2022 03:30:16 +0000 (20:30 -0700)
committerDavid S. Miller <davem@davemloft.net>
Mon, 13 Jun 2022 12:35:25 +0000 (13:35 +0100)
sk_memory_allocated_add() has three callers, and returns
to them @memory_allocated.

sk_forced_mem_schedule() is one of them, and ignores
the returned value.

Change sk_memory_allocated_add() to return void.

Change sock_reserve_memory() and __sk_mem_raise_allocated()
to call sk_memory_allocated().

This removes one cache line miss [1] for RPC workloads,
as first skbs in TCP write queue and receive queue go through
sk_forced_mem_schedule().

[1] Cache line holding tcp_memory_allocated.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sock.h
net/core/sock.c

index 0063e8410a4e3ed91aef9cf34eb1127f7ce33b93..304a5e39d41e27105148058066e8fa23490cf9fa 100644 (file)
@@ -1412,7 +1412,7 @@ sk_memory_allocated(const struct sock *sk)
 /* 1 MB per cpu, in page units */
 #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
 
-static inline long
+static inline void
 sk_memory_allocated_add(struct sock *sk, int amt)
 {
        int local_reserve;
@@ -1424,7 +1424,6 @@ sk_memory_allocated_add(struct sock *sk, int amt)
                atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
        }
        preempt_enable();
-       return sk_memory_allocated(sk);
 }
 
 static inline void
index 697d5c8e2f0def49351a7d38ec59ab5e875d3b10..92a0296ccb1842f11fb8dd4b2f768885d05daa8f 100644 (file)
@@ -1019,7 +1019,8 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
                return -ENOMEM;
 
        /* pre-charge to forward_alloc */
-       allocated = sk_memory_allocated_add(sk, pages);
+       sk_memory_allocated_add(sk, pages);
+       allocated = sk_memory_allocated(sk);
        /* If the system goes into memory pressure with this
         * precharge, give up and return error.
         */
@@ -2906,11 +2907,13 @@ EXPORT_SYMBOL(sk_wait_data);
  */
 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 {
-       struct proto *prot = sk->sk_prot;
-       long allocated = sk_memory_allocated_add(sk, amt);
        bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
+       struct proto *prot = sk->sk_prot;
        bool charged = true;
+       long allocated;
 
+       sk_memory_allocated_add(sk, amt);
+       allocated = sk_memory_allocated(sk);
        if (memcg_charge &&
            !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
                                                gfp_memcg_charge())))