net_sched: sch_fq: properly set sk->sk_pacing_status
author    Eric Dumazet <edumazet@google.com>
          Mon, 23 Dec 2019 19:13:24 +0000 (11:13 -0800)
committer David S. Miller <davem@davemloft.net>
          Thu, 26 Dec 2019 23:35:09 +0000 (15:35 -0800)
If fq_classify() recycles a struct fq_flow because a socket
structure has been reallocated, we do not set sk->sk_pacing_status
immediately; it is only set later, if and when the flow becomes
detached.
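
For reference, sk->sk_pacing_status takes one of three values defined
in include/net/sock.h (shown below with added comments for context;
not part of this patch):

	enum sk_pacing {
		SK_PACING_NONE		= 0,	/* no pacing requested */
		SK_PACING_NEEDED	= 1,	/* pacing requested (BBR, SO_MAX_PACING_RATE) */
		SK_PACING_FQ		= 2,	/* the fq qdisc performs the pacing */
	};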

This means that any flow requiring pacing (BBR, or SO_MAX_PACING_RATE)
might fall back to TCP internal pacing, which requires a per-socket
high-resolution timer and therefore costs more CPU cycles.
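
That fallback is gated by a check on the TCP side; paraphrased from
net/ipv4/tcp_output.c of kernels of this era (for context; not part
of this patch):

	/* The acquire load pairs with the smp_store_release() writers of
	 * sk_pacing_status. Internal (hrtimer-based) pacing is used only
	 * while the status is SK_PACING_NEEDED; once it reads SK_PACING_FQ,
	 * TCP leaves pacing to the fq qdisc.
	 */
	static bool tcp_needs_internal_pacing(const struct sock *sk)
	{
		return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
	}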

Fixes: c1fd2cc7e149 ("tcp: internal implementation for pacing")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index b1c7e726ce5d1ae139f765c5b92dfdaea9bee258..ff4c5e9d0d7778d86f20f4bd67cc627eed0713d9 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -301,6 +301,9 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
+                               if (q->rate_enable)
+                                       smp_store_release(&sk->sk_pacing_status,
+                                                         SK_PACING_FQ);
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
@@ -322,8 +325,12 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 
        fq_flow_set_detached(f);
        f->sk = sk;
-       if (skb->sk == sk)
+       if (skb->sk == sk) {
                f->socket_hash = sk->sk_hash;
+               if (q->rate_enable)
+                       smp_store_release(&sk->sk_pacing_status,
+                                         SK_PACING_FQ);
+       }
        f->credit = q->initial_quantum;
 
        rb_link_node(&f->fq_node, parent, p);
@@ -428,17 +435,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        f->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
-               struct sock *sk = skb->sk;
-
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
-               if (sk && q->rate_enable) {
-                       if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
-                                    SK_PACING_FQ))
-                               smp_store_release(&sk->sk_pacing_status,
-                                                 SK_PACING_FQ);
-               }
                q->inactive_flows--;
        }
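
For completeness, SK_PACING_NEEDED is requested by the pacing users
themselves; for example, BBR's init hook contains (paraphrased from
net/ipv4/tcp_bbr.c; not part of this patch):

	/* Ask for pacing unless a qdisc already provides it; fq later
	 * overwrites the status with SK_PACING_FQ when it takes over.
	 */
	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);

With this patch, fq performs that overwrite as soon as fq_classify()
creates or recycles a flow, rather than deferring it to the
detached-flow branch of fq_enqueue() that the last hunk removes.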