struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += len;
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, len);
u64_stats_update_end(&stats->syncp);
}
}
struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
u64_stats_update_end(&stats->syncp);
}
skb->protocol = eth_type_trans (skb, dev->net);
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
- stats64->rx_packets++;
- stats64->rx_bytes += skb->len;
+ u64_stats_inc(&stats64->rx_packets);
+ u64_stats_add(&stats64->rx_bytes, skb->len);
u64_stats_update_end_irqrestore(&stats64->syncp, flags);
netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
- stats64->tx_packets += entry->packets;
- stats64->tx_bytes += entry->length;
+ u64_stats_add(&stats64->tx_packets, entry->packets);
+ u64_stats_add(&stats64->tx_bytes, entry->length);
u64_stats_update_end_irqrestore(&stats64->syncp, flags);
} else {
dev->net->stats.tx_errors++;
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tx_packets++;
- tx_stats->tx_bytes += len;
+ u64_stats_inc(&tx_stats->tx_packets);
+ u64_stats_add(&tx_stats->tx_bytes, len);
u64_stats_update_end(&tx_stats->syncp);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->rx_packets++;
- rx_stats->rx_bytes += len;
+ u64_stats_inc(&rx_stats->rx_packets);
+ u64_stats_add(&rx_stats->rx_bytes, len);
u64_stats_update_end(&rx_stats->syncp);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
/* often modified stats are per-CPU, others are shared (netdev->stats) */
struct pcpu_sw_netstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
} __aligned(4 * sizeof(u64));
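With the counters wrapped in u64_stats_t, plain ++ and += no longer compile against these fields: writers have to go through u64_stats_inc()/u64_stats_add() inside a u64_stats_update_begin()/u64_stats_update_end() pair, and readers through u64_stats_read() inside a fetch/retry loop. The sketch below shows both sides of that pairing in one place; it is not part of the patch, the function names are invented for illustration, and only the u64_stats_* calls and struct pcpu_sw_netstats are the real API.

/* Writer side, e.g. a device's receive path (illustrative helper,
 * mirroring the converted hunks above). */
static void example_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	u64_stats_inc(&tstats->rx_packets);
	u64_stats_add(&tstats->rx_bytes, len);
	u64_stats_update_end(&tstats->syncp);
}

/* Reader side: snapshot one CPU's counters, retrying if a writer was
 * inside an update section. The retry only matters on 32-bit kernels,
 * where syncp carries a real seqcount. */
static void example_read_rx(const struct pcpu_sw_netstats *tstats,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&tstats->syncp);
		*packets = u64_stats_read(&tstats->rx_packets);
		*bytes = u64_stats_read(&tstats->rx_bytes);
	} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
}

On 64-bit kernels the update_begin()/update_end() section compiles away and the accessors boil down to cheap per-CPU 64-bit operations; the machinery only costs something on 32-bit, where a 64-bit counter cannot be read or updated in a single atomic access.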
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->rx_bytes += len;
- tstats->rx_packets++;
+ u64_stats_add(&tstats->rx_bytes, len);
+ u64_stats_inc(&tstats->rx_packets);
u64_stats_update_end(&tstats->syncp);
}
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += len;
- tstats->tx_packets += packets;
+ u64_stats_add(&tstats->tx_bytes, len);
+ u64_stats_add(&tstats->tx_packets, packets);
u64_stats_update_end(&tstats->syncp);
}
struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += pkt_len;
- tstats->tx_packets++;
+ u64_stats_add(&tstats->tx_bytes, pkt_len);
+ u64_stats_inc(&tstats->tx_packets);
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
} else {
if (v->vid == pvid)
vxi.flags |= BRIDGE_VLAN_INFO_PVID;
br_vlan_get_stats(v, &stats);
- vxi.rx_bytes = stats.rx_bytes;
- vxi.rx_packets = stats.rx_packets;
- vxi.tx_bytes = stats.tx_bytes;
- vxi.tx_packets = stats.tx_packets;
+ vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
+ vxi.rx_packets = u64_stats_read(&stats.rx_packets);
+ vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
+ vxi.tx_packets = u64_stats_read(&stats.tx_packets);
if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
goto nla_put_failure;
if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
+ u64_stats_add(&stats->tx_bytes, skb->len);
+ u64_stats_inc(&stats->tx_packets);
u64_stats_update_end(&stats->syncp);
}
if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_bytes += skb->len;
- stats->rx_packets++;
+ u64_stats_add(&stats->rx_bytes, skb->len);
+ u64_stats_inc(&stats->rx_packets);
u64_stats_update_end(&stats->syncp);
}
cpu_stats = per_cpu_ptr(v->stats, i);
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- rxpackets = cpu_stats->rx_packets;
- rxbytes = cpu_stats->rx_bytes;
- txbytes = cpu_stats->tx_bytes;
- txpackets = cpu_stats->tx_packets;
+ rxpackets = u64_stats_read(&cpu_stats->rx_packets);
+ rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
+ txbytes = u64_stats_read(&cpu_stats->tx_bytes);
+ txpackets = u64_stats_read(&cpu_stats->tx_packets);
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
- stats->rx_packets += rxpackets;
- stats->rx_bytes += rxbytes;
- stats->tx_bytes += txbytes;
- stats->tx_packets += txpackets;
+ u64_stats_add(&stats->rx_packets, rxpackets);
+ u64_stats_add(&stats->rx_bytes, rxbytes);
+ u64_stats_add(&stats->tx_bytes, txbytes);
+ u64_stats_add(&stats->tx_packets, txpackets);
}
}
return false;
br_vlan_get_stats(v, &stats);
- if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES,
+ u64_stats_read(&stats.rx_bytes),
BRIDGE_VLANDB_STATS_PAD) ||
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
- stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
- nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
+ u64_stats_read(&stats.rx_packets),
+ BRIDGE_VLANDB_STATS_PAD) ||
+ nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES,
+ u64_stats_read(&stats.tx_bytes),
BRIDGE_VLANDB_STATS_PAD) ||
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
- stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
+ u64_stats_read(&stats.tx_packets),
+ BRIDGE_VLANDB_STATS_PAD))
goto out_err;
nla_nest_end(skb, nest);
int cpu;
for_each_possible_cpu(cpu) {
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
const struct pcpu_sw_netstats *stats;
- struct pcpu_sw_netstats tmp;
unsigned int start;
stats = per_cpu_ptr(netstats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
- tmp.rx_packets = stats->rx_packets;
- tmp.rx_bytes = stats->rx_bytes;
- tmp.tx_packets = stats->tx_packets;
- tmp.tx_bytes = stats->tx_bytes;
+ rx_packets = u64_stats_read(&stats->rx_packets);
+ rx_bytes = u64_stats_read(&stats->rx_bytes);
+ tx_packets = u64_stats_read(&stats->tx_packets);
+ tx_bytes = u64_stats_read(&stats->tx_bytes);
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- s->rx_packets += tmp.rx_packets;
- s->rx_bytes += tmp.rx_bytes;
- s->tx_packets += tmp.tx_packets;
- s->tx_bytes += tmp.tx_bytes;
+ s->rx_packets += rx_packets;
+ s->rx_bytes += rx_bytes;
+ s->tx_packets += tx_packets;
+ s->tx_bytes += tx_bytes;
}
}
EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
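dev_fetch_sw_netstats() is the generic consumer of these per-CPU counters: it folds every CPU's pcpu_sw_netstats snapshot into a struct rtnl_link_stats64. A driver that keeps its fast-path packet/byte counts in dev->tstats usually only needs to layer its shared error counters on top, roughly as sketched below (illustrative only; the in-tree dev_get_tstats64() helper already provides this combination, so drivers can simply use it as their .ndo_get_stats64).

/* Illustrative ndo_get_stats64: merge the shared error counters kept in
 * dev->stats with the per-CPU software counters accumulated above. */
static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);	/* tx_errors, rx_dropped, ... */
	dev_fetch_sw_netstats(s, dev->tstats);		/* rx/tx packets and bytes */
}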
s = per_cpu_ptr(dev->tstats, i);
do {
start = u64_stats_fetch_begin_irq(&s->syncp);
- tx_packets = s->tx_packets;
- tx_bytes = s->tx_bytes;
- rx_packets = s->rx_packets;
- rx_bytes = s->rx_bytes;
+ tx_packets = u64_stats_read(&s->tx_packets);
+ tx_bytes = u64_stats_read(&s->tx_bytes);
+ rx_packets = u64_stats_read(&s->rx_packets);
+ rx_bytes = u64_stats_read(&s->rx_bytes);
} while (u64_stats_fetch_retry_irq(&s->syncp, start));
data[0] += tx_packets;
data[1] += tx_bytes;