__entry->txq_state, __entry->packets, __entry->skbaddr )
);
+TRACE_EVENT(qdisc_enqueue,
+
+ TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb),
+
+ TP_ARGS(qdisc, txq, skb),
+
+ TP_STRUCT__entry(
+ __field(struct Qdisc *, qdisc)
+ __field(void *, skbaddr)
+ __field(int, ifindex)
+ __field(u32, handle)
+ __field(u32, parent)
+ ),
+
+ TP_fast_assign(
+ __entry->qdisc = qdisc;
+ __entry->skbaddr = skb;
+ __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
+ __entry->handle = qdisc->handle;
+ __entry->parent = qdisc->parent;
+ ),
+
+ TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%px",
+ __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr)
+);
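
For reference, a minimal sketch (not part of this patch) of how in-kernel code could attach a probe to the new tracepoint. register_trace_qdisc_enqueue()/unregister_trace_qdisc_enqueue() are generated by the TRACE_EVENT() machinery once <trace/events/qdisc.h> is included; the module form shown here additionally assumes the tracepoint symbol is exported to modules, which this patch does not do, so treat it as illustrative only (probe and module names are made up).

	#include <linux/module.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <net/sch_generic.h>
	#include <trace/events/qdisc.h>

	/* Probe signature: the void *data cookie first, then the TP_PROTO() arguments. */
	static void sample_qdisc_enqueue_probe(void *data, struct Qdisc *qdisc,
					       const struct netdev_queue *txq,
					       struct sk_buff *skb)
	{
		/* Runs in the transmit path; keep the work cheap. */
		pr_debug("qdisc_enqueue: ifindex=%d handle=0x%X skbaddr=%px\n",
			 txq->dev ? txq->dev->ifindex : 0, qdisc->handle, skb);
	}

	static int __init sample_init(void)
	{
		return register_trace_qdisc_enqueue(sample_qdisc_enqueue_probe, NULL);
	}

	static void __exit sample_exit(void)
	{
		unregister_trace_qdisc_enqueue(sample_qdisc_enqueue_probe, NULL);
		tracepoint_synchronize_unregister();
	}

	module_init(sample_init);
	module_exit(sample_exit);
	MODULE_LICENSE("GPL");
	MODULE_DESCRIPTION("Illustrative qdisc_enqueue tracepoint consumer");
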
+
TRACE_EVENT(qdisc_reset,
TP_PROTO(struct Qdisc *q),
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
+#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
}
}
+static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
+ struct sk_buff **to_free,
+ struct netdev_queue *txq)
+{
+ int rc;
+
+ rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
+ if (rc == NET_XMIT_SUCCESS)
+ trace_qdisc_enqueue(q, txq, skb);
+ return rc;
+}
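
A note on the helper above, not spelled out in the patch itself: q->enqueue() can return the NET_XMIT_* code combined with internal bits such as __NET_XMIT_STOLEN or __NET_XMIT_BYPASS, so the result is masked with NET_XMIT_MASK before the comparison. The tracepoint therefore fires only after the enqueue has completed and only for skbs the qdisc actually accepted (NET_XMIT_SUCCESS); the call sites below simply replace their open-coded masking with this helper.
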
+
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
* of q->seqlock to protect from racing with requeuing.
*/
if (unlikely(!nolock_qdisc_is_empty(q))) {
- rc = q->enqueue(skb, q, &to_free) &
- NET_XMIT_MASK;
+ rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
__qdisc_run(q);
qdisc_run_end(q);
return NET_XMIT_SUCCESS;
}
- rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
qdisc_run(q);
no_lock_out:
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
- rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);