static const char *opt_irq_str = "";
static u32 irq_no;
static int irqs_at_init = -1;
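+/* Running sequence number stamped into each Tx frame's pktgen header;
+ * incremented once per sent packet (assumed single Tx stamping path).
+ */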
+static u32 sequence;
static int opt_poll;
static int opt_interval = 1;
static int opt_retries = 3;
static unsigned long opt_tx_cycle_ns;
static int opt_schpolicy = SCHED_OTHER;
static int opt_schprio = SCHED_PRI__DEFAULT;
+static bool opt_tstamp;
struct vlan_ethhdr {
	unsigned char h_dest[6];
	unsigned char h_source[6];
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};
+#define PKTGEN_MAGIC 0xbe9be955
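+/* Pktgen-style header placed at the start of the UDP payload: magic marker,
+ * sequence number and a seconds/microseconds software timestamp, all stored
+ * in network byte order.
+ */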
+struct pktgen_hdr {
+	__be32 pgh_magic;
+	__be32 seq_num;
+	__be32 tv_sec;
+	__be32 tv_usec;
+};
+
struct xsk_ring_stats {
unsigned long rx_npkts;
unsigned long tx_npkts;
#define ETH_HDR_SIZE (opt_vlan_tag ? sizeof(struct vlan_ethhdr) : \
sizeof(struct ethhdr))
+#define PKTGEN_HDR_SIZE (opt_tstamp ? sizeof(struct pktgen_hdr) : 0)
#define PKT_HDR_SIZE (ETH_HDR_SIZE + sizeof(struct iphdr) + \
- sizeof(struct udphdr))
+ sizeof(struct udphdr) + PKTGEN_HDR_SIZE)
+#define PKTGEN_HDR_OFFSET (ETH_HDR_SIZE + sizeof(struct iphdr) + \
+ sizeof(struct udphdr))
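+/* Smallest --pkt-size that still fits all headers when time-stamping,
+ * e.g. 14 (eth) + 20 (ip) + 8 (udp) + 16 (pktgen) + 4 (FCS) = 62 bytes
+ * without a VLAN tag.
+ */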
+#define PKTGEN_SIZE_MIN (PKTGEN_HDR_OFFSET + sizeof(struct pktgen_hdr) + \
+ ETH_FCS_SIZE)
#define PKT_SIZE (opt_pkt_size - ETH_FCS_SIZE)
#define IP_PKT_SIZE (PKT_SIZE - ETH_HDR_SIZE)
#define UDP_PKT_SIZE (IP_PKT_SIZE - sizeof(struct iphdr))
-#define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr))
+#define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - \
+ (sizeof(struct udphdr) + PKTGEN_HDR_SIZE))
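+/* With --tstamp the pktgen header is carved out of the UDP payload, so the
+ * fill pattern written at PKT_HDR_SIZE shrinks by sizeof(struct pktgen_hdr).
+ */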
static u8 pkt_data[XSK_UMEM__DEFAULT_FRAME_SIZE];
static void gen_eth_hdr_data(void)
{
+ struct pktgen_hdr *pktgen_hdr;
struct udphdr *udp_hdr;
struct iphdr *ip_hdr;
sizeof(struct iphdr));
ip_hdr = (struct iphdr *)(pkt_data +
sizeof(struct vlan_ethhdr));
-
+		pktgen_hdr = (struct pktgen_hdr *)(pkt_data +
+						   sizeof(struct vlan_ethhdr) +
+						   sizeof(struct iphdr) +
+						   sizeof(struct udphdr));
/* ethernet & VLAN header */
memcpy(veth_hdr->h_dest, &opt_txdmac, ETH_ALEN);
memcpy(veth_hdr->h_source, &opt_txsmac, ETH_ALEN);
sizeof(struct iphdr));
ip_hdr = (struct iphdr *)(pkt_data +
sizeof(struct ethhdr));
-
+		pktgen_hdr = (struct pktgen_hdr *)(pkt_data +
+						   sizeof(struct ethhdr) +
+						   sizeof(struct iphdr) +
+						   sizeof(struct udphdr));
/* ethernet header */
memcpy(eth_hdr->h_dest, &opt_txdmac, ETH_ALEN);
memcpy(eth_hdr->h_source, &opt_txsmac, ETH_ALEN);
udp_hdr->dest = htons(0x1000);
udp_hdr->len = htons(UDP_PKT_SIZE);
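+	/* The packet template only carries the constant magic; the sequence
+	 * number and timestamp are written per packet in tx_only().
+	 */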
+	if (opt_tstamp)
+		pktgen_hdr->pgh_magic = htonl(PKTGEN_MAGIC);
+
/* UDP data */
memset32_htonl(pkt_data + PKT_HDR_SIZE, opt_pkt_fill_pattern,
UDP_PKT_DATA_SIZE);
{"tx-dmac", required_argument, 0, 'G'},
{"tx-smac", required_argument, 0, 'H'},
{"tx-cycle", required_argument, 0, 'T'},
+ {"tstamp", no_argument, 0, 'y'},
{"policy", required_argument, 0, 'W'},
{"schpri", required_argument, 0, 'U'},
{"extra-stats", no_argument, 0, 'x'},
" -G, --tx-dmac=<MAC> Dest MAC addr of TX frame in aa:bb:cc:dd:ee:ff format (For -V|--tx-vlan)\n"
" -H, --tx-smac=<MAC> Src MAC addr of TX frame in aa:bb:cc:dd:ee:ff format (For -V|--tx-vlan)\n"
" -T, --tx-cycle=n Tx cycle time in micro-seconds (For -t|--txonly).\n"
+ " -y, --tstamp Add time-stamp to packet (For -t|--txonly).\n"
" -W, --policy=POLICY Schedule policy. Default: SCHED_OTHER\n"
" -U, --schpri=n Schedule priority. Default: %d\n"
" -x, --extra-stats Display extra statistics.\n"
for (;;) {
c = getopt_long(argc, argv,
- "Frtli:q:pSNn:w:O:czf:muMd:b:C:s:P:VJ:K:G:H:T:W:U:xQaI:BR",
+ "Frtli:q:pSNn:w:O:czf:muMd:b:C:s:P:VJ:K:G:H:T:yW:U:xQaI:BR",
long_options, &option_index);
if (c == -1)
break;
opt_tx_cycle_ns = atoi(optarg);
opt_tx_cycle_ns *= NSEC_PER_USEC;
break;
+ case 'y':
+ opt_tstamp = 1;
+ break;
case 'W':
if (get_schpolicy(&opt_schpolicy, optarg)) {
fprintf(stderr,
}
}
-static int tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
+static int tx_only(struct xsk_socket_info *xsk, u32 *frame_nb,
+ int batch_size, unsigned long tx_ns)
{
- u32 idx;
+ u32 idx, tv_sec, tv_usec;
unsigned int i;
while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) <
return 0;
}
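+	/* Split the batch's Tx time once: whole seconds plus the microsecond
+	 * remainder. Every packet in this batch gets the same timestamp.
+	 */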
+	if (opt_tstamp) {
+		tv_sec = (u32)(tx_ns / NSEC_PER_SEC);
+		tv_usec = (u32)((tx_ns % NSEC_PER_SEC) / 1000);
+	}
+
for (i = 0; i < batch_size; i++) {
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
idx + i);
tx_desc->addr = (*frame_nb + i) * opt_xsk_frame_size;
tx_desc->len = PKT_SIZE;
+
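+		/* Patch the pktgen header inside this descriptor's umem frame:
+		 * a unique sequence number plus the per-batch timestamp.
+		 */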
+		if (opt_tstamp) {
+			struct pktgen_hdr *pktgen_hdr;
+			u64 addr = tx_desc->addr;
+			char *pkt;
+
+			pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
+			pktgen_hdr = (struct pktgen_hdr *)(pkt + PKTGEN_HDR_OFFSET);
+
+			pktgen_hdr->seq_num = htonl(sequence++);
+			pktgen_hdr->tv_sec = htonl(tv_sec);
+			pktgen_hdr->tv_usec = htonl(tv_usec);
+
+			hex_dump(pkt, PKT_SIZE, addr);
+		}
}
xsk_ring_prod__submit(&xsk->tx, batch_size);
while ((opt_pkt_count && pkt_cnt < opt_pkt_count) || !opt_pkt_count) {
int batch_size = get_batch_size(pkt_cnt);
+ unsigned long tx_ns = 0;
struct timespec next;
int tx_cnt = 0;
long diff;
}
/* Measure periodic Tx scheduling variance */
- diff = get_nsecs() - next_tx_ns;
+ tx_ns = get_nsecs();
+ diff = tx_ns - next_tx_ns;
if (diff < tx_cycle_diff_min)
tx_cycle_diff_min = diff;
tx_cycle_diff_ave += (double)diff;
tx_cycle_cnt++;
+ } else if (opt_tstamp) {
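+			/* No Tx cycle is configured: sample the clock here so
+			 * the stamped timestamp still reflects when this batch
+			 * was queued.
+			 */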
+ tx_ns = get_nsecs();
}
for (i = 0; i < num_socks; i++)
- tx_cnt += tx_only(xsks[i], &frame_nb[i], batch_size);
+ tx_cnt += tx_only(xsks[i], &frame_nb[i], batch_size, tx_ns);
pkt_cnt += tx_cnt;
apply_setsockopt(xsks[i]);
if (opt_bench == BENCH_TXONLY) {
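+		/* Timestamped frames must have room for the pktgen header in
+		 * front of the FCS, so quietly raise too-small --pkt-size
+		 * values to PKTGEN_SIZE_MIN.
+		 */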
+		if (opt_tstamp && opt_pkt_size < PKTGEN_SIZE_MIN)
+			opt_pkt_size = PKTGEN_SIZE_MIN;
+
gen_eth_hdr_data();
for (i = 0; i < NUM_FRAMES; i++)