unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
- hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
phead = skb->data;
if (unlikely(!skb_pull(skb, hlen))) {
ipoib_warn(priv, "linear data too small\n");
return ret;
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
- packet->header_len = skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
+ packet->header_len = skb_inner_tcp_all_headers(skb);
packet->tcp_header_len = inner_tcp_hdrlen(skb);
} else {
- packet->header_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ packet->header_len = skb_tcp_all_headers(skb);
packet->tcp_header_len = tcp_hdrlen(skb);
}
packet->tcp_payload_len = skb->len - packet->header_len;
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
*tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
if (skb_is_gso(skb)) {
if (skb->protocol == htons(ETH_P_IP) ||
(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
- proto_hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb)) {
tpd_req += ((skb_headlen(skb) - proto_hdr_len +
MAX_TX_BUF_LEN - 1) >>
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
netdev_warn(adapter->netdev,
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (segment) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
ntohs(iph->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->len == hdr_len) {
iph->check = 0;
tcp_hdr(skb)->check =
retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (retval) {
/* TSO */
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
mss = skb_shinfo(skb)->gso_size;
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
- proto_hdr_len = (skb_transport_offset(skb) +
- tcp_hdrlen(skb));
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (unlikely(proto_hdr_len > len)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
/* Headers length */
if (xmit_type & XMIT_GSO_ENC)
- hlen = (int)(skb_inner_transport_header(skb) -
- skb->data) +
- inner_tcp_hdrlen(skb);
+ hlen = skb_inner_tcp_all_headers(skb);
else
- hlen = (int)(skb_transport_header(skb) -
- skb->data) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
/* Amount of data (w/o headers) on linear part of SKB*/
first_bd_sz = skb_headlen(skb) - hlen;
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data;
+ return skb_inner_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_inner_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
}
/**
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ return skb_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+ return skb_transport_offset(skb) + sizeof(struct udphdr);
}
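A minimal sketch of the pattern both bnx2x hunks above implement (hypothetical helper name and signature, assuming kernel context with <linux/skbuff.h> and <linux/tcp.h>; not the driver code itself): the TCP header length varies with options, so the new helpers fold tcp_hdrlen() in, while the UDP header is a constant 8 bytes and only the transport offset is returned.

static int example_csum_hdr_len(const struct sk_buff *skb, bool is_tcp, bool inner)
{
	/* TCP: include the variable, option-dependent TCP header length */
	if (is_tcp)
		return inner ? skb_inner_tcp_all_headers(skb)
			     : skb_tcp_all_headers(skb);

	/* UDP: header length is constant, report only offset + 8 bytes */
	return (inner ? skb_inner_transport_offset(skb)
		      : skb_transport_offset(skb)) + sizeof(struct udphdr);
}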
/* set FW indication according to inner or outer protocols if tunneled */
u32 hdr_len;
if (skb->encapsulation)
- hdr_len = skb_inner_network_offset(skb) +
- skb_inner_network_header_len(skb) +
- inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
else
- hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
/* HW/FW can not correctly segment packets that have been
* vlan encapsulated.
BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
return -EINVAL;
}
- if (unlikely((gso_size + skb_transport_offset(skb) +
- tcp_hdrlen(skb)) >= skb->len)) {
+ if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) +
- tcp_hdrlen(skb))) {
+ skb_tcp_all_headers(skb))) {
BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
return -EINVAL;
}
/* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
/* if this is required, would need to copy to single buffer */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int sh_len = skb_tcp_all_headers(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
hdr->tso = 1;
- hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_start = skb_tcp_all_headers(skb);
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (cxgb4_is_ktls_skb(skb) &&
- (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+ (skb->len - skb_tcp_all_headers(skb)))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pktlen = skb_tcp_all_headers(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
return 0;
th = tcp_hdr(nskb);
- skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
skb_tx_timestamp(nskb);
unsigned long flags;
tcp_seq = ntohl(th->seq);
- skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_offset = skb_tcp_all_headers(skb);
skb_data_len = skb->len - skb_offset;
data_len = skb_data_len;
skb_frag_t *frag;
if (skb->encapsulation) {
- hdr_len = skb_inner_transport_header(skb) - skb->data;
- hdr_len += inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
} else {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
}
static int be_gso_hdr_len(struct sk_buff *skb)
{
if (skb->encapsulation)
- return skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
void *bufaddr;
unsigned long dmabuf;
const struct fun_ktls_tx_ctx *tls_ctx;
u32 datalen, seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return skb;
(__force __wsum)htonl(paylen));
/* Compute length of segmentation header. */
- header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ header_len = skb_tcp_all_headers(skb);
break;
default:
return -EINVAL;
*/
static bool gve_can_send_tso(const struct sk_buff *skb)
{
- const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
int cur_seg_size;
#define HNS_BUFFER_SIZE_2048 2048
#define BD_MAX_SEND_SIZE 8191
-#define SKB_TMP_LEN(SKB) \
- (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
int send_sz, dma_addr_t dma, int frag_end,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
}
desc->tx.ip_offset = ip_offset;
static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
if (!skb->encapsulation)
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_tcp_all_headers(skb);
- return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
}
/* HW need every continuous max_non_tso_bd_num buffer data to be larger
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
__entry->hdr_len = skb->encapsulation ?
- skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
- skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb);
__entry->ip_summed = skb->ip_summed;
__entry->fraglist = skb_has_frag_list(skb);
hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
* For TSO packets we only copy the headers into the
* immediate area.
*/
- immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ immediate_len = skb_tcp_all_headers(skb);
}
if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
case e1000_82544: {
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
* points to just header, pull a few bytes of payload from
* frags into skb->data
*/
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
/* we do this workaround for ES2LAN, but it is un-necessary,
* avoiding it could save a lot of cycles
*/
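A hedged sketch of the workaround described in the comment above (hypothetical helper; the pulled byte count and error code are illustrative, not taken from the driver): when the linear area holds exactly the TSO headers, a few payload bytes are pulled out of the frags with __pskb_pull_tail() so that skb->data is not header-only.

static int example_pull_payload_into_linear(struct sk_buff *skb)
{
	unsigned int hdr_len = skb_tcp_all_headers(skb);

	if (skb->data_len && skb_headlen(skb) == hdr_len) {
		/* pull a few payload bytes (4 here, illustrative) into skb->data */
		unsigned int pull = min_t(unsigned int, 4, skb->data_len);

		if (!__pskb_pull_tail(skb, pull))
			return -ENOMEM;
	}
	return 0;
}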
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
iph = ip_hdr(skb);
iph->tot_len = 0;
u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
int tx_index;
struct tx_desc *desc;
int ret;
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ int hdr_len = skb_tcp_all_headers(skb);
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
if ((txq->count + tso_count_descs(skb)) >= txq->size)
return 0;
- if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
pr_info("*** Is this even possible?\n");
return 0;
}
ext->subdc = NIX_SUBDC_EXT;
if (skb_shinfo(skb)->gso_size) {
ext->lso = 1;
- ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_sb = skb_tcp_all_headers(skb);
ext->lso_mps = skb_shinfo(skb)->gso_size;
/* Only TSOv4 and TSOv6 GSO offloads are supported */
* be correctly modified, hence don't offload such TSO segments.
*/
- payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ payload_len = skb->len - skb_tcp_all_headers(skb);
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
return false;
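A sketch of the check performed above (hypothetical helper name, assuming kernel context): TSO is refused when the trailing segment would carry fewer than 16 payload bytes, since such a short last segment cannot be correctly modified by the hardware.

static bool example_tso_last_seg_ok(const struct sk_buff *skb)
{
	unsigned int payload_len = skb->len - skb_tcp_all_headers(skb);
	unsigned int last_seg_size = payload_len % skb_shinfo(skb)->gso_size;

	/* a non-empty final segment shorter than 16 bytes is rejected */
	return !last_seg_size || last_seg_size >= 16;
}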
if (mss != 0) {
if (!(hw->flags & SKY2_HW_NEW_LE))
- mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss += skb_tcp_all_headers(skb);
if (mss != sky2->tx_last_mss) {
le = get_tx_le(sky2, &slot);
*inline_ok = false;
*hopbyhop = 0;
if (skb->encapsulation) {
- *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ *lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
/* Detects large IPV6 TCP packets and prepares for removal of
* HBH header that has been pushed by ip6_xmit(),
*/
if (ipv6_has_hopopt_jumbo(skb))
*hopbyhop = sizeof(struct hop_jumbo_hdr);
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
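A minimal sketch of the hop-by-hop handling above (hypothetical helper; the real driver also updates stats and inline state): for BIG TCP IPv6 packets, ip6_xmit() has pushed a jumbo hop-by-hop header that the driver strips, so its size is reported separately and later subtracted from the LSO header size.

static int example_lso_header_size(const struct sk_buff *skb, int *hopbyhop)
{
	*hopbyhop = 0;

	if (skb->encapsulation)
		return skb_inner_tcp_all_headers(skb);

	/* jumbo HBH header, if present, is stripped before DMA */
	if (ipv6_has_hopopt_jumbo(skb))
		*hopbyhop = sizeof(struct hop_jumbo_hdr);

	return skb_tcp_all_headers(skb);
}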
int datalen;
u32 seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return true;
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ihs = skb_tcp_all_headers(skb);
if (ipv6_has_hopopt_jumbo(skb)) {
*hopbyhop = sizeof(struct hop_jumbo_hdr);
ihs -= sizeof(struct hop_jumbo_hdr);
* send loop that we are still in the
* header portion of the TSO packet.
* TSO header can be at most 1KB long */
- cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cum_len = -skb_tcp_all_headers(skb);
/* for IPv6 TSO, the checksum offset stores the
* TCP header length, to save the firmware from
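A hedged sketch of the accumulator idea in the comment above (hypothetical helper; the real send loop advances cum_len per descriptor): cum_len starts at minus the full TCP header length, so it stays negative while the loop is still emitting header bytes and becomes non-negative once payload starts.

static bool example_still_in_tso_header(const struct sk_buff *skb, int bytes_emitted)
{
	/* negative while inside the (at most 1KB) TSO header */
	int cum_len = -skb_tcp_all_headers(skb) + bytes_emitted;

	return cum_len < 0;
}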
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
}
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
}
segs = skb_shinfo(skb)->gso_segs;
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
return skb;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
resync_pending = tls_offload_tx_resync_pending(skb->sk);
if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
return;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
seq = ntohl(tcp_hdr(skb)->seq);
ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
if (skb_is_gso(skb)) {
u32 hdrlen;
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
/* Assume worst case scenario of having longest possible
* metadata prepend - 8B
}
if (encap)
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
+ hdrlen = skb_inner_tcp_all_headers(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss);
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
if (is_encap_pkt)
- return (skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data);
- else
- return (skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
}
opcode = QLCNIC_TX_ETHER_PKT;
if (skb_is_gso(skb)) {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->hdr_length = hdr_len;
opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
pskb_trim(skb, pkt_len);
}
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* we only need to do csum */
netif_warn(adpt, tx_err, adpt->netdev,
/* if Large Segment Offload is (in TCP Segmentation Offload struct) */
if (TPD_LSO(tpd)) {
- mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mapped_len = skb_tcp_all_headers(skb);
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
tpbuf->length = mapped_len;
proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
hdr = sizeof(struct udphdr);
} else {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
hdr = tcp_hdrlen(skb);
}
if (ret)
return ret;
- pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pkt_info->header_len = skb_tcp_all_headers(skb);
pkt_info->tcp_header_len = tcp_hdrlen(skb);
pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
pkt_info->mss = skb_shinfo(skb)->gso_size;
}
/* Header Length = MAC header len + IP header len + TCP header len*/
- hdrlen = ETH_HLEN +
- (int)skb_network_header_len(skb) +
- tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
switch (gso_type) {
}
mss = skb_shinfo(skb)->gso_size;
- hdrlen = skb_transport_header(skb) -
- skb_mac_header(skb) +
- tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
skb_shinfo(skb)->gso_segs =
DIV_ROUND_UP(skb->len - hdrlen, mss);
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cpu_to_le16(skb_tcp_all_headers(skb));
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb)
return inner_tcp_hdr(skb)->doff * 4;
}
+/**
+ * skb_tcp_all_headers - Returns size of all headers for a TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb)) {
+ * int hlen = skb_tcp_all_headers(skb);
+ * ...
+ */
+static inline int skb_tcp_all_headers(const struct sk_buff *skb)
+{
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
+/**
+ * skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet
+ * @skb: buffer
+ *
+ * Used in TX path, for a packet known to be a TCP one.
+ *
+ * if (skb_is_gso(skb) && skb->encapsulation) {
+ * int hlen = skb_inner_tcp_all_headers(skb);
+ * ...
+ */
+static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
+{
+ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+}
+
static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
return (tcp_hdr(skb)->doff - 5) * 4;
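A minimal usage sketch of the two new helpers (hypothetical driver function, assuming <linux/tcp.h>; it mirrors the be_gso_hdr_len()/hns3_gso_hdr_len() conversions above rather than any one driver):

static unsigned int example_gso_hdr_len(const struct sk_buff *skb)
{
	if (skb->encapsulation)
		/* was: skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) */
		return skb_inner_tcp_all_headers(skb);

	/* was: skb_transport_offset(skb) + tcp_hdrlen(skb) */
	return skb_tcp_all_headers(skb);
}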
s32 *sync_size,
int *resync_sgs)
{
- int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
int payload_len = skb->len - tcp_payload_offset;
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
struct tls_record_info *record;
struct sk_buff *skb,
s32 sync_size, u64 rcd_sn)
{
- int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
int payload_len = skb->len - tcp_payload_offset;
void *buf, *iv, *aad, *dummy_buf;
struct aead_request *aead_req;
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
- int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int payload_len = skb->len - tcp_payload_offset;