u32 send_buf_index;
u32 total_data_buflen;
- u32 pad1;
-
-
- u64 send_completion_tid;
};
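With pad1 and send_completion_tid gone, everything the completion path needs now rides in the skb itself: the transmit path builds the hv_netvsc_packet inside skb->cb, so there is no separately tracked lifetime. A rough sketch of that pattern, following the netvsc_start_xmit() conventions used later in this patch (the exact field assignments are illustrative, not a copy of the patch):

	/* Sketch: per-packet metadata lives in the skb control buffer,
	 * so freeing the skb also releases the metadata.
	 */
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);
	packet->total_data_buflen = skb->len;
	packet->send_buf_index = NETVSC_INVALID_INDEX;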
struct netvsc_device_info {
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
- struct hv_page_buffer **page_buffer);
+ struct hv_page_buffer **page_buffer,
+ struct sk_buff *skb);
void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_message *resp);
-void netvsc_xmit_completion(void *context);
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet,
void **data,
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev;
u32 send_index;
+ struct sk_buff *skb;
ndev = net_device->ndev;
int queue_sends;
/* Get the send context */
- nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
- packet->trans_id;
+ skb = (struct sk_buff *)(unsigned long)packet->trans_id;
/* Notify the layer above us */
- if (nvsc_packet) {
+ if (skb) {
+ nvsc_packet = (struct hv_netvsc_packet *) skb->cb;
send_index = nvsc_packet->send_buf_index;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
q_idx = nvsc_packet->q_idx;
channel = incoming_channel;
- netvsc_xmit_completion(nvsc_packet);
+ dev_kfree_skb_any(skb);
}
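The value cast back from packet->trans_id here is exactly the req_id that netvsc_send_pkt() (below) hands to the host, which is what lets the handler free the skb without any extra bookkeeping. Summarized as a comment for reference (illustration only, not part of the patch):

	/* Both ends of the trans_id round trip:
	 *
	 *	send side (netvsc_send_pkt):
	 *		req_id = (ulong)skb;
	 *		vmbus_sendpacket(..., req_id, ...);
	 *
	 *	completion side (this handler):
	 *		skb = (struct sk_buff *)(unsigned long)packet->trans_id;
	 *		dev_kfree_skb_any(skb);
	 */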
num_outstanding_sends =
static inline int netvsc_send_pkt(
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
- struct hv_page_buffer **pb)
+ struct hv_page_buffer **pb,
+ struct sk_buff *skb)
{
struct nvsp_message nvmsg;
u16 q_idx = packet->q_idx;
nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
packet->total_data_buflen;
- if (packet->completion_func)
- req_id = (ulong)packet;
- else
- req_id = 0;
+ req_id = (ulong)skb;
if (out_channel->rescind)
return -ENODEV;
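Since req_id is what the host echoes back as trans_id, storing the raw skb pointer is what allows the completion handler above to call dev_kfree_skb_any() directly. For reference, a sketch of how req_id is consumed a few lines further down, assuming the vmbus_sendpacket() signature this driver already uses:

	/* Sketch: req_id rides in the VMBus descriptor and comes back as
	 * packet->trans_id in the send-completion handler.
	 */
	ret = vmbus_sendpacket(out_channel, &nvmsg,
			       sizeof(struct nvsp_message),
			       req_id,	/* (ulong)skb, or 0 for control messages */
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);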
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
- struct hv_page_buffer **pb)
+ struct hv_page_buffer **pb,
+ struct sk_buff *skb)
{
struct netvsc_device *net_device;
int ret = 0, m_ret = 0;
}
if (msdp->pkt)
- netvsc_xmit_completion(msdp->pkt);
+ dev_kfree_skb_any(skb);
if (packet->xmit_more && !packet->cp_partial) {
msdp->pkt = packet;
}
if (msd_send) {
- m_ret = netvsc_send_pkt(msd_send, net_device, pb);
+ m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb);
if (m_ret != 0) {
netvsc_free_send_slot(net_device,
msd_send->send_buf_index);
- netvsc_xmit_completion(msd_send);
+ dev_kfree_skb_any(skb);
}
}
if (cur_send)
- ret = netvsc_send_pkt(cur_send, net_device, pb);
+ ret = netvsc_send_pkt(cur_send, net_device, pb, skb);
if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, section_index);
return q_idx;
}
-void netvsc_xmit_completion(void *context)
-{
- struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
- struct sk_buff *skb = (struct sk_buff *)
- (unsigned long)packet->send_completion_tid;
-
- if (skb)
- dev_kfree_skb_any(skb);
-}
-
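Nothing replaces this helper: data skbs are freed directly in the send-completion path, and RNDIS control requests pass a NULL skb, which makes req_id zero and leaves nothing for the completion to release. A hedged sketch of such a control-path caller after conversion (the rndis_filter_send_request shape and argument names are assumed from the rest of this series, not shown in this excerpt):

	/* Sketch: control messages have no skb, so the new skb argument is
	 * NULL and the send completion has nothing to free.
	 */
	ret = netvsc_send(device, packet, NULL, &pb, NULL);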
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
struct hv_page_buffer *pb)
{
/* Set the completion routine */
packet->completion_func = 1;
- packet->send_completion_tid = (unsigned long)skb;
isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
skb, packet, &pb);
- ret = netvsc_send(net_device_ctx->device_ctx, packet, rndis_msg, &pb);
+ ret = netvsc_send(net_device_ctx->device_ctx, packet,
+ rndis_msg, &pb, skb);
drop:
if (ret == 0) {