@@ -744,28 +744,43 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		u32 buf_size;
 
-		tbi = tq->buf_info + tq->tx_ring.next2fill;
-		tbi->map_type = VMXNET3_MAP_PAGE;
-		tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
-						 0, skb_frag_size(frag),
-						 DMA_TO_DEVICE);
+		buf_offset = 0;
+		len = skb_frag_size(frag);
+		while (len) {
+			tbi = tq->buf_info + tq->tx_ring.next2fill;
+			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+				buf_size = len;
+				dw2 |= len;
+			} else {
+				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+				/* spec says that for TxDesc.len, 0 == 2^14 */
+			}
+			tbi->map_type = VMXNET3_MAP_PAGE;
+			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
+							 buf_offset, buf_size,
+							 DMA_TO_DEVICE);
 
-		tbi->len = skb_frag_size(frag);
+			tbi->len = buf_size;
 
-		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
-		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
-		gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
-		gdesc->dword[3] = 0;
+			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+			gdesc->dword[2] = cpu_to_le32(dw2);
+			gdesc->dword[3] = 0;
 
-		dev_dbg(&adapter->netdev->dev,
-			"txd[%u]: 0x%llu %u %u\n",
-			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
-			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
-		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
-		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+			dev_dbg(&adapter->netdev->dev,
+				"txd[%u]: 0x%llu %u %u\n",
+				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
+			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+			len -= buf_size;
+			buf_offset += buf_size;
+		}
 	}
 
 	ctx->eop_txd = gdesc;
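This hunk is the core of the fix: a fragment larger than VMXNET3_MAX_TX_BUF_SIZE can no longer be covered by a single Tx descriptor, so the mapping loop now walks each fragment in chunks of at most that size, mapping and posting one descriptor per chunk. Note the dw2 handling: for a full-size chunk the length bits are left at zero, which per the spec comment the device reads as 2^14. The chunking arithmetic in isolation, as a minimal user-space sketch (the constant's value of 1 << 14 is an assumption inferred from the "0 == 2^14" comment; split_frag() and the printf harness are illustrative, not driver code):

	#include <stdio.h>

	#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)	/* assumed: TxDesc.len is a 14-bit field */

	/* Print the (offset, size) chunks a fragment is split into,
	 * mirroring the new while (len) loop in vmxnet3_map_pkt(). */
	static void split_frag(unsigned int frag_size)
	{
		unsigned int buf_offset = 0;
		unsigned int len = frag_size;

		while (len) {
			/* A full 2^14-byte chunk is encoded as len == 0 in the
			 * descriptor, so chunks never exceed 2^14 bytes. */
			unsigned int buf_size = len < VMXNET3_MAX_TX_BUF_SIZE ?
						len : VMXNET3_MAX_TX_BUF_SIZE;

			printf("txd: offset=%u size=%u\n", buf_offset, buf_size);
			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	int main(void)
	{
		split_frag(40000);	/* -> 16384 + 16384 + 7232: three descriptors */
		return 0;
	}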
@@ -886,6 +901,18 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
 	}
 }
 
+static int txd_estimate(const struct sk_buff *skb)
+{
+	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+	int i;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
+	}
+	return count;
+}
 
 /*
  * Transmits a pkt thru a given tq
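The new txd_estimate() helper makes the descriptor budget match the splitting loop above: the linear head and every fragment now contribute VMXNET3_TXD_NEEDED(size) descriptors each rather than exactly one, plus one spare. Assuming VMXNET3_TXD_NEEDED() is the usual ceiling division by VMXNET3_MAX_TX_BUF_SIZE (its definition lives in vmxnet3_int.h and is reproduced here as an assumption), a few spot checks:

	#include <assert.h>

	#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)	/* assumed value, as above */

	/* Descriptors needed to cover `size` bytes: ceiling division by the
	 * per-descriptor maximum (assumed to match the vmxnet3_int.h macro). */
	#define VMXNET3_TXD_NEEDED(size) \
		(((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / VMXNET3_MAX_TX_BUF_SIZE)

	int main(void)
	{
		assert(VMXNET3_TXD_NEEDED(100)   == 1);	/* small frag: one txd     */
		assert(VMXNET3_TXD_NEEDED(16384) == 1);	/* exactly 2^14: still one */
		assert(VMXNET3_TXD_NEEDED(16385) == 2);	/* one byte over: two txds */
		assert(VMXNET3_TXD_NEEDED(65536) == 4);	/* 64 KB frag: four txds   */
		return 0;
	}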
@@ -914,9 +941,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		union Vmxnet3_GenericDesc tempTxDesc;
 #endif
 
 	/* conservatively estimate # of descriptors to use */
-	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
-		skb_shinfo(skb)->nr_frags + 1;
+	count = txd_estimate(skb);
 
 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
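To see why the old estimate had to go, take an skb with a 100-byte linear head and a single 64 KB fragment (possible on 64 KB-page systems): the old formula reserved VMXNET3_TXD_NEEDED(100) + nr_frags + 1 = 1 + 1 + 1 = 3 descriptors, but the splitting loop actually consumes 1 + 4 = 5, risking an overrun of the Tx ring. txd_estimate() returns 1 + 1 + 4 = 6 for the same skb, restoring a safe, conservative overestimate.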