Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
tlan: manage rx allocation failure better
Rx allocation failure at runtime is non-fatal: for a normal Rx frame the driver just reuses the existing buffer, and during setup it simply continues with a smaller receive buffer pool. Compile tested only.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
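The runtime half of that policy is the usual recycle-on-failure Rx pattern: when no replacement buffer can be allocated for a completed frame, the descriptor keeps its existing buffer and the frame is simply dropped, so the ring never loses a slot. Below is a minimal stand-alone sketch of the idea; struct rx_slot and rx_complete() are illustrative stand-ins (not the driver's TLanList/sk_buff machinery), and plain malloc() models the skb allocation.

#include <stdio.h>
#include <stdlib.h>

/* Toy model of one Rx descriptor: in the real driver this is a TLanList
 * entry with a DMA-mapped sk_buff; here it is just a heap buffer. */
struct rx_slot {
        unsigned char *buf;     /* buffer currently owned by "hardware" */
        size_t         len;     /* length of the received frame */
};

/* Handle one completed frame.  On success the old buffer is handed up the
 * stack and the slot gets a fresh one; if the fresh allocation fails, the
 * old buffer is simply given back to the slot and the frame is dropped,
 * which is the non-fatal runtime behaviour the commit message describes. */
static int rx_complete(struct rx_slot *slot, size_t bufsize,
                       unsigned long *dropped)
{
        unsigned char *fresh = malloc(bufsize);

        if (!fresh) {
                (*dropped)++;   /* drop the frame, keep the old buffer */
                return -1;      /* slot->buf is unchanged and reusable */
        }

        /* Deliver the filled buffer (just print here) and give the slot
         * the replacement buffer for the next frame. */
        printf("delivered frame of %zu bytes\n", slot->len);
        free(slot->buf);
        slot->buf = fresh;
        slot->len = 0;
        return 0;
}

int main(void)
{
        struct rx_slot slot = { .buf = malloc(1536), .len = 0 };
        unsigned long dropped = 0;

        if (!slot.buf)
                return 1;

        /* Pretend two frames arrived into the same slot. */
        slot.len = 60;
        rx_complete(&slot, 1536, &dropped);
        slot.len = 1514;
        rx_complete(&slot, 1536, &dropped);

        printf("dropped: %lu\n", dropped);
        free(slot.buf);
        return 0;
}

The real driver additionally has to unmap and remap DMA buffers around the swap, which this sketch omits.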
parent 93e16847c9
commit 9ded65a1d7
1 changed file with 29 additions and 34 deletions
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1539,8 +1539,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 	TLanList	*head_list;
 	struct sk_buff	*skb;
 	TLanList	*tail_list;
-	void		*t;
-	u32		frameSize;
 	u16		tmpCStat;
 	dma_addr_t	head_list_phys;
 
@@ -1549,40 +1547,34 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 	head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
 
 	while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-		frameSize = head_list->frameSize;
+		dma_addr_t frameDma = head_list->buffer[0].address;
+		u32 frameSize = head_list->frameSize;
 		ack++;
 		if (tmpCStat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
 		if (bbuf) {
-			skb = dev_alloc_skb(frameSize + 7);
-			if (skb == NULL)
-				printk(KERN_INFO "TLAN: Couldn't allocate memory for received data.\n");
-			else {
+			skb = netdev_alloc_skb(dev, frameSize + 7);
+			if ( skb ) {
 				head_buffer = priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
 				skb_reserve(skb, 2);
-				t = (void *) skb_put(skb, frameSize);
-
-				dev->stats.rx_bytes += head_list->frameSize;
-
-				memcpy( t, head_buffer, frameSize );
+				pci_dma_sync_single_for_cpu(priv->pciDev,
+							    frameDma, frameSize,
+							    PCI_DMA_FROMDEVICE);
+				skb_copy_from_linear_data(skb, head_buffer, frameSize);
+				skb_put(skb, frameSize);
+				dev->stats.rx_bytes += frameSize;
 
 				skb->protocol = eth_type_trans( skb, dev );
 				netif_rx( skb );
 			}
 		} else {
 			struct sk_buff *new_skb;
 
-			/*
-			 * I changed the algorithm here. What we now do
-			 * is allocate the new frame. If this fails we
-			 * simply recycle the frame.
-			 */
-
-			new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
-
-			if ( new_skb != NULL ) {
+			new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+			if ( new_skb ) {
 				skb = TLan_GetSKB(head_list);
-				pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+				pci_unmap_single(priv->pciDev, frameDma, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 				skb_put( skb, frameSize );
 
 				dev->stats.rx_bytes += frameSize;
 
@@ -1590,12 +1582,12 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 				skb->protocol = eth_type_trans( skb, dev );
 				netif_rx( skb );
 
-				skb_reserve( new_skb, 2 );
+				skb_reserve( new_skb, NET_IP_ALIGN );
 				head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 
 				TLan_StoreSKB(head_list, new_skb);
-			} else
-				printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" );
+			}
 		}
 
 		head_list->forward = 0;
@@ -1994,25 +1986,28 @@ static void TLan_ResetLists( struct net_device *dev )
 		if ( bbuf ) {
 			list->buffer[0].address = priv->rxBufferDMA + ( i * TLAN_MAX_FRAME_SIZE );
 		} else {
-			skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
-			if ( skb == NULL ) {
+			skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+			if ( !skb ) {
 				printk( "TLAN: Couldn't allocate memory for received data.\n" );
-				/* If this ever happened it would be a problem */
-			} else {
-				skb->dev = dev;
-				skb_reserve( skb, 2 );
+				break;
 			}
 
+			skb_reserve( skb, NET_IP_ALIGN );
 			list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 			TLan_StoreSKB(list, skb);
 		}
 		list->buffer[1].count = 0;
 		list->buffer[1].address = 0;
-		if ( i < TLAN_NUM_RX_LISTS - 1 )
-			list->forward = list_phys + sizeof(TLanList);
-		else
-			list->forward = 0;
+		list->forward = list_phys + sizeof(TLanList);
 	}
 
+	/* in case ran out of memory early, clear bits */
+	while (i < TLAN_NUM_RX_LISTS) {
+		TLan_StoreSKB(priv->rxList + i, NULL);
+		++i;
+	}
+	list->forward = 0;
+
 } /* TLan_ResetLists */
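The setup half, the break plus clear loop added to TLan_ResetLists above, amounts to: stop filling the Rx ring at the first allocation failure, mark the unused tail slots empty, and run with however many buffers were obtained. A small stand-alone model of that shape, with hypothetical ring_slot/fill_rx_ring names in place of the driver's TLanList/TLan_StoreSKB:

#include <stdio.h>
#include <stdlib.h>

#define NUM_RX_SLOTS 32
#define FRAME_SIZE   1600

struct ring_slot {
        unsigned char *buf;     /* NULL means "no buffer, slot unused" */
};

/* Fill the Rx ring at setup time.  On the first allocation failure we stop
 * and leave the remaining slots NULL, so the ring simply ends up smaller;
 * this mirrors the break + "clear bits" loop added to TLan_ResetLists. */
static int fill_rx_ring(struct ring_slot ring[], int n)
{
        int i;

        for (i = 0; i < n; i++) {
                ring[i].buf = malloc(FRAME_SIZE);
                if (!ring[i].buf)
                        break;          /* continue with what we have */
        }

        /* In case we ran out early, make the unused tail explicit. */
        for (int j = i; j < n; j++)
                ring[j].buf = NULL;

        return i;                       /* number of usable buffers */
}

int main(void)
{
        struct ring_slot ring[NUM_RX_SLOTS];
        int usable = fill_rx_ring(ring, NUM_RX_SLOTS);

        printf("rx ring has %d of %d buffers\n", usable, NUM_RX_SLOTS);

        for (int i = 0; i < usable; i++)
                free(ring[i].buf);
        return 0;
}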