Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  bnx2: Fix bug in bnx2_free_rx_mem().
  irda: Add irda_skb_cb qdisc related padding
  jme: Fixed a typo
  net: kernel BUG at drivers/net/phy/mdio_bus.c:165!
  drivers/net: starfire: Fix napi ->poll() weight handling
  tlan: Fix pci memory unmapping
  enc28j60: use netif_rx_ni() to deliver RX packets
  tlan: Fix small (< 64 bytes) datagram transmissions
  netfilter: ctnetlink: fix missing CTA_NAT_SEQ_UNSPEC
commit b3806c3b94

8 changed files with 32 additions and 16 deletions
@@ -543,9 +543,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
 	for (j = 0; j < bp->rx_max_pg_ring; j++) {
 		if (rxr->rx_pg_desc_ring[j])
 			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-					    rxr->rx_pg_desc_ring[i],
-					    rxr->rx_pg_desc_mapping[i]);
-		rxr->rx_pg_desc_ring[i] = NULL;
+					    rxr->rx_pg_desc_ring[j],
+					    rxr->rx_pg_desc_mapping[j]);
+		rxr->rx_pg_desc_ring[j] = NULL;
 	}
 	if (rxr->rx_pg_ring)
 		vfree(rxr->rx_pg_ring);

@@ -959,7 +959,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 		ndev->stats.rx_packets++;
 		ndev->stats.rx_bytes += len;
 		ndev->last_rx = jiffies;
-		netif_rx(skb);
+		netif_rx_ni(skb);
 	}
 }
 /*

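Note: the enc28j60 is an SPI chip, so its receive path runs from a workqueue (process context) rather than from a hard interrupt. netif_rx() expects interrupt/softirq context; netif_rx_ni() does the same queueing but also kicks the NET_RX softirq so the packet is processed without waiting for the next interrupt or timer tick. A minimal sketch of the pattern, with hypothetical helper names rather than the driver's actual functions:

    /* Sketch: delivering a received frame from process context. */
    static void example_rx_work(struct work_struct *work)
    {
            struct sk_buff *skb;

            skb = example_read_frame_over_spi();    /* hypothetical helper */
            if (!skb)
                    return;

            skb->protocol = eth_type_trans(skb, skb->dev);
            netif_rx_ni(skb);       /* process context: also raises the RX softirq */
    }
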
@@ -22,7 +22,7 @@
  */

 #ifndef __JME_H_INCLUDED__
-#define __JME_H_INCLUDEE__
+#define __JME_H_INCLUDED__

 #define DRV_NAME "jme"
 #define DRV_VERSION "1.0.3"

@@ -105,8 +105,6 @@ int mdiobus_register(struct mii_bus *bus)
 		return -EINVAL;
 	}

-	bus->state = MDIOBUS_REGISTERED;
-
 	mutex_init(&bus->mdio_lock);

 	if (bus->reset)
@@ -123,6 +121,9 @@ int mdiobus_register(struct mii_bus *bus)
 		}
 	}

+	if (!err)
+		bus->state = MDIOBUS_REGISTERED;
+
 	pr_info("%s: probed\n", bus->name);

 	return err;

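Note: the oops in the subject ("kernel BUG at drivers/net/phy/mdio_bus.c:165!") comes from the sanity check in mdiobus_unregister(), which insists the bus really reached MDIOBUS_REGISTERED. Because the flag used to be set before the PHYs were probed, a bus whose registration failed partway through still looked registered, and the later cleanup path hit the check. Roughly, paraphrased from this era of mdio_bus.c rather than quoted verbatim:

    void mdiobus_unregister(struct mii_bus *bus)
    {
            BUG_ON(bus->state != MDIOBUS_REGISTERED);   /* the check that fired */
            bus->state = MDIOBUS_UNREGISTERED;
            /* ... unregister the PHY devices found during probing ... */
    }
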
@@ -1509,6 +1509,11 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 		desc->status = 0;
 		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
 	}
+
+	if (*quota == 0) {	/* out of rx quota */
+		retcode = 1;
+		goto out;
+	}
 	writew(np->rx_done, np->base + CompletionQConsumerIdx);

 out:

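Note: __netdev_rx() runs from starfire's NAPI ->poll() handler with the remaining packet quota. Bailing out with retcode = 1 once *quota reaches zero tells the poll loop that RX work is left, so NAPI keeps the device on the poll list instead of re-enabling interrupts early; a driver must never report more work than the budget it was given. The general contract, as a sketch with hypothetical helpers (not starfire's code):

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work_done;

            work_done = example_process_rx(napi, budget);   /* hypothetical, <= budget */
            if (work_done < budget) {
                    napi_complete(napi);            /* ring is drained ... */
                    example_enable_rx_irq(napi);    /* ... re-enable interrupts (hypothetical) */
            }
            return work_done;                       /* never exceeds budget */
    }
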
@@ -1098,6 +1098,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
 	dma_addr_t	tail_list_phys;
 	u8		*tail_buffer;
 	unsigned long	flags;
+	unsigned int	txlen;

 	if ( ! priv->phyOnline ) {
 		TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
@@ -1108,6 +1109,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )

 	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
 		return 0;
+	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

 	tail_list = priv->txList + priv->txTail;
 	tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
@@ -1125,16 +1127,16 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )

 	if ( bbuf ) {
 		tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
-		skb_copy_from_linear_data(skb, tail_buffer, skb->len);
+		skb_copy_from_linear_data(skb, tail_buffer, txlen);
 	} else {
 		tail_list->buffer[0].address = pci_map_single(priv->pciDev,
-							      skb->data, skb->len,
+							      skb->data, txlen,
 							      PCI_DMA_TODEVICE);
 		TLan_StoreSKB(tail_list, skb);
 	}

-	tail_list->frameSize = (u16) skb->len;
-	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
+	tail_list->frameSize = (u16) txlen;
+	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
 	tail_list->buffer[1].count = 0;
 	tail_list->buffer[1].address = 0;

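Note: Ethernet frames shorter than 64 bytes on the wire (60 before the FCS) are runts, so small datagrams must be padded before transmission. skb_padto() zero-fills the buffer up to the minimum but deliberately leaves skb->len untouched, which is why the driver now computes txlen and uses it for the copy, the DMA mapping and the descriptor size instead of skb->len. The idiom in isolation, assuming the generic ETH_ZLEN minimum rather than tlan's private constant:

    #include <linux/kernel.h>
    #include <linux/if_ether.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            unsigned int txlen;

            if (skb_padto(skb, ETH_ZLEN))
                    return 0;       /* skb_padto() frees the skb on failure */

            /* skb->len is NOT updated by skb_padto(), so size everything by txlen. */
            txlen = max(skb->len, (unsigned int)ETH_ZLEN);

            /* ... copy or DMA-map txlen bytes, program txlen into the TX descriptor ... */
            return 0;
    }
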
@@ -1431,7 +1433,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
 		if ( ! bbuf ) {
 			struct sk_buff *skb = TLan_GetSKB(head_list);
 			pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
-					 skb->len, PCI_DMA_TODEVICE);
+					 max(skb->len,
+					     (unsigned int)TLAN_MIN_FRAME_SIZE),
+					 PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(skb);
 			head_list->buffer[8].address = 0;
 			head_list->buffer[9].address = 0;
@@ -2055,9 +2059,12 @@ static void TLan_FreeLists( struct net_device *dev )
 		list = priv->txList + i;
 		skb = TLan_GetSKB(list);
 		if ( skb ) {
-			pci_unmap_single(priv->pciDev,
-					 list->buffer[0].address, skb->len,
-					 PCI_DMA_TODEVICE);
+			pci_unmap_single(
+				priv->pciDev,
+				list->buffer[0].address,
+				max(skb->len,
+				    (unsigned int)TLAN_MIN_FRAME_SIZE),
+				PCI_DMA_TODEVICE);
 			dev_kfree_skb_any( skb );
 			list->buffer[8].address = 0;
 			list->buffer[9].address = 0;

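Note: the two unmap hunks are the other half of the txlen change above: pci_unmap_single() must be given the same length that was passed to pci_map_single(), otherwise the mapping is released with a mismatched size (something DMA-API debugging and some IOMMU setups will flag). A schematic sketch of the required symmetry, again using ETH_ZLEN in place of TLAN_MIN_FRAME_SIZE:

    static void example_map_and_unmap(struct pci_dev *pdev, struct sk_buff *skb)
    {
            unsigned int txlen = max(skb->len, (unsigned int)ETH_ZLEN);
            dma_addr_t mapping;

            mapping = pci_map_single(pdev, skb->data, txlen, PCI_DMA_TODEVICE);
            /* ... hardware transmits the frame ... */
            pci_unmap_single(pdev, mapping, txlen, PCI_DMA_TODEVICE);  /* same txlen */
            dev_kfree_skb_any(skb);
    }
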
@@ -141,6 +141,7 @@ enum ctattr_protonat {
 #define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1)

 enum ctattr_natseq {
+	CTA_NAT_SEQ_UNSPEC,
 	CTA_NAT_SEQ_CORRECTION_POS,
 	CTA_NAT_SEQ_OFFSET_BEFORE,
 	CTA_NAT_SEQ_OFFSET_AFTER,

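Note: by convention every netlink attribute enum reserves value 0 as an *_UNSPEC member, so real attributes start at 1 and line up with the nla_policy tables and with userspace headers that follow the same convention; ctattr_natseq was missing that member, leaving its attributes numbered one lower than expected. Illustrative shape of the convention (not ctnetlink's actual policy table):

    enum example_natseq_attr {
            EXAMPLE_NATSEQ_UNSPEC,          /* = 0, never put on the wire */
            EXAMPLE_NATSEQ_CORRECTION_POS,  /* = 1 */
            EXAMPLE_NATSEQ_OFFSET_BEFORE,   /* = 2 */
            __EXAMPLE_NATSEQ_MAX
    };
    #define EXAMPLE_NATSEQ_MAX (__EXAMPLE_NATSEQ_MAX - 1)

    static const struct nla_policy example_natseq_policy[EXAMPLE_NATSEQ_MAX + 1] = {
            [EXAMPLE_NATSEQ_CORRECTION_POS] = { .type = NLA_U32 },
            [EXAMPLE_NATSEQ_OFFSET_BEFORE]  = { .type = NLA_U32 },
    };
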
@@ -135,9 +135,11 @@ struct dongle_reg {

 /*
  * Per-packet information we need to hide inside sk_buff
- * (must not exceed 48 bytes, check with struct sk_buff)
+ * (must not exceed 48 bytes, check with struct sk_buff)
+ * The default_qdisc_pad field is a temporary hack.
  */
 struct irda_skb_cb {
+	unsigned int default_qdisc_pad;
 	magic_t magic;		/* Be sure that we can trust the information */
 	__u32 next_speed;	/* The Speed to be set *after* this frame */
 	__u16 mtt;		/* Minimum turn around time */
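Note: struct irda_skb_cb is overlaid on skb->cb, the fixed 48-byte per-packet scratch area in struct sk_buff, and the new default_qdisc_pad field reserves the leading bytes that the qdisc layer also keeps in skb->cb, so IrDA's fields survive while the skb sits in a transmit queue (hence the "temporary hack" comment). A compile-time guard of the kind used elsewhere in the kernel would make the 48-byte limit explicit; it is illustrative only, not part of this commit:

    #include <linux/kernel.h>
    #include <linux/skbuff.h>
    #include <net/irda/irda_device.h>   /* assumed home of struct irda_skb_cb */

    static inline void example_check_irda_cb_fits(void)
    {
            BUILD_BUG_ON(sizeof(struct irda_skb_cb) >
                         sizeof(((struct sk_buff *)0)->cb));
    }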