net: Use netdev_alloc_skb_ip_align()
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bff1c09640
commit 89d71a66c4
38 changed files with 90 additions and 232 deletions
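Every hunk below follows the same pattern: a driver that used to call netdev_alloc_skb() with NET_IP_ALIGN added to the length and then skb_reserve() the same amount now calls the new helper and asks only for the payload size it needs. As a rough, non-authoritative sketch of the helper this commit introduces (the real definition lives in include/linux/skbuff.h), it is essentially:

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
                                                         unsigned int length)
{
        /* Allocate with NET_IP_ALIGN bytes of extra headroom... */
        struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

        /* ...and reserve it, so the IP header ends up aligned once the
         * 14-byte Ethernet header has been pulled. */
        if (NET_IP_ALIGN && skb)
                skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}

A typical caller in the hunks below therefore becomes, e.g., skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); with the explicit skb_reserve(skb, NET_IP_ALIGN) call dropped.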
@@ -2560,7 +2560,7 @@ boomerang_rx(struct net_device *dev)
 struct sk_buff *skb;
 entry = vp->dirty_rx % RX_RING_SIZE;
 if (vp->rx_skbuff[entry] == NULL) {
-skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 if (skb == NULL) {
 static unsigned long last_jif;
 if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2572,7 +2572,6 @@ boomerang_rx(struct net_device *dev)
 break; /* Bad news! */
 }

-skb_reserve(skb, NET_IP_ALIGN);
 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 vp->rx_skbuff[entry] = skb;
 }
@@ -549,14 +549,12 @@ rx_status_loop:
 pr_debug("%s: rx slot %d status 0x%x len %d\n",
 dev->name, rx_tail, status, len);

-new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
+new_skb = netdev_alloc_skb_ip_align(dev, buflen);
 if (!new_skb) {
 dev->stats.rx_dropped++;
 goto rx_next;
 }

-skb_reserve(new_skb, NET_IP_ALIGN);
-
 dma_unmap_single(&cp->pdev->dev, mapping,
 buflen, PCI_DMA_FROMDEVICE);
@@ -1057,12 +1055,10 @@ static int cp_refill_rx(struct cp_private *cp)
 struct sk_buff *skb;
 dma_addr_t mapping;

-skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
 if (!skb)
 goto err_out;

-skb_reserve(skb, NET_IP_ALIGN);
-
 mapping = dma_map_single(&cp->pdev->dev, skb->data,
 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 cp->rx_skb[i] = skb;
@@ -2004,9 +2004,8 @@ no_early_rx:
 /* Malloc up new buffer, compatible with net-2e. */
 /* Omit the four octet CRC from the length. */

-skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 if (likely(skb)) {
-skb_reserve (skb, NET_IP_ALIGN); /* 16 byte align the IP fields. */
 #if RX_BUF_IDX == 3
 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
 #else
@@ -1433,14 +1433,12 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,

 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
 RRS_PKT_SIZE_MASK) - 4; /* CRC */
-skb = netdev_alloc_skb(netdev,
-packet_size + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(netdev, packet_size);
 if (skb == NULL) {
 dev_warn(&pdev->dev, "%s: Memory squeeze,"
 "deferring packet.\n", netdev->name);
 goto skip_pkt;
 }
-skb_reserve(skb, NET_IP_ALIGN);
 skb->dev = netdev;
 memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
 skb_put(skb, packet_size);
@@ -1864,21 +1864,14 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)

 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

-skb = netdev_alloc_skb(adapter->netdev,
-adapter->rx_buffer_len + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(adapter->netdev,
+adapter->rx_buffer_len);
 if (unlikely(!skb)) {
 /* Better luck next round */
 adapter->netdev->stats.rx_dropped++;
 break;
 }

-/*
-* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->alloced = 1;
 buffer_info->skb = skb;
 buffer_info->length = (u16) adapter->rx_buffer_len;
@@ -409,7 +409,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 if (rxd->status.ok && rxd->status.pkt_size >= 60) {
 int rx_size = (int)(rxd->status.pkt_size - 4);
 /* alloc new buffer */
-skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(netdev, rx_size);
 if (NULL == skb) {
 printk(KERN_WARNING
 "%s: Mem squeeze, deferring packet.\n",
@@ -421,7 +421,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 netdev->stats.rx_dropped++;
 break;
 }
-skb_reserve(skb, NET_IP_ALIGN);
 skb->dev = netdev;
 memcpy(skb->data, rxd->packet, rx_size);
 skb_put(skb, rx_size);
@@ -320,16 +320,13 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 if (len < copybreak) {
 struct sk_buff *nskb;

-nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+nskb = netdev_alloc_skb_ip_align(dev, len);
 if (!nskb) {
 /* forget packet, just rearm desc */
 priv->stats.rx_dropped++;
 continue;
 }

-/* since we're copying the data, we can align
-* them properly */
-skb_reserve(nskb, NET_IP_ALIGN);
 dma_sync_single_for_cpu(kdev, desc->address,
 len, DMA_FROM_DEVICE);
 memcpy(nskb->data, skb->data, len);
@@ -756,7 +756,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 if ((adapter->cap == 0x400) && !vtm)
 vlanf = 0;

-skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
 if (!skb) {
 if (net_ratelimit())
 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -764,8 +764,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 return;
 }

-skb_reserve(skb, NET_IP_ALIGN);
-
 skb_fill_rx_data(adapter, skb, rxcp);

 if (do_pkt_csum(rxcp, adapter->rx_csum))
@@ -380,9 +380,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 return NULL;
 }

-skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
+skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
 if (likely(skb)) {
-skb_reserve(skb, 2);
 skb_put(desc->skb, desc->datalen);
 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
 desc->skb->ip_summed = CHECKSUM_NONE;
@@ -991,12 +990,11 @@ static int cpmac_open(struct net_device *dev)

 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
-skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
 if (unlikely(!skb)) {
 res = -ENOMEM;
 goto fail_desc;
 }
-skb_reserve(skb, 2);
 desc->skb = skb;
 desc->data_mapping = dma_map_single(&dev->dev, skb->data,
 CPMAC_SKB_SIZE,
@@ -505,7 +505,8 @@ rio_timer (unsigned long data)
 entry = np->old_rx % RX_RING_SIZE;
 /* Dropped packets don't need to re-allocate */
 if (np->rx_skbuff[entry] == NULL) {
-skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+skb = netdev_alloc_skb_ip_align(dev,
+np->rx_buf_sz);
 if (skb == NULL) {
 np->rx_ring[entry].fraginfo = 0;
 printk (KERN_INFO
@@ -514,8 +515,6 @@ rio_timer (unsigned long data)
 break;
 }
 np->rx_skbuff[entry] = skb;
-/* 16 byte align the IP header */
-skb_reserve (skb, 2);
 np->rx_ring[entry].fraginfo =
 cpu_to_le64 (pci_map_single
 (np->pdev, skb->data, np->rx_buf_sz,
@@ -576,7 +575,9 @@ alloc_list (struct net_device *dev)
 /* Allocate the rx buffers */
 for (i = 0; i < RX_RING_SIZE; i++) {
 /* Allocated fixed size of skbuff */
-struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+struct sk_buff *skb;
+
+skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 np->rx_skbuff[i] = skb;
 if (skb == NULL) {
 printk (KERN_ERR
@@ -584,7 +585,6 @@ alloc_list (struct net_device *dev)
 dev->name);
 break;
 }
-skb_reserve (skb, 2); /* 16 byte align the IP header. */
 /* Rubicon now supports 40 bits of addressing space. */
 np->rx_ring[i].fraginfo =
 cpu_to_le64 ( pci_map_single (
@@ -871,13 +871,11 @@ receive_packet (struct net_device *dev)
 PCI_DMA_FROMDEVICE);
 skb_put (skb = np->rx_skbuff[entry], pkt_len);
 np->rx_skbuff[entry] = NULL;
-} else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
+} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
 pci_dma_sync_single_for_cpu(np->pdev,
 desc_to_dma(desc),
 np->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
-/* 16 byte align the IP header */
-skb_reserve (skb, 2);
 skb_copy_to_linear_data (skb,
 np->rx_skbuff[entry]->data,
 pkt_len);
@@ -907,7 +905,7 @@ receive_packet (struct net_device *dev)
 struct sk_buff *skb;
 /* Dropped packets don't need to re-allocate */
 if (np->rx_skbuff[entry] == NULL) {
-skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 if (skb == NULL) {
 np->rx_ring[entry].fraginfo = 0;
 printk (KERN_INFO
@@ -917,8 +915,6 @@ receive_packet (struct net_device *dev)
 break;
 }
 np->rx_skbuff[entry] = skb;
-/* 16 byte align the IP header */
-skb_reserve (skb, 2);
 np->rx_ring[entry].fraginfo =
 cpu_to_le64 (pci_map_single
 (np->pdev, skb->data, np->rx_buf_sz,
@@ -1839,11 +1839,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 {
-if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
+if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
 return -ENOMEM;

-/* Align, init, and map the RFD. */
-skb_reserve(rx->skb, NET_IP_ALIGN);
+/* Init, and map the RFD. */
 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
@@ -3866,9 +3866,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 * of reassembly being done in the stack */
 if (length < copybreak) {
 struct sk_buff *new_skb =
-netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+netdev_alloc_skb_ip_align(netdev, length);
 if (new_skb) {
-skb_reserve(new_skb, NET_IP_ALIGN);
 skb_copy_to_linear_data_offset(new_skb,
 -NET_IP_ALIGN,
 (skb->data -
@@ -3937,9 +3936,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 struct e1000_buffer *buffer_info;
 struct sk_buff *skb;
 unsigned int i;
-unsigned int bufsz = 256 -
-16 /*for skb_reserve */ -
-NET_IP_ALIGN;
+unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;

 i = rx_ring->next_to_use;
 buffer_info = &rx_ring->buffer_info[i];
@@ -3951,7 +3948,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 goto check_page;
 }

-skb = netdev_alloc_skb(netdev, bufsz);
+skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 if (unlikely(!skb)) {
 /* Better luck next round */
 adapter->alloc_rx_buff_failed++;
@@ -3964,7 +3961,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
 "at %p\n", bufsz, skb->data);
 /* Try again, without freeing the previous */
-skb = netdev_alloc_skb(netdev, bufsz);
+skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 /* Failed allocation, critical failure */
 if (!skb) {
 dev_kfree_skb(oldskb);
@@ -3982,12 +3979,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 /* Use new allocation */
 dev_kfree_skb(oldskb);
 }
-/* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->skb = skb;
 buffer_info->length = adapter->rx_buffer_len;
 check_page:
@@ -4044,7 +4035,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 struct e1000_buffer *buffer_info;
 struct sk_buff *skb;
 unsigned int i;
-unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+unsigned int bufsz = adapter->rx_buffer_len;

 i = rx_ring->next_to_use;
 buffer_info = &rx_ring->buffer_info[i];
@@ -4056,7 +4047,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 goto map_skb;
 }

-skb = netdev_alloc_skb(netdev, bufsz);
+skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 if (unlikely(!skb)) {
 /* Better luck next round */
 adapter->alloc_rx_buff_failed++;
@@ -4069,7 +4060,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
 "at %p\n", bufsz, skb->data);
 /* Try again, without freeing the previous */
-skb = netdev_alloc_skb(netdev, bufsz);
+skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 /* Failed allocation, critical failure */
 if (!skb) {
 dev_kfree_skb(oldskb);
@@ -4088,12 +4079,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 /* Use new allocation */
 dev_kfree_skb(oldskb);
 }
-/* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->skb = skb;
 buffer_info->length = adapter->rx_buffer_len;
 map_skb:
@@ -167,7 +167,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 struct e1000_buffer *buffer_info;
 struct sk_buff *skb;
 unsigned int i;
-unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+unsigned int bufsz = adapter->rx_buffer_len;

 i = rx_ring->next_to_use;
 buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +179,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 goto map_skb;
 }

-skb = netdev_alloc_skb(netdev, bufsz);
+skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 if (!skb) {
 /* Better luck next round */
 adapter->alloc_rx_buff_failed++;
 break;
 }

-/*
-* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->skb = skb;
 map_skb:
 buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -284,21 +277,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 cpu_to_le64(ps_page->dma);
 }

-skb = netdev_alloc_skb(netdev,
-adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(netdev,
+adapter->rx_ps_bsize0);

 if (!skb) {
 adapter->alloc_rx_buff_failed++;
 break;
 }

-/*
-* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->skb = skb;
 buffer_info->dma = pci_map_single(pdev, skb->data,
 adapter->rx_ps_bsize0,
@@ -359,9 +345,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 struct e1000_buffer *buffer_info;
 struct sk_buff *skb;
 unsigned int i;
-unsigned int bufsz = 256 -
-16 /* for skb_reserve */ -
-NET_IP_ALIGN;
+unsigned int bufsz = 256 - 16 /* for skb_reserve */;

 i = rx_ring->next_to_use;
 buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +357,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 goto check_page;
 }

-skb = netdev_alloc_skb(netdev, bufsz);
+skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 if (unlikely(!skb)) {
 /* Better luck next round */
 adapter->alloc_rx_buff_failed++;
 break;
 }

-/* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->skb = skb;
 check_page:
 /* allocate a new page if necessary */
@@ -513,9 +491,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 */
 if (length < copybreak) {
 struct sk_buff *new_skb =
-netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+netdev_alloc_skb_ip_align(netdev, length);
 if (new_skb) {
-skb_reserve(new_skb, NET_IP_ALIGN);
 skb_copy_to_linear_data_offset(new_skb,
 -NET_IP_ALIGN,
 (skb->data -
@@ -447,7 +447,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 max_index_mask = q_skba->len - 1;
 for (i = 0; i < fill_wqes; i++) {
 u64 tmp_addr;
-struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
+struct sk_buff *skb;
+
+skb = netdev_alloc_skb_ip_align(dev, packet_size);
 if (!skb) {
 q_skba->os_skbs = fill_wqes - i;
 if (q_skba->os_skbs == q_skba->len - 2) {
@@ -457,7 +459,6 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 }
 break;
 }
-skb_reserve(skb, NET_IP_ALIGN);

 skb_arr[index] = skb;
 tmp_addr = ehea_map_vaddr(skb->data);
@@ -500,7 +501,7 @@ static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
 {
 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
 nr_of_wqes, EHEA_RWQE2_TYPE,
-EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
+EHEA_RQ2_PKT_SIZE);
 }

@@ -508,7 +509,7 @@ static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
 {
 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
 nr_of_wqes, EHEA_RWQE3_TYPE,
-EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
+EHEA_MAX_PACKET_SIZE);
 }

 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
@@ -870,19 +870,6 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 dev_kfree_skb_any(buf->os_buf);
 }

-static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
-unsigned int size)
-{
-struct sk_buff *skb;
-
-skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);
-
-if (skb)
-skb_reserve(skb, NET_IP_ALIGN);
-
-return skb;
-}
-
 static int enic_rq_alloc_buf(struct vnic_rq *rq)
 {
 struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -892,7 +879,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 unsigned int os_buf_index = 0;
 dma_addr_t dma_addr;

-skb = enic_rq_alloc_skb(netdev, len);
+skb = netdev_alloc_skb_ip_align(netdev, len);
 if (!skb)
 return -ENOMEM;
@@ -404,10 +404,10 @@ static int ethoc_rx(struct net_device *dev, int limit)

 if (ethoc_update_rx_stats(priv, &bd) == 0) {
 int size = bd.stat >> 16;
-struct sk_buff *skb = netdev_alloc_skb(dev, size);
+struct sk_buff *skb;

 size -= 4; /* strip the CRC */
-skb_reserve(skb, 2); /* align TCP/IP header */
+skb = netdev_alloc_skb_ip_align(dev, size);

 if (likely(skb)) {
 void *src = phys_to_virt(bd.addr);
@@ -406,10 +406,9 @@ that case.
 /* A few values that may be tweaked. */
 /* Size of each temporary Rx buffer, calculated as:
 * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
-* the card) + 8 bytes of status info + 8 bytes for the Rx Checksum +
-* 2 more because we use skb_reserve.
+* the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
 */
-#define PKT_BUF_SZ 1538
+#define PKT_BUF_SZ 1536

 /* For now, this is going to be set to the maximum size of an ethernet
 * packet. Eventually, we may want to make it a variable that is
@@ -1151,12 +1150,13 @@ static void hamachi_tx_timeout(struct net_device *dev)
 }
 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
 for (i = 0; i < RX_RING_SIZE; i++) {
-struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz);
+struct sk_buff *skb;
+
+skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
 hmp->rx_skbuff[i] = skb;
 if (skb == NULL)
 break;

-skb_reserve(skb, 2); /* 16 byte align the IP header. */
 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
@@ -1195,7 +1195,7 @@ static void hamachi_init_ring(struct net_device *dev)
 * card. -KDU
 */
 hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
-(((dev->mtu+26+7) & ~7) + 2 + 16));
+(((dev->mtu+26+7) & ~7) + 16));

 /* Initialize all Rx descriptors. */
 for (i = 0; i < RX_RING_SIZE; i++) {
@@ -4934,18 +4934,12 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 }

 if (!buffer_info->skb) {
-skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 if (!skb) {
 adapter->alloc_rx_buff_failed++;
 goto no_buffers;
 }

-/* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->skb = skb;
 buffer_info->dma = pci_map_single(pdev, skb->data,
 bufsz,
@@ -170,18 +170,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
 }

 if (!buffer_info->skb) {
-skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 if (!skb) {
 adapter->alloc_rx_buff_failed++;
 goto no_buffers;
 }

-/* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->skb = skb;
 buffer_info->dma = pci_map_single(pdev, skb->data,
 bufsz,
@@ -738,17 +738,12 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry)

 IPG_DEBUG_MSG("_get_rxbuff\n");

-skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
 if (!skb) {
 sp->rx_buff[entry] = NULL;
 return -ENOMEM;
 }

-/* Adjust the data start location within the buffer to
-* align IP address field to a 16 byte boundary.
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 /* Associate the receive buffer with the IPG NIC. */
 skb->dev = dev;
@@ -1972,9 +1972,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 * of reassembly being done in the stack */
 if (length < copybreak) {
 struct sk_buff *new_skb =
-netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
+netdev_alloc_skb_ip_align(netdev, length);
 if (new_skb) {
-skb_reserve(new_skb, NET_IP_ALIGN);
 skb_copy_to_linear_data_offset(new_skb,
 -NET_IP_ALIGN,
 (skb->data -
@@ -2057,20 +2056,13 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
 goto map_skb;
 }

-skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
-+ NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
 if (unlikely(!skb)) {
 /* Better luck next round */
 adapter->alloc_rx_buff_failed++;
 break;
 }

-/* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 buffer_info->skb = skb;
 buffer_info->length = adapter->rx_buffer_len;
 map_skb:
@@ -616,22 +616,14 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,

 if (!bi->skb) {
 struct sk_buff *skb;
-skb = netdev_alloc_skb(adapter->netdev,
-(rx_ring->rx_buf_len +
-NET_IP_ALIGN));
+skb = netdev_alloc_skb_ip_align(adapter->netdev,
+rx_ring->rx_buf_len);

 if (!skb) {
 adapter->alloc_rx_buff_failed++;
 goto no_buffers;
 }

-/*
-* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
 bi->skb = skb;
 bi->dma = pci_map_single(pdev, skb->data,
 rx_ring->rx_buf_len,
@@ -108,9 +108,8 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget)
 if (unlikely(!netif_running(nds[desc->channel])))
 goto err;

-skb = netdev_alloc_skb(dev, desc->pkt_length + 2);
+skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
 if (likely(skb != NULL)) {
-skb_reserve(skb, 2);
 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
 skb_put(skb, desc->pkt_length);
 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
@@ -400,7 +400,7 @@ static int korina_rx(struct net_device *dev, int limit)
 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

 /* Malloc up new buffer. */
-skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);

 if (!skb_new)
 break;
@@ -417,9 +417,6 @@ static int korina_rx(struct net_device *dev, int limit)
 if (devcs & ETH_RX_MP)
 dev->stats.multicast++;

-/* 16 bit align */
-skb_reserve(skb_new, 2);
-
 lp->rx_skb[lp->rx_next_done] = skb_new;
 }
@@ -357,7 +357,7 @@ static void ks8842_rx_frame(struct net_device *netdev,

 /* check the status */
 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
-struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2);
+struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);

 dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
 __func__, len);
@@ -369,9 +369,6 @@ static void ks8842_rx_frame(struct net_device *netdev,
 if (status & RXSR_MULTICAST)
 netdev->stats.multicast++;

-/* Align socket buffer in 4-byte boundary for
-better performance. */
-skb_reserve(skb, 2);
 data = (u32 *)skb_put(skb, len);

 ks8842_select_bank(adapter, 17);
@@ -470,11 +470,11 @@ static inline int init_rx_bufs(struct net_device *dev)

 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
 dma_addr_t dma_addr;
-struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+struct sk_buff *skb;

+skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
 if (skb == NULL)
 return -1;
-skb_reserve(skb, 2);
 dma_addr = dma_map_single(dev->dev.parent, skb->data,
 PKT_BUF_SZ, DMA_FROM_DEVICE);
 rbd->v_next = rbd+1;
@@ -697,12 +697,12 @@ static inline int i596_rx(struct net_device *dev)
 (dma_addr_t)SWAP32(rbd->b_data),
 PKT_BUF_SZ, DMA_FROM_DEVICE);
 /* Get fresh skbuff to replace filled one. */
-newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
+newskb = netdev_alloc_skb_ip_align(dev,
+PKT_BUF_SZ);
 if (newskb == NULL) {
 skb = NULL; /* drop pkt */
 goto memory_squeeze;
 }
-skb_reserve(newskb, 2);

 /* Pass up the skb already on the Rx ring. */
 skb_put(skb, pkt_len);
@@ -716,7 +716,7 @@ static inline int i596_rx(struct net_device *dev)
 rbd->b_data = SWAP32(dma_addr);
 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
 } else
-skb = netdev_alloc_skb(dev, pkt_len + 2);
+skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 memory_squeeze:
 if (skb == NULL) {
 /* XXX tulip.c can defer packets here!! */
@@ -730,7 +730,6 @@ memory_squeeze:
 dma_sync_single_for_cpu(dev->dev.parent,
 (dma_addr_t)SWAP32(rbd->b_data),
 PKT_BUF_SZ, DMA_FROM_DEVICE);
-skb_reserve(skb, 2);
 memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
 dma_sync_single_for_device(dev->dev.parent,
 (dma_addr_t)SWAP32(rbd->b_data),
@@ -3555,13 +3555,12 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
 if (pkt_size >= rx_copybreak)
 goto out;

-skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 if (!skb)
 goto out;

 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
 PCI_DMA_FROMDEVICE);
-skb_reserve(skb, NET_IP_ALIGN);
 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
 *sk_buff = skb;
 done = true;
@@ -793,7 +793,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)

 rx_len -= rx_size_align + 4;

-skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 if (unlikely(!skb)) {
 if (printk_ratelimit())
 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
@@ -801,8 +801,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
 goto next;
 }

-skb_reserve(skb, NET_IP_ALIGN);
-
 if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
 memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
 rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
@@ -365,11 +365,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 }
 skb_reserve(newskb, 2);
 } else {
-skb = netdev_alloc_skb(dev, len + 2);
-if (skb) {
-skb_reserve(skb, 2);
+skb = netdev_alloc_skb_ip_align(dev, len);
+if (skb)
 skb_copy_to_linear_data(skb, rd->skb->data, len);
-}
+
 newskb = rd->skb;
 }
 memory_squeeze:
@@ -536,13 +536,12 @@ static bool sis190_try_rx_copy(struct sis190_private *tp,
 if (pkt_size >= rx_copybreak)
 goto out;

-skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
+skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
 if (!skb)
 goto out;

 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
-skb_reserve(skb, 2);
 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
 *sk_buff = skb;
 done = true;
@@ -3070,11 +3070,10 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 goto error;

 if (len < RX_COPY_THRESHOLD) {
-skb = netdev_alloc_skb(dev, len + 2);
+skb = netdev_alloc_skb_ip_align(dev, len);
 if (!skb)
 goto resubmit;

-skb_reserve(skb, 2);
 pci_dma_sync_single_for_cpu(skge->hw->pdev,
 pci_unmap_addr(e, mapaddr),
 len, PCI_DMA_FROMDEVICE);
@@ -3085,11 +3084,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 skge_rx_reuse(e, skge->rx_buf_size);
 } else {
 struct sk_buff *nskb;
-nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
+
+nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
 if (!nskb)
 goto resubmit;

-skb_reserve(nskb, NET_IP_ALIGN);
 pci_unmap_single(skge->hw->pdev,
 pci_unmap_addr(e, mapaddr),
 pci_unmap_len(e, maplen),
@@ -2191,9 +2191,8 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 {
 struct sk_buff *skb;

-skb = netdev_alloc_skb(sky2->netdev, length + 2);
+skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
 if (likely(skb)) {
-skb_reserve(skb, 2);
 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
 length, PCI_DMA_FROMDEVICE);
 skb_copy_from_linear_data(re->skb, skb->data, length);
@@ -1549,7 +1549,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 if (tmpCStat & TLAN_CSTAT_EOC)
 eoc = 1;

-new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+new_skb = netdev_alloc_skb_ip_align(dev,
+TLAN_MAX_FRAME_SIZE + 5);
 if ( !new_skb )
 goto drop_and_reuse;

@@ -1563,7 +1564,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 skb->protocol = eth_type_trans( skb, dev );
 netif_rx( skb );

-skb_reserve( new_skb, NET_IP_ALIGN );
 head_list->buffer[0].address = pci_map_single(priv->pciDev,
 new_skb->data,
 TLAN_MAX_FRAME_SIZE,
@@ -1967,13 +1967,12 @@ static void TLan_ResetLists( struct net_device *dev )
 list->cStat = TLAN_CSTAT_READY;
 list->frameSize = TLAN_MAX_FRAME_SIZE;
 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
-skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
+skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
 if ( !skb ) {
 pr_err("TLAN: out of memory for received data.\n" );
 break;
 }

-skb_reserve( skb, NET_IP_ALIGN );
 list->buffer[0].address = pci_map_single(priv->pciDev,
 skb->data,
 TLAN_MAX_FRAME_SIZE,
@@ -802,13 +802,11 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
 int rx = data->rxhead;
 struct sk_buff *skb;

-data->rxskbs[rx] = skb = netdev_alloc_skb(dev,
-TSI108_RXBUF_SIZE + 2);
+skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
+data->rxskbs[rx] = skb;
 if (!skb)
 break;

-skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
-
 data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
 TSI108_RX_SKB_SIZE,
 DMA_FROM_DEVICE);
@@ -1356,7 +1354,7 @@ static int tsi108_open(struct net_device *dev)
 for (i = 0; i < TSI108_RXRING_LEN; i++) {
 struct sk_buff *skb;

-skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
 if (!skb) {
 /* Bah. No memory for now, but maybe we'll get
 * some more later.
@@ -1370,8 +1368,6 @@ static int tsi108_open(struct net_device *dev)
 }

 data->rxskbs[i] = skb;
-/* Align the payload on a 4-byte boundary */
-skb_reserve(skb, 2);
 data->rxskbs[i] = skb;
 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
@@ -1484,15 +1484,15 @@ static int rhine_rx(struct net_device *dev, int limit)
 }
 }
 } else {
-struct sk_buff *skb;
+struct sk_buff *skb = NULL;
 /* Length should omit the CRC */
 int pkt_len = data_size - 4;

 /* Check if the packet is long enough to accept without
 copying to a minimally-sized skbuff. */
-if (pkt_len < rx_copybreak &&
-(skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
-skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */
+if (pkt_len < rx_copybreak)
+skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+if (skb) {
 pci_dma_sync_single_for_cpu(rp->pdev,
 rp->rx_skbuff_dma[entry],
 rp->rx_buf_sz,
@@ -1949,10 +1949,9 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 if (pkt_size < rx_copybreak) {
 struct sk_buff *new_skb;

-new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
+new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
 if (new_skb) {
 new_skb->ip_summed = rx_skb[0]->ip_summed;
-skb_reserve(new_skb, 2);
 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 *rx_skb = new_skb;
 ret = 0;
@@ -283,13 +283,12 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 do {
 struct skb_vnet_hdr *hdr;

-skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
 if (unlikely(!skb)) {
 oom = true;
 break;
 }

-skb_reserve(skb, NET_IP_ALIGN);
 skb_put(skb, MAX_PACKET_LEN);

 hdr = skb_vnet_hdr(skb);
@@ -344,14 +343,12 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 do {
 skb_frag_t *f;

-skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
+skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
 if (unlikely(!skb)) {
 oom = true;
 break;
 }

-skb_reserve(skb, NET_IP_ALIGN);
-
 f = &skb_shinfo(skb)->frags[0];
 f->page = get_a_page(vi, gfp);
 if (!f->page) {
@@ -2604,20 +2604,13 @@ EXPORT_SYMBOL(napi_reuse_skb);

 struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
-struct net_device *dev = napi->dev;
 struct sk_buff *skb = napi->skb;

 if (!skb) {
-skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
-if (!skb)
-goto out;
-
-skb_reserve(skb, NET_IP_ALIGN);
-
-napi->skb = skb;
+skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
+if (skb)
+napi->skb = skb;
 }

-out:
 return skb;
 }
 EXPORT_SYMBOL(napi_get_frags);