[NET]: Kill eth_copy_and_sum().
It hasn't "summed" anything in over 7 years, and it's just a straight memcpy a la skb_copy_to_linear_data(), so just get rid of it.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent a7ab4b501f
commit 8c7b7faaa6

45 changed files with 66 additions and 73 deletions
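Every hunk below makes the same mechanical substitution in a driver receive path: the call is renamed and the trailing checksum-base argument (always passed as 0, and long since ignored) is dropped. A minimal sketch of the pattern, using illustrative names (example_rx_copy, rx_buf and pkt_len are not taken from any particular driver):

#include <linux/skbuff.h>

/* Hypothetical helper, only to show the shape of the conversion. */
static void example_rx_copy(struct sk_buff *skb, const void *rx_buf,
                            unsigned int pkt_len)
{
	/* Before: eth_copy_and_sum(skb, rx_buf, pkt_len, 0);
	 * the final 0 was a checksum base that was never used. */
	skb_copy_to_linear_data(skb, rx_buf, pkt_len);  /* plain copy into skb->data */
}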

@@ -477,9 +477,9 @@ for (;;) {
 }
 else {
 skb_put(skb,pkt_len-4); /* Make room */
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 (unsigned char *)__va(bdp->cbd_bufaddr),
-pkt_len-4, 0);
+pkt_len-4);
 skb->protocol=eth_type_trans(skb,dev);
 netif_rx(skb);
 }

@@ -734,9 +734,9 @@ for (;;) {
 }
 else {
 skb_put(skb,pkt_len); /* Make room */
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 (unsigned char *)__va(bdp->cbd_bufaddr),
-pkt_len, 0);
+pkt_len);
 skb->protocol=eth_type_trans(skb,dev);
 netif_rx(skb);
 }

@@ -506,9 +506,9 @@ for (;;) {
 }
 else {
 skb_put(skb,pkt_len-4); /* Make room */
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 cep->rx_vaddr[bdp - cep->rx_bd_base],
-pkt_len-4, 0);
+pkt_len-4);
 skb->protocol=eth_type_trans(skb,dev);
 netif_rx(skb);
 }

@@ -725,7 +725,7 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
 fep->stats.rx_dropped++;
 } else {
 skb_put(skb,pkt_len-4); /* Make room */
-eth_copy_and_sum(skb, data, pkt_len-4, 0);
+skb_copy_to_linear_data(skb, data, pkt_len-4);
 skb->protocol=eth_type_trans(skb,dev);
 netif_rx(skb);
 }

@@ -990,7 +990,7 @@ static void elmc_rcv_int(struct net_device *dev)
 if (skb != NULL) {
 skb_reserve(skb, 2); /* 16 byte alignment */
 skb_put(skb,totlen);
-eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
 skb->protocol = eth_type_trans(skb, dev);
 netif_rx(skb);
 dev->last_rx = jiffies;

@@ -333,9 +333,9 @@ static int lance_rx (struct net_device *dev)
 skb_reserve (skb, 2); /* 16 byte align */
 skb_put (skb, len); /* make room */
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
-len, 0);
+len);
 skb->protocol = eth_type_trans (skb, dev);
 netif_rx (skb);
 dev->last_rx = jiffies;

@@ -2017,7 +2017,7 @@ no_early_rx:
 #if RX_BUF_IDX == 3
 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
 #else
-eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
 #endif
 skb_put (skb, pkt_size);

@@ -322,9 +322,9 @@ static int lance_rx (struct net_device *dev)
 skb_reserve (skb, 2); /* 16 byte align */
 skb_put (skb, len); /* make room */
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
-len, 0);
+len);
 skb->protocol = eth_type_trans (skb, dev);
 netif_rx (skb);
 dev->last_rx = jiffies;

@@ -746,7 +746,7 @@ static int ariadne_rx(struct net_device *dev)
 skb_reserve(skb,2); /* 16 byte align */
 skb_put(skb,pkt_len); /* Make room */
-eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0);
+skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len);
 skb->protocol=eth_type_trans(skb,dev);
 #if 0
 printk(KERN_DEBUG "RX pkt type 0x%04x from ",

@@ -258,7 +258,7 @@ static int ep93xx_rx(struct net_device *dev, int *budget)
 skb_reserve(skb, 2);
 dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
 length, DMA_FROM_DEVICE);
-eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0);
+skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
 skb_put(skb, length);
 skb->protocol = eth_type_trans(skb, dev);

@@ -1205,8 +1205,8 @@ static int au1000_rx(struct net_device *dev)
 continue;
 }
 skb_reserve(skb, 2); /* 16 byte IP header align */
-eth_copy_and_sum(skb,
-(unsigned char *)pDB->vaddr, frmlen, 0);
+skb_copy_to_linear_data(skb,
+(unsigned char *)pDB->vaddr, frmlen);
 skb_put(skb, frmlen);
 skb->protocol = eth_type_trans(skb, dev);
 netif_rx(skb); /* pass the packet to upper layers */

@@ -866,9 +866,9 @@ receive_packet (struct net_device *dev)
 PCI_DMA_FROMDEVICE);
 /* 16 byte align the IP header */
 skb_reserve (skb, 2);
-eth_copy_and_sum (skb,
+skb_copy_to_linear_data (skb,
 np->rx_skbuff[entry]->data,
-pkt_len, 0);
+pkt_len);
 skb_put (skb, pkt_len);
 pci_dma_sync_single_for_device(np->pdev,
 desc->fraginfo &

@@ -1801,7 +1801,7 @@ speedo_rx(struct net_device *dev)
 #if 1 || USE_IP_CSUM
 /* Packet is in one chunk -- we can copy + cksum. */
-eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
+skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
 skb_put(skb, pkt_len);
 #else
 skb_copy_from_linear_data(sp->rx_skbuff[entry],

@@ -1201,7 +1201,7 @@ static int epic_rx(struct net_device *dev, int budget)
 ep->rx_ring[entry].bufaddr,
 ep->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
+skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(ep->pci_dev,
 ep->rx_ring[entry].bufaddr,

@@ -1727,8 +1727,8 @@ static int netdev_rx(struct net_device *dev)
 /* Call copy + cksum if available. */
 #if ! defined(__alpha__)
-eth_copy_and_sum(skb,
-np->cur_rx->skbuff->data, pkt_len, 0);
+skb_copy_to_linear_data(skb,
+np->cur_rx->skbuff->data, pkt_len);
 skb_put(skb, pkt_len);
 #else
 memcpy(skb_put(skb, pkt_len),

@@ -648,7 +648,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
 fep->stats.rx_dropped++;
 } else {
 skb_put(skb,pkt_len-4); /* Make room */
-eth_copy_and_sum(skb, data, pkt_len-4, 0);
+skb_copy_to_linear_data(skb, data, pkt_len-4);
 skb->protocol=eth_type_trans(skb,dev);
 netif_rx(skb);
 }

@@ -1575,8 +1575,8 @@ static int hamachi_rx(struct net_device *dev)
 PCI_DMA_FROMDEVICE);
 /* Call copy + cksum if available. */
 #if 1 || USE_IP_COPYSUM
-eth_copy_and_sum(skb,
-hmp->rx_skbuff[entry]->data, pkt_len, 0);
+skb_copy_to_linear_data(skb,
+hmp->rx_skbuff[entry]->data, pkt_len);
 skb_put(skb, pkt_len);
 #else
 memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma

@@ -111,7 +111,7 @@ static int ixpdev_rx(struct net_device *dev, int *budget)
 skb = dev_alloc_skb(desc->pkt_length + 2);
 if (likely(skb != NULL)) {
 skb_reserve(skb, 2);
-eth_copy_and_sum(skb, buf, desc->pkt_length, 0);
+skb_copy_to_linear_data(skb, buf, desc->pkt_length);
 skb_put(skb, desc->pkt_length);
 skb->protocol = eth_type_trans(skb, nds[desc->channel]);

@@ -1186,9 +1186,9 @@ lance_rx(struct net_device *dev)
 }
 skb_reserve(skb,2); /* 16 byte align */
 skb_put(skb,pkt_len); /* Make room */
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
-pkt_len,0);
+pkt_len);
 skb->protocol=eth_type_trans(skb,dev);
 netif_rx(skb);
 dev->last_rx = jiffies;

@@ -2357,8 +2357,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
 np->rx_dma[entry],
 buflen,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb,
-np->rx_skbuff[entry]->data, pkt_len, 0);
+skb_copy_to_linear_data(skb,
+np->rx_skbuff[entry]->data, pkt_len);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(np->pci_dev,
 np->rx_dma[entry],

@@ -936,7 +936,7 @@ static void ni52_rcv_int(struct net_device *dev)
 {
 skb_reserve(skb,2);
 skb_put(skb,totlen);
-eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen);
 skb->protocol=eth_type_trans(skb,dev);
 netif_rx(skb);
 dev->last_rx = jiffies;

@@ -1096,7 +1096,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
 #ifdef RCV_VIA_SKB
 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
 skb_put(skb,len);
-eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
+skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
 }
 else {
 struct sk_buff *skb1 = p->recv_skb[p->rmdnum];

@@ -1108,7 +1108,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
 }
 #else
 skb_put(skb,len);
-eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
+skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
 #endif
 p->stats.rx_packets++;
 p->stats.rx_bytes += len;

@@ -1567,7 +1567,7 @@ static void netdrv_rx_interrupt (struct net_device *dev,
 if (skb) {
 skb_reserve (skb, 2); /* 16 byte align the IP fields. */
-eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
 skb_put (skb, pkt_size);
 skb->protocol = eth_type_trans (skb, dev);

@@ -1235,9 +1235,9 @@ static void pcnet32_rx_entry(struct net_device *dev,
 lp->rx_dma_addr[entry],
 pkt_len,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 (unsigned char *)(lp->rx_skbuff[entry]->data),
-pkt_len, 0);
+pkt_len);
 pci_dma_sync_single_for_device(lp->pci_dev,
 lp->rx_dma_addr[entry],
 pkt_len,

@@ -690,9 +690,9 @@ static int lan_saa9730_rx(struct net_device *dev)
 lp->stats.rx_packets++;
 skb_reserve(skb, 2); /* 16 byte align */
 skb_put(skb, len); /* make room */
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 (unsigned char *) pData,
-len, 0);
+len);
 skb->protocol = eth_type_trans(skb, dev);
 netif_rx(skb);
 dev->last_rx = jiffies;

@@ -320,7 +320,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 skb_put(skb, len);
 /* Copy out of kseg1 to avoid silly cache flush. */
-eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
+skb_copy_to_linear_data(skb, pkt_pointer + 2, len);
 skb->protocol = eth_type_trans(skb, dev);
 /* We don't want to receive our own packets */

@@ -548,7 +548,7 @@ static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
 if (skb) {
 skb_reserve(skb, NET_IP_ALIGN);
-eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
+skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
 *sk_buff = skb;
 sis190_give_to_asic(desc, rx_buf_sz);
 ret = 0;

@@ -1456,7 +1456,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 pci_dma_sync_single_for_cpu(np->pci_dev,
 np->rx_info[entry].mapping,
 pkt_len, PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
+skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
 pci_dma_sync_single_for_device(np->pci_dev,
 np->rx_info[entry].mapping,
 pkt_len, PCI_DMA_FROMDEVICE);

@@ -777,7 +777,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
 {
 skb_reserve(skb,2);
 skb_put(skb,totlen);
-eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0);
+skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
 skb->protocol=eth_type_trans(skb,dev);
 netif_rx(skb);
 p->stats.rx_packets++;

@@ -853,10 +853,9 @@ static int lance_rx( struct net_device *dev )
 skb_reserve( skb, 2 ); /* 16 byte align */
 skb_put( skb, pkt_len ); /* Make room */
-// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len);
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 PKTBUF_ADDR(head),
-pkt_len, 0);
+pkt_len);
 skb->protocol = eth_type_trans( skb, dev );
 netif_rx( skb );

@@ -860,7 +860,7 @@ static void bigmac_rx(struct bigmac *bp)
 sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
 this->rx_addr, len,
 SBUS_DMA_FROMDEVICE);
-eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0);
+skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
 sbus_dma_sync_single_for_device(bp->bigmac_sdev,
 this->rx_addr, len,
 SBUS_DMA_FROMDEVICE);

@@ -1313,7 +1313,7 @@ static void rx_poll(unsigned long data)
 np->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
+skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
 pci_dma_sync_single_for_device(np->pci_dev,
 desc->frag[0].addr,
 np->rx_buf_sz,

@@ -549,9 +549,9 @@ static void lance_rx_dvma(struct net_device *dev)
 skb_reserve(skb, 2); /* 16 byte align */
 skb_put(skb, len); /* make room */
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 (unsigned char *)&(ib->rx_buf [entry][0]),
-len, 0);
+len);
 skb->protocol = eth_type_trans(skb, dev);
 netif_rx(skb);
 dev->last_rx = jiffies;

@@ -439,8 +439,8 @@ static void qe_rx(struct sunqe *qep)
 } else {
 skb_reserve(skb, 2);
 skb_put(skb, len);
-eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
-len, 0);
+skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+len);
 skb->protocol = eth_type_trans(skb, qep->dev);
 netif_rx(skb);
 qep->dev->last_rx = jiffies;

@@ -197,8 +197,8 @@ int tulip_poll(struct net_device *dev, int *budget)
 tp->rx_buffers[entry].mapping,
 pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
-pkt_len, 0);
+skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+pkt_len);
 skb_put(skb, pkt_len);
 #else
 memcpy(skb_put(skb, pkt_len),

@@ -420,8 +420,8 @@ static int tulip_rx(struct net_device *dev)
 tp->rx_buffers[entry].mapping,
 pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
-pkt_len, 0);
+skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+pkt_len);
 skb_put(skb, pkt_len);
 #else
 memcpy(skb_put(skb, pkt_len),

@@ -1232,7 +1232,7 @@ static int netdev_rx(struct net_device *dev)
 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
 np->rx_skbuff[entry]->len,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
+skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
 np->rx_skbuff[entry]->len,

@@ -1208,7 +1208,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
 goto out;
 }
 skb_reserve(skb, 2);
-eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0);
+skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len);
 skb_put(skb, pkt_len);
 skb->protocol = eth_type_trans(skb, dev);
 netif_rx(skb);

@@ -1242,8 +1242,8 @@ xircom_rx(struct net_device *dev)
 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 skb_reserve(skb, 2); /* 16 byte align the IP header */
 #if ! defined(__alpha__)
-eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
-pkt_len, 0);
+skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
+pkt_len);
 skb_put(skb, pkt_len);
 #else
 memcpy(skb_put(skb, pkt_len),

@@ -1703,7 +1703,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
 PKT_BUF_SZ,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
+skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
 PKT_BUF_SZ,
 PCI_DMA_FROMDEVICE);

@@ -255,7 +255,7 @@ static void catc_rx_done(struct urb *urb)
 if (!(skb = dev_alloc_skb(pkt_len)))
 return;
-eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0);
+skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
 skb_put(skb, pkt_len);
 skb->protocol = eth_type_trans(skb, catc->netdev);

@@ -635,7 +635,7 @@ static void kaweth_usb_receive(struct urb *urb)
 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
-eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0);
+skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len);
 skb_put(skb, pkt_len);

@@ -1492,9 +1492,9 @@ static int rhine_rx(struct net_device *dev, int limit)
 rp->rx_buf_sz,
 PCI_DMA_FROMDEVICE);
-eth_copy_and_sum(skb,
+skb_copy_to_linear_data(skb,
 rp->rx_skbuff[entry]->data,
-pkt_len, 0);
+pkt_len);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(rp->pdev,
 rp->rx_skbuff_dma[entry],

@@ -1011,7 +1011,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
 } else {
 skb->dev = dev;
 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
-eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0);
+skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
 wl3501_receive(this, skb->data, pkt_len);
 skb_put(skb, pkt_len);
 skb->protocol = eth_type_trans(skb, dev);

@@ -1137,7 +1137,7 @@ static int yellowfin_rx(struct net_device *dev)
 if (skb == NULL)
 break;
 skb_reserve(skb, 2); /* 16 byte align the IP header */
-eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
+skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
 skb_put(skb, pkt_len);
 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
 yp->rx_buf_sz,

@@ -40,12 +40,6 @@ extern int eth_header_cache(struct neighbour *neigh,
 struct hh_cache *hh);
 extern struct net_device *alloc_etherdev(int sizeof_priv);
-static inline void eth_copy_and_sum (struct sk_buff *dest,
-const unsigned char *src,
-int len, int base)
-{
-memcpy (dest->data, src, len);
-}
 /**
 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
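The only non-driver hunk is the removal above of the eth_copy_and_sum() inline from include/linux/etherdevice.h. For reference, the helper that callers now use is essentially the following (an approximation of the contemporary include/linux/skbuff.h definition, not a hunk from this commit):

/* Approximate definition; the removed eth_copy_and_sum() performed the
 * same memcpy and simply ignored its "base" argument. */
static inline void skb_copy_to_linear_data(struct sk_buff *skb,
                                           const void *from,
                                           const unsigned int len)
{
	memcpy(skb->data, from, len);
}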