Mirror of https://github.com/adulau/aha.git (synced 2024-12-28 03:36:19 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (31 commits)
  Revert "ipv4: arp announce, arp_proxy and windows ip conflict verification"
  igb: return PCI_ERS_RESULT_DISCONNECT on permanent error
  e1000e: io_error_detected callback should return PCI_ERS_RESULT_DISCONNECT
  e1000: return PCI_ERS_RESULT_DISCONNECT on permanent error
  e1000: fix unmap bug
  igb: fix unmap length bug
  ixgbe: fix unmap length bug
  ixgbe: Fix link capabilities during adapter resets
  ixgbe: Fix device capabilities of 82599 single speed fiber NICs.
  ixgbe: Fix SFP log messages
  usbnet: Remove private stats structure
  usbnet: Use netdev stats structure
  smsc95xx: Use netdev stats structure
  rndis_host: Use netdev stats structure
  net1080: Use netdev stats structure
  dm9601: Use netdev stats structure
  cdc_eem: Use netdev stats structure
  ipv4: Fix fib_trie rebalancing, part 3
  bnx2x: Fix the behavior of ethtool when ONBOOT=no
  sctp: xmit sctp packet always return no route error
  ...
commit 7b85425fac

30 changed files with 235 additions and 145 deletions
@@ -2931,7 +2931,7 @@ P: Dmitry Eremin-Solenikov
 M: dbaryshkov@gmail.com
 P: Sergey Lapin
 M: slapin@ossfans.org
-L: linux-zigbee-devel@lists.sourceforge.net
+L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W: http://apps.sourceforge.net/trac/linux-zigbee
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
 S: Maintained

@@ -8637,6 +8637,14 @@ static int bnx2x_nway_reset(struct net_device *dev)
     return 0;
 }
 
+static u32
+bnx2x_get_link(struct net_device *dev)
+{
+    struct bnx2x *bp = netdev_priv(dev);
+
+    return bp->link_vars.link_up;
+}
+
 static int bnx2x_get_eeprom_len(struct net_device *dev)
 {
     struct bnx2x *bp = netdev_priv(dev);
@@ -10034,7 +10042,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
     .get_msglevel = bnx2x_get_msglevel,
     .set_msglevel = bnx2x_set_msglevel,
     .nway_reset = bnx2x_nway_reset,
-    .get_link = ethtool_op_get_link,
+    .get_link = bnx2x_get_link,
     .get_eeprom_len = bnx2x_get_eeprom_len,
     .get_eeprom = bnx2x_get_eeprom,
     .set_eeprom = bnx2x_set_eeprom,

@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
     /* Free all the Rx ring sk_buffs */
     for (i = 0; i < rx_ring->count; i++) {
         buffer_info = &rx_ring->buffer_info[i];
-        if (buffer_info->skb) {
+        if (buffer_info->dma) {
             pci_unmap_single(pdev,
                              buffer_info->dma,
                              buffer_info->length,
                              PCI_DMA_FROMDEVICE);
+        }
+
+        buffer_info->dma = 0;
 
+        if (buffer_info->skb) {
             dev_kfree_skb(buffer_info->skb);
             buffer_info->skb = NULL;
         }
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                          buffer_info->dma,
                          buffer_info->length,
                          PCI_DMA_FROMDEVICE);
+        buffer_info->dma = 0;
 
         length = le16_to_cpu(rx_desc->length);
         /* !EOP means multiple descriptors were used to store a single
@@ -4222,6 +4227,7 @@ map_skb:
             pci_unmap_single(pdev, buffer_info->dma,
                              adapter->rx_buffer_len,
                              PCI_DMA_FROMDEVICE);
+            buffer_info->dma = 0;
 
             break; /* while !buffer_info->skb */
         }
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 
     netif_device_detach(netdev);
 
+    if (state == pci_channel_io_perm_failure)
+        return PCI_ERS_RESULT_DISCONNECT;
+
     if (netif_running(netdev))
         e1000_down(adapter);
     pci_disable_device(pdev);

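The e1000 Rx-cleanup hunks above key the unmap on buffer_info->dma rather than on the skb, and clear the handle as soon as the mapping is released, so a buffer can never be unmapped twice. A minimal userspace sketch of that bookkeeping (buffer_info, fake_unmap() and the handle value are stand-ins, not the driver's real types):

/* Sketch only: models the unmap-once invariant from the hunks above. */
#include <assert.h>
#include <stdio.h>

struct buffer_info {
    unsigned long dma;   /* 0 means "not mapped" */
    void *skb;           /* stand-in for the socket buffer */
};

static int unmap_calls;

static void fake_unmap(unsigned long dma)
{
    assert(dma != 0);    /* unmapping a cleared handle would be the old bug */
    unmap_calls++;
}

static void clean_rx_buffer(struct buffer_info *bi)
{
    if (bi->dma) {       /* key the unmap on the mapping, not on the skb */
        fake_unmap(bi->dma);
        bi->dma = 0;     /* clear immediately so a later pass cannot unmap twice */
    }
    if (bi->skb)
        bi->skb = NULL;
}

int main(void)
{
    struct buffer_info bi = { .dma = 0xdeadbeef, .skb = (void *)1 };

    clean_rx_buffer(&bi);
    clean_rx_buffer(&bi);                      /* second pass is now a no-op */
    printf("unmap calls: %d\n", unmap_calls);  /* prints 1 */
    return 0;
}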
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 
     netif_device_detach(netdev);
 
+    if (state == pci_channel_io_perm_failure)
+        return PCI_ERS_RESULT_DISCONNECT;
+
     if (netif_running(netdev))
         e1000e_down(adapter);
     pci_disable_device(pdev);

@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
         cleaned = true;
         cleaned_count++;
 
+        /* this is the fast path for the non-packet split case */
         if (!adapter->rx_ps_hdr_size) {
             pci_unmap_single(pdev, buffer_info->dma,
-                             adapter->rx_buffer_len +
-                             NET_IP_ALIGN,
+                             adapter->rx_buffer_len,
                              PCI_DMA_FROMDEVICE);
+            buffer_info->dma = 0;
             skb_put(skb, length);
             goto send_up;
         }
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 
         if (!skb_shinfo(skb)->nr_frags) {
             pci_unmap_single(pdev, buffer_info->dma,
-                             adapter->rx_ps_hdr_size + NET_IP_ALIGN,
+                             adapter->rx_ps_hdr_size,
                              PCI_DMA_FROMDEVICE);
+            buffer_info->dma = 0;
             skb_put(skb, hlen);
         }
 
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
         bufsz = adapter->rx_ps_hdr_size;
     else
         bufsz = adapter->rx_buffer_len;
-    bufsz += NET_IP_ALIGN;
 
     while (cleaned_count--) {
         rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
         }
 
         if (!buffer_info->skb) {
-            skb = netdev_alloc_skb(netdev, bufsz);
+            skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
             if (!skb) {
                 adapter->alloc_rx_buff_failed++;
                 goto no_buffers;
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
 
     netif_device_detach(netdev);
 
+    if (state == pci_channel_io_perm_failure)
+        return PCI_ERS_RESULT_DISCONNECT;
+
     if (netif_running(netdev))
         igb_down(adapter);
     pci_disable_device(pdev);

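The igb hunks above (like the ixgbe ones further down) make the length passed when unmapping match the length that was mapped, and move the NET_IP_ALIGN padding back to the skb allocation where it belongs. A runnable sketch of that length-accounting rule, with stand-in map/unmap helpers rather than the real PCI DMA API:

/* Sketch only: the unmap length must equal the mapped length. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NET_IP_ALIGN 2

struct mapping { unsigned long addr; size_t len; };

static struct mapping fake_map(void *buf, size_t len)
{
    return (struct mapping){ .addr = (unsigned long)buf, .len = len };
}

static void fake_unmap(struct mapping m, size_t len)
{
    /* pci_unmap_single()-style contract: same length as the map call */
    assert(len == m.len);
}

int main(void)
{
    size_t rx_buffer_len = 2048;
    char skb_data[2048 + NET_IP_ALIGN];   /* the skb allocation may be padded */
    struct mapping m = fake_map(skb_data + NET_IP_ALIGN, rx_buffer_len);

    fake_unmap(m, rx_buffer_len);         /* correct: matches the mapped length */
    /* fake_unmap(m, rx_buffer_len + NET_IP_ALIGN) would trip the assert */
    printf("map and unmap lengths agree\n");
    return 0;
}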
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size)
     return 0;
 }
 
+static const struct net_device_ops bfin_sir_ndo = {
+    .ndo_open       = bfin_sir_open,
+    .ndo_stop       = bfin_sir_stop,
+    .ndo_start_xmit = bfin_sir_hard_xmit,
+    .ndo_do_ioctl   = bfin_sir_ioctl,
+    .ndo_get_stats  = bfin_sir_stats,
+};
+
 static int __devinit bfin_sir_probe(struct platform_device *pdev)
 {
     struct net_device *dev;
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev)
     if (err)
         goto err_mem_3;
 
-    dev->hard_start_xmit = bfin_sir_hard_xmit;
-    dev->open = bfin_sir_open;
-    dev->stop = bfin_sir_stop;
-    dev->do_ioctl = bfin_sir_ioctl;
-    dev->get_stats = bfin_sir_stats;
+    dev->netdev_ops = &bfin_sir_ndo;
     dev->irq = sir_port->irq;
 
     irda_init_max_qos_capabilies(&self->qos);
 

@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
     ecmd->autoneg = AUTONEG_ENABLE;
     ecmd->transceiver = XCVR_EXTERNAL;
     if ((hw->phy.media_type == ixgbe_media_type_copper) ||
-        (hw->mac.type == ixgbe_mac_82599EB)) {
+        (hw->phy.multispeed_fiber)) {
         ecmd->supported |= (SUPPORTED_1000baseT_Full |
                             SUPPORTED_Autoneg);
 
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
     s32 err = 0;
 
     if ((hw->phy.media_type == ixgbe_media_type_copper) ||
-        (hw->mac.type == ixgbe_mac_82599EB)) {
+        (hw->phy.multispeed_fiber)) {
         /* 10000/copper and 1000/copper must autoneg
          * this function does not support any duplex forcing, but can
          * limit the advertising of the adapter to only 10000 or 1000 */
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
     } else {
         /* in this case we currently only support 10Gb/FULL */
         if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+            (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
             (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
             return -EINVAL;
     }

@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
     union ixgbe_adv_rx_desc *rx_desc;
     struct ixgbe_rx_buffer *bi;
     unsigned int i;
-    unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
 
     i = rx_ring->next_to_use;
     bi = &rx_ring->rx_buffer_info[i];
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 
         if (!bi->skb) {
             struct sk_buff *skb;
-            skb = netdev_alloc_skb(adapter->netdev, bufsz);
+            skb = netdev_alloc_skb(adapter->netdev,
+                                   (rx_ring->rx_buf_len +
+                                    NET_IP_ALIGN));
 
             if (!skb) {
                 adapter->alloc_rx_buff_failed++;
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
             skb_reserve(skb, NET_IP_ALIGN);
 
             bi->skb = skb;
-            bi->dma = pci_map_single(pdev, skb->data, bufsz,
+            bi->dma = pci_map_single(pdev, skb->data,
+                                     rx_ring->rx_buf_len,
                                      PCI_DMA_FROMDEVICE);
         }
         /* Refresh the desc even if buffer_addrs didn't change because
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
         pci_unmap_single(pdev, rx_buffer_info->dma,
                          rx_ring->rx_buf_len,
                          PCI_DMA_FROMDEVICE);
+        rx_buffer_info->dma = 0;
         skb_put(skb, len);
     }
 
@@ -2701,7 +2704,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
      */
     err = hw->phy.ops.identify(hw);
     if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-        DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
+        dev_err(&adapter->pdev->dev, "failed to initialize because "
+                "an unsupported SFP+ module type was detected.\n"
+                "Reload the driver after installing a supported "
+                "module.\n");
         ixgbe_down(adapter);
         return err;
     }
@@ -2812,9 +2818,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
         }
         if (!rx_buffer_info->page)
             continue;
-        pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
-                       PCI_DMA_FROMDEVICE);
-        rx_buffer_info->page_dma = 0;
+        if (rx_buffer_info->page_dma) {
+            pci_unmap_page(pdev, rx_buffer_info->page_dma,
+                           PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+            rx_buffer_info->page_dma = 0;
+        }
         put_page(rx_buffer_info->page);
         rx_buffer_info->page = NULL;
         rx_buffer_info->page_offset = 0;
@@ -3720,10 +3728,11 @@ static void ixgbe_sfp_task(struct work_struct *work)
             goto reschedule;
         ret = hw->phy.ops.reset(hw);
         if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-            DPRINTK(PROBE, ERR, "failed to initialize because an "
-                    "unsupported SFP+ module type was detected.\n"
-                    "Reload the driver after installing a "
-                    "supported module.\n");
+            dev_err(&adapter->pdev->dev, "failed to initialize "
+                    "because an unsupported SFP+ module type "
+                    "was detected.\n"
+                    "Reload the driver after installing a "
+                    "supported module.\n");
             unregister_netdev(adapter->netdev);
         } else {
             DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -4502,7 +4511,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
     u32 autoneg;
 
     adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
-    if (hw->mac.ops.get_link_capabilities)
+    autoneg = hw->phy.autoneg_advertised;
+    if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
         hw->mac.ops.get_link_capabilities(hw, &autoneg,
                                           &hw->mac.autoneg);
     if (hw->mac.ops.setup_link_speed)
@@ -4526,7 +4536,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
     adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
     err = hw->phy.ops.identify_sfp(hw);
     if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-        DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
+        dev_err(&adapter->pdev->dev, "failed to initialize because "
+                "an unsupported SFP+ module type was detected.\n"
+                "Reload the driver after installing a supported "
+                "module.\n");
         ixgbe_down(adapter);
         return;
     }
@@ -5513,8 +5526,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                            round_jiffies(jiffies + (2 * HZ)));
         err = 0;
     } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-        dev_err(&adapter->pdev->dev, "failed to load because an "
-                "unsupported SFP+ module type was detected.\n");
+        dev_err(&adapter->pdev->dev, "failed to initialize because "
+                "an unsupported SFP+ module type was detected.\n"
+                "Reload the driver after installing a supported "
+                "module.\n");
         goto err_sw_init;
     } else if (err) {
         dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);

@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                 return crc == crc2;
 
             if (unlikely(crc != crc2)) {
-                dev->stats.rx_errors++;
+                dev->net->stats.rx_errors++;
                 dev_kfree_skb_any(skb2);
             } else
                 usbnet_skb_return(dev, skb2);

@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
     len = (skb->data[1] | (skb->data[2] << 8)) - 4;
 
     if (unlikely(status & 0xbf)) {
-        if (status & 0x01) dev->stats.rx_fifo_errors++;
-        if (status & 0x02) dev->stats.rx_crc_errors++;
-        if (status & 0x04) dev->stats.rx_frame_errors++;
-        if (status & 0x20) dev->stats.rx_missed_errors++;
-        if (status & 0x90) dev->stats.rx_length_errors++;
+        if (status & 0x01) dev->net->stats.rx_fifo_errors++;
+        if (status & 0x02) dev->net->stats.rx_crc_errors++;
+        if (status & 0x04) dev->net->stats.rx_frame_errors++;
+        if (status & 0x20) dev->net->stats.rx_missed_errors++;
+        if (status & 0x90) dev->net->stats.rx_length_errors++;
         return 0;
     }
 

@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
         dbg("rx framesize %d range %d..%d mtu %d", skb->len,
             net->hard_header_len, dev->hard_mtu, net->mtu);
 #endif
-        dev->stats.rx_frame_errors++;
+        dev->net->stats.rx_frame_errors++;
         nc_ensure_sync(dev);
         return 0;
     }
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
     hdr_len = le16_to_cpup(&header->hdr_len);
     packet_len = le16_to_cpup(&header->packet_len);
     if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
-        dev->stats.rx_frame_errors++;
+        dev->net->stats.rx_frame_errors++;
         dbg("packet too big, %d", packet_len);
         nc_ensure_sync(dev);
         return 0;
     } else if (hdr_len < MIN_HEADER) {
-        dev->stats.rx_frame_errors++;
+        dev->net->stats.rx_frame_errors++;
         dbg("header too short, %d", hdr_len);
         nc_ensure_sync(dev);
         return 0;
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
     if ((packet_len & 0x01) == 0) {
         if (skb->data [packet_len] != PAD_BYTE) {
-            dev->stats.rx_frame_errors++;
+            dev->net->stats.rx_frame_errors++;
             dbg("bad pad");
             return 0;
         }
         skb_trim(skb, skb->len - 1);
     }
     if (skb->len != packet_len) {
-        dev->stats.rx_frame_errors++;
+        dev->net->stats.rx_frame_errors++;
         dbg("bad packet len %d (expected %d)",
             skb->len, packet_len);
         nc_ensure_sync(dev);
         return 0;
     }
     if (header->packet_id != get_unaligned(&trailer->packet_id)) {
-        dev->stats.rx_fifo_errors++;
+        dev->net->stats.rx_fifo_errors++;
         dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
             le16_to_cpu(header->packet_id),
             le16_to_cpu(trailer->packet_id));

@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
     if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
             || skb->len < msg_len
             || (data_offset + data_len + 8) > msg_len)) {
-        dev->stats.rx_frame_errors++;
+        dev->net->stats.rx_frame_errors++;
         devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
             le32_to_cpu(hdr->msg_type),
             msg_len, data_offset, data_len, skb->len);

@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
         if (unlikely(header & RX_STS_ES_)) {
             if (netif_msg_rx_err(dev))
                 devdbg(dev, "Error header=0x%08x", header);
-            dev->stats.rx_errors++;
-            dev->stats.rx_dropped++;
+            dev->net->stats.rx_errors++;
+            dev->net->stats.rx_dropped++;
 
             if (header & RX_STS_CRC_) {
-                dev->stats.rx_crc_errors++;
+                dev->net->stats.rx_crc_errors++;
             } else {
                 if (header & (RX_STS_TL_ | RX_STS_RF_))
-                    dev->stats.rx_frame_errors++;
+                    dev->net->stats.rx_frame_errors++;
 
                 if ((header & RX_STS_LE_) &&
                     (!(header & RX_STS_FT_)))
-                    dev->stats.rx_length_errors++;
+                    dev->net->stats.rx_length_errors++;
             }
         } else {
             /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */

@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
     int status;
 
     skb->protocol = eth_type_trans (skb, dev->net);
-    dev->stats.rx_packets++;
-    dev->stats.rx_bytes += skb->len;
+    dev->net->stats.rx_packets++;
+    dev->net->stats.rx_bytes += skb->len;
 
     if (netif_msg_rx_status (dev))
         devdbg (dev, "< rx, len %zu, type 0x%x",
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
         if (netif_msg_rx_err (dev))
             devdbg (dev, "drop");
error:
-        dev->stats.rx_errors++;
+        dev->net->stats.rx_errors++;
         skb_queue_tail (&dev->done, skb);
     }
 }
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb)
     case 0:
         if (skb->len < dev->net->hard_header_len) {
             entry->state = rx_cleanup;
-            dev->stats.rx_errors++;
-            dev->stats.rx_length_errors++;
+            dev->net->stats.rx_errors++;
+            dev->net->stats.rx_length_errors++;
             if (netif_msg_rx_err (dev))
                 devdbg (dev, "rx length %d", skb->len);
         }
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb)
      * storm, recovering as needed.
      */
     case -EPIPE:
-        dev->stats.rx_errors++;
+        dev->net->stats.rx_errors++;
         usbnet_defer_kevent (dev, EVENT_RX_HALT);
         // FALLTHROUGH
 
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb)
     case -EPROTO:
     case -ETIME:
     case -EILSEQ:
-        dev->stats.rx_errors++;
+        dev->net->stats.rx_errors++;
         if (!timer_pending (&dev->delay)) {
             mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
             if (netif_msg_link (dev))
@@ -465,12 +465,12 @@ block:
 
     /* data overrun ... flush fifo? */
     case -EOVERFLOW:
-        dev->stats.rx_over_errors++;
+        dev->net->stats.rx_over_errors++;
         // FALLTHROUGH
 
     default:
         entry->state = rx_cleanup;
-        dev->stats.rx_errors++;
+        dev->net->stats.rx_errors++;
         if (netif_msg_rx_err (dev))
             devdbg (dev, "rx status %d", urb_status);
         break;
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net)
 
     if (netif_msg_ifdown (dev))
         devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
-            dev->stats.rx_packets, dev->stats.tx_packets,
-            dev->stats.rx_errors, dev->stats.tx_errors
+            net->stats.rx_packets, net->stats.tx_packets,
+            net->stats.rx_errors, net->stats.tx_errors
             );
 
     // ensure there are no more active urbs
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb)
     struct usbnet *dev = entry->dev;
 
     if (urb->status == 0) {
-        dev->stats.tx_packets++;
-        dev->stats.tx_bytes += entry->length;
+        dev->net->stats.tx_packets++;
+        dev->net->stats.tx_bytes += entry->length;
     } else {
-        dev->stats.tx_errors++;
+        dev->net->stats.tx_errors++;
 
         switch (urb->status) {
         case -EPIPE:
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
             devdbg (dev, "drop, code %d", retval);
drop:
         retval = NET_XMIT_SUCCESS;
-        dev->stats.tx_dropped++;
+        dev->net->stats.tx_dropped++;
         if (skb)
             dev_kfree_skb_any (skb);
         usb_free_urb (urb);

@@ -81,4 +81,17 @@ struct xt_conntrack_mtinfo1 {
     __u8 state_mask, status_mask;
 };
 
+struct xt_conntrack_mtinfo2 {
+    union nf_inet_addr origsrc_addr, origsrc_mask;
+    union nf_inet_addr origdst_addr, origdst_mask;
+    union nf_inet_addr replsrc_addr, replsrc_mask;
+    union nf_inet_addr repldst_addr, repldst_mask;
+    __u32 expires_min, expires_max;
+    __u16 l4proto;
+    __be16 origsrc_port, origdst_port;
+    __be16 replsrc_port, repldst_port;
+    __u16 match_flags, invert_flags;
+    __u16 state_mask, status_mask;
+};
+
 #endif /*_XT_CONNTRACK_H*/

@@ -20,6 +20,8 @@
 #ifndef _XT_OSF_H
 #define _XT_OSF_H
 
+#include <linux/types.h>
+
 #define MAXGENRELEN 32
 
 #define XT_OSF_GENRE (1<<0)

@@ -42,7 +42,6 @@ struct usbnet {
 
     /* protocol/interface state */
     struct net_device *net;
-    struct net_device_stats stats;
     int msg_enable;
     unsigned long data [5];
     u32 xid;

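With the private struct net_device_stats member gone from struct usbnet, every usb network driver converted above now counts into the stats block that struct net_device already carries (dev->net->stats), so usbnet no longer needs its own get_stats callback. A simplified userspace model of that indirection (the structs are toy stand-ins, not the kernel's definitions):

/* Sketch only: count through the net-device's embedded stats block. */
#include <stdio.h>

struct net_device_stats { unsigned long rx_packets, rx_bytes; };
struct net_device       { struct net_device_stats stats; };
struct usbnet           { struct net_device *net; /* no private stats field */ };

static void rx_ok(struct usbnet *dev, unsigned long len)
{
    dev->net->stats.rx_packets++;     /* was: dev->stats.rx_packets++ */
    dev->net->stats.rx_bytes += len;  /* was: dev->stats.rx_bytes += len */
}

int main(void)
{
    struct net_device ndev = { 0 };
    struct usbnet dev = { .net = &ndev };

    rx_ok(&dev, 1514);
    printf("rx %lu packets, %lu bytes\n",
           ndev.stats.rx_packets, ndev.stats.rx_bytes);
    return 0;
}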
@@ -258,8 +258,8 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
 /* Update TCP window tracking data when NAT mangles the packet */
 extern void nf_conntrack_tcp_update(const struct sk_buff *skb,
                                     unsigned int dataoff,
-                                    struct nf_conn *ct,
-                                    int dir);
+                                    struct nf_conn *ct, int dir,
+                                    s16 offset);
 
 /* Fake conntrack entry for untracked connections */
 extern struct nf_conn nf_conntrack_untracked;

@@ -276,6 +276,9 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
     else
         return NULL;
 
+    if (!dev)
+        return NULL;
+
     if (dev->type != ARPHRD_IEEE802154) {
         dev_put(dev);
         return NULL;
@@ -521,3 +524,6 @@ static void __exit ieee802154_nl_exit(void)
 }
 module_exit(ieee802154_nl_exit);
 
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ieee 802.15.4 configuration interface");
+

@@ -801,11 +801,8 @@ static int arp_process(struct sk_buff *skb)
      * cache.
      */
 
-    /*
-     * Special case: IPv4 duplicate address detection packet (RFC2131)
-     * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4)
-     */
-    if (sip == 0 || tip == sip) {
+    /* Special case: IPv4 duplicate address detection packet (RFC2131) */
+    if (sip == 0) {
         if (arp->ar_op == htons(ARPOP_REQUEST) &&
             inet_addr_type(net, tip) == RTN_LOCAL &&
             !arp_ignore(in_dev, sip, tip))

@@ -1021,6 +1021,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
                            (struct node *)tn, wasfull);
 
         tp = node_parent((struct node *) tn);
+        if (!tp)
+            rcu_assign_pointer(t->trie, (struct node *)tn);
+
         tnode_free_flush();
         if (!tp)
             break;

@@ -191,7 +191,8 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
              ct, ctinfo);
     /* Tell TCP window tracking about seq change */
     nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
-                            ct, CTINFO2DIR(ctinfo));
+                            ct, CTINFO2DIR(ctinfo),
+                            (int)rep_len - (int)match_len);
 
     nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
 }
@@ -377,6 +378,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
     struct tcphdr *tcph;
     int dir;
     __be32 newseq, newack;
+    s16 seqoff, ackoff;
     struct nf_conn_nat *nat = nfct_nat(ct);
     struct nf_nat_seq *this_way, *other_way;
 
@@ -390,15 +392,18 @@ nf_nat_seq_adjust(struct sk_buff *skb,
 
     tcph = (void *)skb->data + ip_hdrlen(skb);
     if (after(ntohl(tcph->seq), this_way->correction_pos))
-        newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
+        seqoff = this_way->offset_after;
     else
-        newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
+        seqoff = this_way->offset_before;
 
     if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
               other_way->correction_pos))
-        newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
+        ackoff = other_way->offset_after;
     else
-        newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
+        ackoff = other_way->offset_before;
+
+    newseq = htonl(ntohl(tcph->seq) + seqoff);
+    newack = htonl(ntohl(tcph->ack_seq) - ackoff);
 
     inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
     inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
@@ -413,7 +418,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
     if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
         return 0;
 
-    nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir);
+    nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
 
     return 1;
 }

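The rewritten nf_nat_seq_adjust() above first selects the signed offsets (seqoff, ackoff) and only then builds the adjusted sequence and acknowledgment numbers, which lets it hand the chosen seqoff on to nf_conntrack_tcp_update(). A runnable sketch of that arithmetic, with made-up correction points and offsets standing in for the NAT helper's state:

/* Sketch only: the seq/ack offset selection and rewrite. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int after(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) > 0;   /* 32-bit serial-number comparison */
}

int main(void)
{
    uint32_t seq = htonl(1000), ack_seq = htonl(500);
    uint32_t this_correction_pos = 900, other_correction_pos = 400;
    int16_t this_off_before = 0, this_off_after = 23;    /* e.g. a payload rewrite */
    int16_t other_off_before = 0, other_off_after = 0;
    int16_t seqoff, ackoff;

    /* pick the offsets first ... */
    seqoff = after(ntohl(seq), this_correction_pos)
             ? this_off_after : this_off_before;
    ackoff = after(ntohl(ack_seq) - other_off_before, other_correction_pos)
             ? other_off_after : other_off_before;

    /* ... then build the rewritten fields */
    uint32_t newseq = htonl(ntohl(seq) + seqoff);
    uint32_t newack = htonl(ntohl(ack_seq) - ackoff);

    printf("seq %u -> %u, ack %u -> %u (seqoff %d)\n",
           (unsigned)ntohl(seq), (unsigned)ntohl(newseq),
           (unsigned)ntohl(ack_seq), (unsigned)ntohl(newack), seqoff);
    return 0;
}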
@@ -903,13 +903,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
         iov++;
 
         while (seglen > 0) {
-            int copy;
+            int copy = 0;
+            int max = size_goal;
 
             skb = tcp_write_queue_tail(sk);
+            if (tcp_send_head(sk)) {
+                if (skb->ip_summed == CHECKSUM_NONE)
+                    max = mss_now;
+                copy = max - skb->len;
+            }
 
-            if (!tcp_send_head(sk) ||
-                (copy = size_goal - skb->len) <= 0) {
-
+            if (copy <= 0) {
new_segment:
                 /* Allocate new segment. If the interface is SG,
                  * allocate skb fitting to single page.
@@ -930,6 +934,7 @@ new_segment:
 
                 skb_entail(sk, skb);
                 copy = size_goal;
+                max = size_goal;
             }
 
             /* Try to append data to the end of skb. */
@@ -1028,7 +1033,7 @@ new_segment:
             if ((seglen -= copy) == 0 && iovlen == 0)
                 goto out;
 
-            if (skb->len < size_goal || (flags & MSG_OOB))
+            if (skb->len < max || (flags & MSG_OOB))
                 continue;
 
             if (forced_push(tp)) {

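The tcp_sendmsg() hunk above caps the data appended to the tail skb at mss_now instead of size_goal whenever that skb is not hardware-checksummed, so (together with the tcp_set_skb_tso_segs() change below) CHECKSUM_NONE skbs are never grown into TSO super-packets. A runnable sketch of just that copy computation, with made-up numbers in place of the socket state:

/* Sketch only: the "how much may I append to the tail skb" budget. */
#include <stdbool.h>
#include <stdio.h>

static int copy_budget(bool have_tail_skb, bool tail_is_checksum_none,
                       int skb_len, int mss_now, int size_goal)
{
    int copy = 0;
    int max = size_goal;

    if (have_tail_skb) {
        if (tail_is_checksum_none)
            max = mss_now;   /* never grow a non-checksummed skb past one MSS */
        copy = max - skb_len;
    }
    return copy;             /* <= 0 means "start a new segment" */
}

int main(void)
{
    int mss_now = 1448, size_goal = 64 * 1024;

    printf("offloaded tail: %d\n", copy_budget(true, false, 3000, mss_now, size_goal));
    printf("CHECKSUM_NONE:  %d\n", copy_budget(true, true, 1000, mss_now, size_goal));
    printf("no tail skb:    %d\n", copy_budget(false, false, 0, mss_now, size_goal));
    return 0;
}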
@@ -725,7 +725,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
                                  unsigned int mss_now)
 {
-    if (skb->len <= mss_now || !sk_can_gso(sk)) {
+    if (skb->len <= mss_now || !sk_can_gso(sk) ||
+        skb->ip_summed == CHECKSUM_NONE) {
         /* Avoid the costly divide in the normal
          * non-TSO case.
          */

@@ -617,8 +617,10 @@ err1:
 void nf_conntrack_expect_fini(struct net *net)
 {
     exp_proc_remove(net);
-    if (net_eq(net, &init_net))
+    if (net_eq(net, &init_net)) {
+        rcu_barrier(); /* Wait for call_rcu() before destroy */
         kmem_cache_destroy(nf_ct_expect_cachep);
+    }
     nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
                          nf_ct_expect_hsize);
 }

@@ -186,6 +186,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
     rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
     update_alloc_size(type);
     mutex_unlock(&nf_ct_ext_type_mutex);
-    synchronize_rcu();
+    rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);

@@ -720,8 +720,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
 /* Caller must linearize skb at tcp header. */
 void nf_conntrack_tcp_update(const struct sk_buff *skb,
                              unsigned int dataoff,
-                             struct nf_conn *ct,
-                             int dir)
+                             struct nf_conn *ct, int dir,
+                             s16 offset)
 {
     const struct tcphdr *tcph = (const void *)skb->data + dataoff;
     const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
@@ -734,7 +734,7 @@ void nf_conntrack_tcp_update(const struct sk_buff *skb,
     /*
      * We have to worry for the ack in the reply packet only...
      */
-    if (after(end, ct->proto.tcp.seen[dir].td_end))
+    if (ct->proto.tcp.seen[dir].td_end + offset == end)
         ct->proto.tcp.seen[dir].td_end = end;
     ct->proto.tcp.last_end = end;
     spin_unlock_bh(&ct->lock);

@@ -129,7 +129,7 @@ conntrack_addrcmp(const union nf_inet_addr *kaddr,
 
 static inline bool
 conntrack_mt_origsrc(const struct nf_conn *ct,
-                     const struct xt_conntrack_mtinfo1 *info,
+                     const struct xt_conntrack_mtinfo2 *info,
                      u_int8_t family)
 {
     return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
@@ -138,7 +138,7 @@ conntrack_mt_origsrc(const struct nf_conn *ct,
 
 static inline bool
 conntrack_mt_origdst(const struct nf_conn *ct,
-                     const struct xt_conntrack_mtinfo1 *info,
+                     const struct xt_conntrack_mtinfo2 *info,
                      u_int8_t family)
 {
     return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
@@ -147,7 +147,7 @@ conntrack_mt_origdst(const struct nf_conn *ct,
 
 static inline bool
 conntrack_mt_replsrc(const struct nf_conn *ct,
-                     const struct xt_conntrack_mtinfo1 *info,
+                     const struct xt_conntrack_mtinfo2 *info,
                      u_int8_t family)
 {
     return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
@@ -156,7 +156,7 @@ conntrack_mt_replsrc(const struct nf_conn *ct,
 
 static inline bool
 conntrack_mt_repldst(const struct nf_conn *ct,
-                     const struct xt_conntrack_mtinfo1 *info,
+                     const struct xt_conntrack_mtinfo2 *info,
                      u_int8_t family)
 {
     return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
@@ -164,7 +164,7 @@ conntrack_mt_repldst(const struct nf_conn *ct,
 }
 
 static inline bool
-ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
+ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
                     const struct nf_conn *ct)
 {
     const struct nf_conntrack_tuple *tuple;
@@ -204,7 +204,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
 static bool
 conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
 {
-    const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+    const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
     enum ip_conntrack_info ctinfo;
     const struct nf_conn *ct;
     unsigned int statebit;
@@ -278,6 +278,16 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
     return true;
 }
 
+static bool
+conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
+{
+    const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo;
+    struct xt_match_param newpar = *par;
+
+    newpar.matchinfo = *info;
+    return conntrack_mt(skb, &newpar);
+}
+
 static bool conntrack_mt_check(const struct xt_mtchk_param *par)
 {
     if (nf_ct_l3proto_try_module_get(par->family) < 0) {
@@ -288,11 +298,45 @@ static bool conntrack_mt_check(const struct xt_mtchk_param *par)
     return true;
 }
 
+static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par)
+{
+    struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+    struct xt_conntrack_mtinfo2 *up;
+    int ret = conntrack_mt_check(par);
+
+    if (ret < 0)
+        return ret;
+
+    up = kmalloc(sizeof(*up), GFP_KERNEL);
+    if (up == NULL) {
+        nf_ct_l3proto_module_put(par->family);
+        return -ENOMEM;
+    }
+
+    /*
+     * The strategy here is to minimize the overhead of v1 matching,
+     * by prebuilding a v2 struct and putting the pointer into the
+     * v1 dataspace.
+     */
+    memcpy(up, info, offsetof(typeof(*info), state_mask));
+    up->state_mask = info->state_mask;
+    up->status_mask = info->status_mask;
+    *(void **)info = up;
+    return true;
+}
+
 static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
 {
     nf_ct_l3proto_module_put(par->family);
 }
 
+static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par)
+{
+    struct xt_conntrack_mtinfo2 **info = par->matchinfo;
+    kfree(*info);
+    conntrack_mt_destroy(par);
+}
+
 #ifdef CONFIG_COMPAT
 struct compat_xt_conntrack_info
 {
@@ -363,6 +407,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
         .revision = 1,
         .family = NFPROTO_UNSPEC,
         .matchsize = sizeof(struct xt_conntrack_mtinfo1),
+        .match = conntrack_mt_v1,
+        .checkentry = conntrack_mt_check_v1,
+        .destroy = conntrack_mt_destroy_v1,
+        .me = THIS_MODULE,
+    },
+    {
+        .name = "conntrack",
+        .revision = 2,
+        .family = NFPROTO_UNSPEC,
+        .matchsize = sizeof(struct xt_conntrack_mtinfo2),
         .match = conntrack_mt,
         .checkentry = conntrack_mt_check,
         .destroy = conntrack_mt_destroy,

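Revision 1 of the conntrack match keeps its old xt_conntrack_mtinfo1 layout toward userspace, but the checkentry hook above now builds a heap copy in the newer xt_conntrack_mtinfo2 layout and stores the pointer to that copy inside the v1 matchinfo blob, so the per-packet path only ever handles one layout. A runnable, simplified model of that shim (the structs are toy stand-ins, not the real xt_conntrack layouts):

/* Sketch only: upgrade an old blob once and hide the pointer inside it.
 * The old blob must be at least pointer-sized, as it is here and in the
 * real mtinfo1 layout. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct info_v1 { int proto; unsigned char state_mask, status_mask; };
struct info_v2 { int proto; unsigned short state_mask, status_mask; };

static int match_v2(const struct info_v2 *info, int pkt_proto)
{
    return info->proto == pkt_proto;
}

/* checkentry-time upgrade: build a v2 copy, stash its pointer in the v1 blob */
static int check_v1(struct info_v1 *info)
{
    struct info_v2 *up = malloc(sizeof(*up));

    if (!up)
        return -1;
    up->proto = info->proto;
    up->state_mask = info->state_mask;
    up->status_mask = info->status_mask;
    memcpy(info, &up, sizeof(up));   /* i.e. *(void **)info = up; */
    return 0;
}

/* per-packet v1 entry point: unwrap the stored v2 pointer and reuse match_v2 */
static int match_v1(const struct info_v1 *info, int pkt_proto)
{
    struct info_v2 *up;

    memcpy(&up, info, sizeof(up));
    return match_v2(up, pkt_proto);
}

static void destroy_v1(struct info_v1 *info)
{
    struct info_v2 *up;

    memcpy(&up, info, sizeof(up));
    free(up);
}

int main(void)
{
    struct info_v1 info = { .proto = 6, .state_mask = 1, .status_mask = 0 };

    if (check_v1(&info) == 0) {
        printf("TCP packet matches: %d\n", match_v1(&info, 6));
        destroy_v1(&info);
    }
    return 0;
}

The real code frees the upgraded copy in the dedicated conntrack_mt_destroy_v1() hook, mirrored here by destroy_v1().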
@@ -407,7 +407,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
     }
     dst = dst_clone(tp->dst);
     skb_dst_set(nskb, dst);
-    if (dst)
+    if (!dst)
         goto no_route;
 
     /* Build the SCTP header. */

@@ -668,22 +668,10 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
     hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
         if (x->props.family != family ||
             x->id.spi != spi ||
-            x->id.proto != proto)
+            x->id.proto != proto ||
+            xfrm_addr_cmp(&x->id.daddr, daddr, family))
             continue;
 
-        switch (family) {
-        case AF_INET:
-            if (x->id.daddr.a4 != daddr->a4)
-                continue;
-            break;
-        case AF_INET6:
-            if (!ipv6_addr_equal((struct in6_addr *)daddr,
-                                 (struct in6_addr *)
-                                 x->id.daddr.a6))
-                continue;
-            break;
-        }
-
         xfrm_state_hold(x);
         return x;
     }
@@ -699,26 +687,11 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre
 
     hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
         if (x->props.family != family ||
-            x->id.proto != proto)
+            x->id.proto != proto ||
+            xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
+            xfrm_addr_cmp(&x->props.saddr, saddr, family))
             continue;
 
-        switch (family) {
-        case AF_INET:
-            if (x->id.daddr.a4 != daddr->a4 ||
-                x->props.saddr.a4 != saddr->a4)
-                continue;
-            break;
-        case AF_INET6:
-            if (!ipv6_addr_equal((struct in6_addr *)daddr,
-                                 (struct in6_addr *)
-                                 x->id.daddr.a6) ||
-                !ipv6_addr_equal((struct in6_addr *)saddr,
-                                 (struct in6_addr *)
-                                 x->props.saddr.a6))
-                continue;
-            break;
-        }
-
         xfrm_state_hold(x);
         return x;
     }
@@ -1001,25 +974,11 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
             x->props.family != family ||
             x->km.state != XFRM_STATE_ACQ ||
             x->id.spi != 0 ||
-            x->id.proto != proto)
+            x->id.proto != proto ||
+            xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
+            xfrm_addr_cmp(&x->props.saddr, saddr, family))
             continue;
 
-        switch (family) {
-        case AF_INET:
-            if (x->id.daddr.a4 != daddr->a4 ||
-                x->props.saddr.a4 != saddr->a4)
-                continue;
-            break;
-        case AF_INET6:
-            if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
-                                 (struct in6_addr *)daddr) ||
-                !ipv6_addr_equal((struct in6_addr *)
-                                 x->props.saddr.a6,
-                                 (struct in6_addr *)saddr))
-                continue;
-            break;
-        }
-
         xfrm_state_hold(x);
         return x;
     }

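The three xfrm_state lookup helpers above replace their per-family switch statements with xfrm_addr_cmp(), a single family-keyed address comparison. A runnable sketch of such a helper (the union and function here are simplified stand-ins, not the kernel's definitions):

/* Sketch only: one comparison routine keyed on the address family. */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

union xfrm_addr {
    struct in_addr  a4;
    struct in6_addr a6;
};

/* returns non-zero when the addresses differ, like a memcmp */
static int addr_cmp(const union xfrm_addr *a, const union xfrm_addr *b,
                    int family)
{
    switch (family) {
    case AF_INET:
        return memcmp(&a->a4, &b->a4, sizeof(a->a4));   /* 4 bytes */
    case AF_INET6:
        return memcmp(&a->a6, &b->a6, sizeof(a->a6));   /* 16 bytes */
    default:
        return -1;
    }
}

int main(void)
{
    union xfrm_addr x = { 0 }, y = { 0 };

    inet_pton(AF_INET, "192.0.2.1", &x.a4);
    inet_pton(AF_INET, "192.0.2.1", &y.a4);
    printf("v4 equal: %s\n", addr_cmp(&x, &y, AF_INET) ? "no" : "yes");

    inet_pton(AF_INET6, "2001:db8::1", &x.a6);
    inet_pton(AF_INET6, "2001:db8::2", &y.a6);
    printf("v6 equal: %s\n", addr_cmp(&x, &y, AF_INET6) ? "no" : "yes");
    return 0;
}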