[NET]: Do not check netif_running() and carrier state in ->poll()

Drivers do this to try to break out of the ->poll()'ing loop
when the device is being brought administratively down.

Now that we have a napi_disable() "pending" state we are going
to solve that problem generically.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2008-01-07 20:48:21 -08:00
parent a0a46196cd
commit 4ec2411980
15 changed files with 9 additions and 55 deletions

View file

@@ -1997,7 +1997,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
tx_cleaned = e100_tx_clean(nic); tx_cleaned = e100_tx_clean(nic);
/* If no Rx and Tx cleanup work was done, exit polling mode. */ /* If no Rx and Tx cleanup work was done, exit polling mode. */
if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { if((!tx_cleaned && (work_done == 0))) {
netif_rx_complete(netdev, napi); netif_rx_complete(netdev, napi);
e100_enable_irq(nic); e100_enable_irq(nic);
} }

View file

@@ -3924,10 +3924,6 @@ e1000_clean(struct napi_struct *napi, int budget)
/* Must NOT use netdev_priv macro here. */ /* Must NOT use netdev_priv macro here. */
adapter = poll_dev->priv; adapter = poll_dev->priv;
/* Keep link state information with original netdev */
if (!netif_carrier_ok(poll_dev))
goto quit_polling;
/* e1000_clean is called per-cpu. This lock protects /* e1000_clean is called per-cpu. This lock protects
* tx_ring[0] from being cleaned by multiple cpus * tx_ring[0] from being cleaned by multiple cpus
* simultaneously. A failure obtaining the lock means * simultaneously. A failure obtaining the lock means
@@ -3942,9 +3938,7 @@ e1000_clean(struct napi_struct *napi, int budget)
&work_done, budget); &work_done, budget);
/* If no Tx and not enough Rx work done, exit the polling mode */ /* If no Tx and not enough Rx work done, exit the polling mode */
if ((!tx_cleaned && (work_done == 0)) || if ((!tx_cleaned && (work_done == 0))) {
!netif_running(poll_dev)) {
quit_polling:
if (likely(adapter->itr_setting & 3)) if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter); e1000_set_itr(adapter);
netif_rx_complete(poll_dev, napi); netif_rx_complete(poll_dev, napi);

View file

@@ -1389,10 +1389,6 @@ static int e1000_clean(struct napi_struct *napi, int budget)
/* Must NOT use netdev_priv macro here. */ /* Must NOT use netdev_priv macro here. */
adapter = poll_dev->priv; adapter = poll_dev->priv;
/* Keep link state information with original netdev */
if (!netif_carrier_ok(poll_dev))
goto quit_polling;
/* e1000_clean is called per-cpu. This lock protects /* e1000_clean is called per-cpu. This lock protects
* tx_ring from being cleaned by multiple cpus * tx_ring from being cleaned by multiple cpus
* simultaneously. A failure obtaining the lock means * simultaneously. A failure obtaining the lock means
@@ -1405,9 +1401,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
adapter->clean_rx(adapter, &work_done, budget); adapter->clean_rx(adapter, &work_done, budget);
/* If no Tx and not enough Rx work done, exit the polling mode */ /* If no Tx and not enough Rx work done, exit the polling mode */
if ((!tx_cleaned && (work_done < budget)) || if ((!tx_cleaned && (work_done < budget))) {
!netif_running(poll_dev)) {
quit_polling:
if (adapter->itr_setting & 3) if (adapter->itr_setting & 3)
e1000_set_itr(adapter); e1000_set_itr(adapter);
netif_rx_complete(poll_dev, napi); netif_rx_complete(poll_dev, napi);

View file

@@ -1273,7 +1273,7 @@ rx_action:
epic_rx_err(dev, ep); epic_rx_err(dev, ep);
if (netif_running(dev) && (work_done < budget)) { if (work_done < budget) {
unsigned long flags; unsigned long flags;
int more; int more;

View file

@@ -476,11 +476,6 @@ static int fec_enet_rx_common(struct fec_enet_private *ep,
__u16 pkt_len, sc; __u16 pkt_len, sc;
int curidx; int curidx;
if (fpi->use_napi) {
if (!netif_running(dev))
return 0;
}
/* /*
* First, grab all of the stats for the incoming packet. * First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition. * These get messed up if we get called due to a busy condition.

View file

@@ -96,9 +96,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
u16 pkt_len, sc; u16 pkt_len, sc;
int curidx; int curidx;
if (!netif_running(dev))
return 0;
/* /*
* First, grab all of the stats for the incoming packet. * First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition. * These get messed up if we get called due to a busy condition.

View file

@@ -1794,7 +1794,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
ixgb_clean_rx_irq(adapter, &work_done, budget); ixgb_clean_rx_irq(adapter, &work_done, budget);
/* if no Tx and not enough Rx work done, exit the polling mode */ /* if no Tx and not enough Rx work done, exit the polling mode */
if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { if((!tx_cleaned && (work_done == 0))) {
netif_rx_complete(netdev, napi); netif_rx_complete(netdev, napi);
ixgb_irq_enable(adapter); ixgb_irq_enable(adapter);
} }

View file

@@ -1470,19 +1470,13 @@ static int ixgbe_clean(struct napi_struct *napi, int budget)
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
int tx_cleaned = 0, work_done = 0; int tx_cleaned = 0, work_done = 0;
/* Keep link state information with original netdev */
if (!netif_carrier_ok(adapter->netdev))
goto quit_polling;
/* In non-MSIX case, there is no multi-Tx/Rx queue */ /* In non-MSIX case, there is no multi-Tx/Rx queue */
tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done, ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
budget); budget);
/* If no Tx and not enough Rx work done, exit the polling mode */ /* If no Tx and not enough Rx work done, exit the polling mode */
if ((!tx_cleaned && (work_done < budget)) || if ((!tx_cleaned && (work_done < budget))) {
!netif_running(adapter->netdev)) {
quit_polling:
netif_rx_complete(netdev, napi); netif_rx_complete(netdev, napi);
ixgbe_irq_enable(adapter); ixgbe_irq_enable(adapter);
} }

View file

@@ -135,8 +135,6 @@ static int ixpdev_poll(struct napi_struct *napi, int budget)
struct net_device *dev = ip->dev; struct net_device *dev = ip->dev;
int rx; int rx;
/* @@@ Have to stop polling when nds[0] is administratively
* downed while we are polling. */
rx = 0; rx = 0;
do { do {
ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff); ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);

View file

@@ -1239,7 +1239,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
/* process as many rx events as NAPI will allow */ /* process as many rx events as NAPI will allow */
work_done = myri10ge_clean_rx_done(mgp, budget); work_done = myri10ge_clean_rx_done(mgp, budget);
if (work_done < budget || !netif_running(netdev)) { if (work_done < budget) {
netif_rx_complete(netdev, napi); netif_rx_complete(netdev, napi);
put_be32(htonl(3), mgp->irq_claim); put_be32(htonl(3), mgp->irq_claim);
} }

View file

@@ -2266,7 +2266,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
/* Reenable interrupts providing nothing is trying to shut /* Reenable interrupts providing nothing is trying to shut
* the chip down. */ * the chip down. */
spin_lock(&np->lock); spin_lock(&np->lock);
if (!np->hands_off && netif_running(dev)) if (!np->hands_off)
natsemi_irq_enable(dev); natsemi_irq_enable(dev);
spin_unlock(&np->lock); spin_unlock(&np->lock);

View file

@@ -2320,14 +2320,9 @@ static int ql_poll(struct napi_struct *napi, int budget)
unsigned long hw_flags; unsigned long hw_flags;
struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
if (!netif_carrier_ok(ndev))
goto quit_polling;
ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
if (tx_cleaned + rx_cleaned != budget || if (tx_cleaned + rx_cleaned != budget) {
!netif_running(ndev)) {
quit_polling:
spin_lock_irqsave(&qdev->hw_lock, hw_flags); spin_lock_irqsave(&qdev->hw_lock, hw_flags);
__netif_rx_complete(ndev, napi); __netif_rx_complete(ndev, napi);
ql_update_small_bufq_prod_index(qdev); ql_update_small_bufq_prod_index(qdev);

View file

@@ -2704,9 +2704,6 @@ static int s2io_poll(struct napi_struct *napi, int budget)
struct XENA_dev_config __iomem *bar0 = nic->bar0; struct XENA_dev_config __iomem *bar0 = nic->bar0;
int i; int i;
if (!is_s2io_card_up(nic))
return 0;
mac_control = &nic->mac_control; mac_control = &nic->mac_control;
config = &nic->config; config = &nic->config;

View file

@@ -117,9 +117,6 @@ int tulip_poll(struct napi_struct *napi, int budget)
int received = 0; int received = 0;
#endif #endif
if (!netif_running(dev))
goto done;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
/* that one buffer is needed for mit activation; or might be a /* that one buffer is needed for mit activation; or might be a
@@ -261,8 +258,6 @@ int tulip_poll(struct napi_struct *napi, int budget)
* finally: amount of IO did not increase at all. */ * finally: amount of IO did not increase at all. */
} while ((ioread32(tp->base_addr + CSR5) & RxIntr)); } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
done:
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
/* We use this simplistic scheme for IM. It's proven by /* We use this simplistic scheme for IM. It's proven by

View file

@@ -852,11 +852,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
spin_lock(&np->rx_lock); spin_lock(&np->rx_lock);
if (unlikely(!netif_carrier_ok(dev))) {
spin_unlock(&np->rx_lock);
return 0;
}
skb_queue_head_init(&rxq); skb_queue_head_init(&rxq);
skb_queue_head_init(&errq); skb_queue_head_init(&errq);
skb_queue_head_init(&tmpq); skb_queue_head_init(&tmpq);