net: Optimize hard_start_xmit() return checking

Recent changes in the TX error propagation require additional checking
and masking of values returned from hard_start_xmit(), mainly to
separate cases where skb was consumed. This aim can be simplified by
changing the order of NETDEV_TX and NET_XMIT codes, because the latter
are treated similarly to negative (ERRNO) values.

After this change much simpler dev_xmit_complete() is also used in
sch_direct_xmit(), so it is moved to netdevice.h.

Additionally NET_RX definitions in netdevice.h are moved up from
between TX codes to avoid confusion while reading the TX comment.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit was authored by Jarek Poplawski on 2009-11-15 07:20:12 +00:00 and committed by David S. Miller.
parent cb43e23435
commit 9a1654ba0b
3 changed files with 35 additions and 47 deletions

View file

@@ -63,6 +63,10 @@ struct wireless_dev;
#define HAVE_FREE_NETDEV /* free_netdev() */ #define HAVE_FREE_NETDEV /* free_netdev() */
#define HAVE_NETDEV_PRIV /* netdev_priv() */ #define HAVE_NETDEV_PRIV /* netdev_priv() */
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
/* /*
* Transmit return codes: transmit return codes originate from three different * Transmit return codes: transmit return codes originate from three different
* namespaces: * namespaces:
@@ -82,14 +86,10 @@ struct wireless_dev;
/* qdisc ->enqueue() return codes. */ /* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS 0x00 #define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP 0x10 /* skb dropped */ #define NET_XMIT_DROP 0x01 /* skb dropped */
#define NET_XMIT_CN 0x20 /* congestion notification */ #define NET_XMIT_CN 0x02 /* congestion notification */
#define NET_XMIT_POLICED 0x30 /* skb is shot by police */ #define NET_XMIT_POLICED 0x03 /* skb is shot by police */
#define NET_XMIT_MASK 0xf0 /* qdisc flags in net/sch_generic.h */ #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
* indicates that the device will soon be dropping packets, or already drops * indicates that the device will soon be dropping packets, or already drops
@@ -98,16 +98,34 @@ struct wireless_dev;
#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
/* Driver transmit return codes */ /* Driver transmit return codes */
#define NETDEV_TX_MASK 0xf #define NETDEV_TX_MASK 0xf0
enum netdev_tx { enum netdev_tx {
__NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
NETDEV_TX_OK = 0, /* driver took care of packet */ NETDEV_TX_OK = 0x00, /* driver took care of packet */
NETDEV_TX_BUSY = 1, /* driver tx path was busy*/ NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
NETDEV_TX_LOCKED = 2, /* driver tx lock was already taken */ NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
}; };
typedef enum netdev_tx netdev_tx_t; typedef enum netdev_tx netdev_tx_t;
/*
* Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
* hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
*/
static inline bool dev_xmit_complete(int rc)
{
/*
 * Positive cases with an skb consumed by a driver:
 * - successful transmission (rc == NETDEV_TX_OK)
 * - error while transmitting (rc < 0)
 * - error while queueing to a different device (rc & NET_XMIT_MASK)
 */
/*
 * With the reordered codes in this commit (NET_XMIT_* values are
 * 0x00-0x03 under NET_XMIT_MASK 0x0f, while NETDEV_TX_BUSY/LOCKED
 * are 0x10/0x20), a single compare covers all three consumed-skb
 * cases listed above in one branch.
 */
if (likely(rc < NET_XMIT_MASK))
return true;
return false;
}
#endif #endif
#define MAX_ADDR_LEN 32 /* Largest hardware address length */ #define MAX_ADDR_LEN 32 /* Largest hardware address length */

View file

@@ -1924,23 +1924,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc; return rc;
} }
/*
 * Old (pre-reorder) form being removed from net/core/dev.c: returns
 * true when the driver consumed the skb, checking each of the three
 * cases with a separate branch.
 */
static inline bool dev_xmit_complete(int rc)
{
/* successful transmission */
if (rc == NETDEV_TX_OK)
return true;
/* error while transmitting, driver consumed skb */
if (rc < 0)
return true;
/* error while queueing to a different device, driver consumed skb */
if (rc & NET_XMIT_MASK)
return true;
return false;
}
/** /**
* dev_queue_xmit - transmit a buffer * dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit * @skb: buffer to transmit

View file

@@ -119,39 +119,26 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
spin_unlock(root_lock); spin_unlock(root_lock);
HARD_TX_LOCK(dev, txq, smp_processor_id()); HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_tx_queue_stopped(txq) && if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
!netif_tx_queue_frozen(txq)) {
ret = dev_hard_start_xmit(skb, dev, txq); ret = dev_hard_start_xmit(skb, dev, txq);
/* an error implies that the skb was consumed */
if (ret < 0)
ret = NETDEV_TX_OK;
/* all NET_XMIT codes map to NETDEV_TX_OK */
ret &= ~NET_XMIT_MASK;
}
HARD_TX_UNLOCK(dev, txq); HARD_TX_UNLOCK(dev, txq);
spin_lock(root_lock); spin_lock(root_lock);
switch (ret) { if (dev_xmit_complete(ret)) {
case NETDEV_TX_OK: /* Driver sent out skb successfully or skb was consumed */
/* Driver sent out skb successfully */
ret = qdisc_qlen(q); ret = qdisc_qlen(q);
break; } else if (ret == NETDEV_TX_LOCKED) {
case NETDEV_TX_LOCKED:
/* Driver try lock failed */ /* Driver try lock failed */
ret = handle_dev_cpu_collision(skb, txq, q); ret = handle_dev_cpu_collision(skb, txq, q);
break; } else {
default:
/* Driver returned NETDEV_TX_BUSY - requeue skb */ /* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
printk(KERN_WARNING "BUG %s code %d qlen %d\n", printk(KERN_WARNING "BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen); dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q); ret = dev_requeue_skb(skb, q);
break;
} }
if (ret && (netif_tx_queue_stopped(txq) || if (ret && (netif_tx_queue_stopped(txq) ||