net: sk_drops consolidation

sock_queue_rcv_skb() can update sk_drops itself, removing the need for
callers to take care of it. This is more consistent, since
sock_queue_rcv_skb() also reads sk_drops when queueing a skb.

This adds sk_drops management to many protocols that did not care about
it yet.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet, 2009-10-14 20:40:11 -07:00, committed by David S. Miller
commit 766e9037cc, parent 48bccd25df
9 changed files with 20 additions and 34 deletions
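The consolidated pattern, as a before/after sketch (`proto_rcv_skb` is a
hypothetical caller name; the real callers are patched in the hunks below):

/* Before: each handler charged the drop itself, and some did it
 * only for certain errors. */
static int proto_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                atomic_inc(&sk->sk_drops);      /* easy to forget */
                kfree_skb(skb);
                return NET_RX_DROP;
        }
        return NET_RX_SUCCESS;
}

/* After: sock_queue_rcv_skb() charges sk_drops at the point of
 * failure, so the caller only frees the skb and reports the drop. */
static int proto_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }
        return NET_RX_SUCCESS;
}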

net/core/sock.c

@@ -274,7 +274,7 @@ static void sock_disable_timestamp(struct sock *sk, int flag)
 
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int err = 0;
+	int err;
 	int skb_len;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
@@ -284,17 +284,17 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf) {
-		err = -ENOMEM;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOMEM;
 	}
 
 	err = sk_filter(sk, skb);
 	if (err)
-		goto out;
+		return err;
 
 	if (!sk_rmem_schedule(sk, skb->truesize)) {
-		err = -ENOBUFS;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOBUFS;
 	}
 
 	skb->dev = NULL;
@@ -314,8 +314,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, skb_len);
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
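Worth noting in the hunk above: only the two memory-pressure failures
charge sk_drops; an sk_filter() rejection returns its error without
touching the counter. A condensed sketch of the resulting control flow
(not the full function; the queueing and wakeup steps are elided):

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int err;

        /* Receive buffer full: count it against the socket. */
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned)sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                return -ENOMEM;
        }

        /* Socket filter rejected the packet: propagate the error,
         * but do not charge sk_drops. */
        err = sk_filter(sk, skb);
        if (err)
                return err;

        /* No receive memory could be scheduled: also a drop. */
        if (!sk_rmem_schedule(sk, skb->truesize)) {
                atomic_inc(&sk->sk_drops);
                return -ENOBUFS;
        }

        /* ... charge the skb to the socket, queue it, wake the reader ... */
        return 0;
}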

net/ieee802154/dgram.c

@@ -318,7 +318,6 @@ out:
 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	if (sock_queue_rcv_skb(sk, skb) < 0) {
-		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}

net/ieee802154/raw.c

@@ -206,7 +206,6 @@ out:
 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	if (sock_queue_rcv_skb(sk, skb) < 0) {
-		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}

net/ipv4/raw.c

@@ -292,7 +292,6 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
 	/* Charge it to the socket. */
 
 	if (sock_queue_rcv_skb(sk, skb) < 0) {
-		atomic_inc(&sk->sk_drops);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}

net/ipv4/udp.c

@@ -1063,25 +1063,22 @@ EXPORT_SYMBOL(udp_lib_unhash);
 
 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int is_udplite = IS_UDPLITE(sk);
-	int rc;
+	int rc = sock_queue_rcv_skb(sk, skb);
+
+	if (rc < 0) {
+		int is_udplite = IS_UDPLITE(sk);
 
-	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
 		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM) {
+		if (rc == -ENOMEM)
 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 					 is_udplite);
-			atomic_inc(&sk->sk_drops);
-		}
-		goto drop;
+
+		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+		kfree_skb(skb);
+		return -1;
 	}
 
 	return 0;
-
-drop:
-	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	kfree_skb(skb);
-	return -1;
 }
 
 /* returns:
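For readability, the rewritten __udp_queue_rcv_skb() reassembled from the
right-hand side of the hunk above, with comments added; "charged twice"
means an -ENOMEM counts both as a receive-buffer error and as a generic
input error (and sk_drops is now bumped inside sock_queue_rcv_skb()):

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int rc = sock_queue_rcv_skb(sk, skb);

        if (rc < 0) {
                int is_udplite = IS_UDPLITE(sk);

                /* Note that an ENOMEM error is charged twice */
                if (rc == -ENOMEM)
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                         is_udplite);

                /* ...and every failure counts as an input error. */
                UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
                kfree_skb(skb);
                return -1;
        }

        return 0;
}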

net/ipv6/raw.c

@@ -381,8 +381,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 	}
 
 	/* Charge it to the socket. */
-	if (sock_queue_rcv_skb(sk,skb)<0) {
-		atomic_inc(&sk->sk_drops);
+	if (sock_queue_rcv_skb(sk, skb) < 0) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}

net/ipv6/udp.c

@@ -385,13 +385,11 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		goto drop;
 	}
 
-	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
+	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
 		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM) {
+		if (rc == -ENOMEM)
 			UDP6_INC_STATS_BH(sock_net(sk),
 					UDP_MIB_RCVBUFERRORS, is_udplite);
-			atomic_inc(&sk->sk_drops);
-		}
 		goto drop;
 	}
 

net/phonet/datagram.c

@@ -159,11 +159,9 @@ out_nofree:
 
 static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	int err = sock_queue_rcv_skb(sk, skb);
-	if (err < 0) {
+
+	if (err < 0)
 		kfree_skb(skb);
-		if (err == -ENOMEM)
-			atomic_inc(&sk->sk_drops);
-	}
 	return err ? NET_RX_DROP : NET_RX_SUCCESS;
 }

net/phonet/pep.c

@@ -360,8 +360,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 		err = sock_queue_rcv_skb(sk, skb);
 		if (!err)
 			return 0;
-		if (err == -ENOMEM)
-			atomic_inc(&sk->sk_drops);
 		break;
 	}
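A subtle behavioral change in the two phonet hunks: the old code charged
sk_drops only for -ENOMEM, whereas the consolidated helper also charges
it for -ENOBUFS (though not for sk_filter() rejections). The resulting
pn_backlog_rcv(), reassembled from the datagram.c hunk above:

static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        int err = sock_queue_rcv_skb(sk, skb);

        if (err < 0)
                kfree_skb(skb); /* drop already counted by the core */
        return err ? NET_RX_DROP : NET_RX_SUCCESS;
}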