mirror of
https://github.com/adulau/aha.git
synced 2024-12-27 11:16:11 +00:00
net: sk_drops consolidation
sock_queue_rcv_skb() can update sk_drops itself, removing the need for callers to take care of it. This is more consistent since sock_queue_rcv_skb() also reads sk_drops when queueing a skb. This adds sk_drops management to many protocols that did not care about it yet. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
48bccd25df
commit
766e9037cc
9 changed files with 20 additions and 34 deletions
|
@ -274,7 +274,7 @@ static void sock_disable_timestamp(struct sock *sk, int flag)
|
|||
|
||||
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
int err = 0;
|
||||
int err;
|
||||
int skb_len;
|
||||
unsigned long flags;
|
||||
struct sk_buff_head *list = &sk->sk_receive_queue;
|
||||
|
@ -284,17 +284,17 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|||
*/
|
||||
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
|
||||
(unsigned)sk->sk_rcvbuf) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
atomic_inc(&sk->sk_drops);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
err = sk_filter(sk, skb);
|
||||
if (err)
|
||||
goto out;
|
||||
return err;
|
||||
|
||||
if (!sk_rmem_schedule(sk, skb->truesize)) {
|
||||
err = -ENOBUFS;
|
||||
goto out;
|
||||
atomic_inc(&sk->sk_drops);
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
skb->dev = NULL;
|
||||
|
@ -314,8 +314,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|||
|
||||
if (!sock_flag(sk, SOCK_DEAD))
|
||||
sk->sk_data_ready(sk, skb_len);
|
||||
out:
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(sock_queue_rcv_skb);
|
||||
|
||||
|
|
|
@ -318,7 +318,6 @@ out:
|
|||
static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
if (sock_queue_rcv_skb(sk, skb) < 0) {
|
||||
atomic_inc(&sk->sk_drops);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
|
|
@ -206,7 +206,6 @@ out:
|
|||
static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
if (sock_queue_rcv_skb(sk, skb) < 0) {
|
||||
atomic_inc(&sk->sk_drops);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
|
|
@ -292,7 +292,6 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
|
|||
/* Charge it to the socket. */
|
||||
|
||||
if (sock_queue_rcv_skb(sk, skb) < 0) {
|
||||
atomic_inc(&sk->sk_drops);
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
|
|
@ -1063,25 +1063,22 @@ EXPORT_SYMBOL(udp_lib_unhash);
|
|||
|
||||
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
int is_udplite = IS_UDPLITE(sk);
|
||||
int rc;
|
||||
int rc = sock_queue_rcv_skb(sk, skb);
|
||||
|
||||
if (rc < 0) {
|
||||
int is_udplite = IS_UDPLITE(sk);
|
||||
|
||||
if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
|
||||
/* Note that an ENOMEM error is charged twice */
|
||||
if (rc == -ENOMEM) {
|
||||
if (rc == -ENOMEM)
|
||||
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
|
||||
is_udplite);
|
||||
atomic_inc(&sk->sk_drops);
|
||||
}
|
||||
goto drop;
|
||||
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
|
||||
kfree_skb(skb);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
drop:
|
||||
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
|
||||
kfree_skb(skb);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* returns:
|
||||
|
|
|
@ -381,8 +381,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
|
|||
}
|
||||
|
||||
/* Charge it to the socket. */
|
||||
if (sock_queue_rcv_skb(sk,skb)<0) {
|
||||
atomic_inc(&sk->sk_drops);
|
||||
if (sock_queue_rcv_skb(sk, skb) < 0) {
|
||||
kfree_skb(skb);
|
||||
return NET_RX_DROP;
|
||||
}
|
||||
|
|
|
@ -385,13 +385,11 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
|
|||
goto drop;
|
||||
}
|
||||
|
||||
if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
|
||||
if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
|
||||
/* Note that an ENOMEM error is charged twice */
|
||||
if (rc == -ENOMEM) {
|
||||
if (rc == -ENOMEM)
|
||||
UDP6_INC_STATS_BH(sock_net(sk),
|
||||
UDP_MIB_RCVBUFERRORS, is_udplite);
|
||||
atomic_inc(&sk->sk_drops);
|
||||
}
|
||||
goto drop;
|
||||
}
|
||||
|
||||
|
|
|
@ -159,11 +159,9 @@ out_nofree:
|
|||
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
int err = sock_queue_rcv_skb(sk, skb);
|
||||
if (err < 0) {
|
||||
|
||||
if (err < 0)
|
||||
kfree_skb(skb);
|
||||
if (err == -ENOMEM)
|
||||
atomic_inc(&sk->sk_drops);
|
||||
}
|
||||
return err ? NET_RX_DROP : NET_RX_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -360,8 +360,6 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
|
|||
err = sock_queue_rcv_skb(sk, skb);
|
||||
if (!err)
|
||||
return 0;
|
||||
if (err == -ENOMEM)
|
||||
atomic_inc(&sk->sk_drops);
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue