[TCP]: Fix stretch ACK performance killer when doing ucopy.
When we are doing ucopy, we try to defer the ACK generation to cleanup_rbuf(). This works very well most of the time, but if the ucopy prequeue is large, this ACKing behavior kills performance.

With TSO, it is possible to fill the prequeue so large that by the time the ACK is sent and gets back to the sender, most of the window has emptied of data and performance suffers significantly.

This behavior does help in some cases, so we should think about re-enabling this trick in the future, using some kind of limit in order to avoid the bug case.

Signed-off-by: David S. Miller <davem@davemloft.net>
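A rough back-of-envelope sketch of the stall described above (every number below is an illustrative assumption, not a measurement from the commit): once a single deferred ACK covers most of the window, the sender idles for about one RTT per window of data, so throughput collapses from the link rate toward roughly window/RTT.

/* Back-of-envelope model of the stretch-ACK stall described above.
 * All numbers are assumptions chosen for illustration; none come
 * from the commit itself. */
#include <stdio.h>

int main(void)
{
	double window = 64 * 1024;	/* assumed receive window, bytes */
	double rtt    = 0.100;		/* assumed round-trip time, seconds */
	double link   = 100e6 / 8;	/* assumed 100 Mbit/s link, bytes/sec */

	/* If one deferred ACK covers ~the whole window, the sender
	 * transmits a window's worth of data, then idles ~1 RTT
	 * waiting for that ACK:
	 * throughput ~ window / (window/link + rtt). */
	double stalled = window / (window / link + rtt);

	printf("link rate        : %6.1f Mbit/s\n", link * 8 / 1e6);
	printf("stretch-ACK rate : %6.1f Mbit/s\n", stalled * 8 / 1e6);
	return 0;
}

Under these assumed numbers the deferred ACK drops throughput from 100 Mbit/s to about 5 Mbit/s, which matches the "performance suffers significantly" behavior the commit message describes.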
parent e16fa6b9d2
commit 314324121f
1 changed file with 1 addition and 10 deletions
net/ipv4/tcp_input.c:

@@ -4355,16 +4355,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 					goto no_ack;
 			}
 
-			if (eaten) {
-				if (tcp_in_quickack_mode(tp)) {
-					tcp_send_ack(sk);
-				} else {
-					tcp_send_delayed_ack(sk);
-				}
-			} else {
-				__tcp_ack_snd_check(sk, 0);
-			}
-
+			__tcp_ack_snd_check(sk, 0);
 no_ack:
 			if (eaten)
 				__kfree_skb(skb);
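The fix works because __tcp_ack_snd_check() ACKs immediately once more than a full frame of new data is unacknowledged, which keeps the sender's ACK clock running even when the ucopy path consumed the skb. Below is a simplified sketch of its decision logic, paraphrased from net/ipv4/tcp_input.c of the same era; it is not verbatim, and later kernels move these fields into inet_connection_sock.

/* Simplified paraphrase of __tcp_ack_snd_check() from
 * net/ipv4/tcp_input.c of this era -- not verbatim, and the field
 * layout differs in later kernels. */
static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (/* more than one full frame received and the advertised
	     * window can advance... */
	    ((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss &&
	     __tcp_select_window(sk) >= tp->rcv_wnd) ||
	    /* ...or we are in quickack mode, ACKing every frame... */
	    tcp_in_quickack_mode(tp) ||
	    /* ...or out-of-order data is queued. */
	    (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
		tcp_send_ack(sk);		/* ACK now: keeps the ACK clock running */
	} else {
		tcp_send_delayed_ack(sk);	/* otherwise a delayed ACK is fine */
	}
}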