Mirror of https://github.com/adulau/aha.git (synced 2024-12-28 11:46:19 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  tcp: Revert 'process defer accept as established' changes.
  ipv6: Fix duplicate initialization of rawv6_prot.destroy
  bnx2x: Updating the Maintainer
  net: Eliminate flush_scheduled_work() calls while RTNL is held.
  drivers/net/r6040.c: correct bad use of round_jiffies()
  fec_mpc52xx: MPC52xx_MESSAGES_DEFAULT: 2nd NETIF_MSG_IFDOWN => IFUP
  ipg: fix receivemode IPG_RM_RECEIVEMULTICAST{,HASH} in ipg_nic_set_multicast_list()
  netfilter: nf_conntrack: fix ctnetlink related crash in nf_nat_setup_info()
  netfilter: Make nflog quiet when no one listen in userspace.
  ipv6: Fail with appropriate error code when setting not-applicable sockopt.
  ipv6: Check IPV6_MULTICAST_LOOP option value.
  ipv6: Check the hop limit setting in ancillary data.
  ipv6 route: Fix route lifetime in netlink message.
  ipv6 mcast: Check address family of gf_group in getsockopt(MS_FILTER).
  dccp: Bug in initial acknowledgment number assignment
  dccp ccid-3: X truncated due to type conversion
  dccp ccid-3: TFRC reverse-lookup Bug-Fix
  dccp ccid-2: Bug-Fix - Ack Vectors need to be ignored on request sockets
  dccp: Fix sparse warnings
  dccp ccid-3: Bug-Fix - Zero RTT is possible
commit 51558576ea
40 changed files with 130 additions and 228 deletions
@@ -995,8 +995,8 @@ L: netdev@vger.kernel.org
 S: Supported
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-P: Eliezer Tamir
-M: eliezert@broadcom.com
+P: Eilon Greenstein
+M: eilong@broadcom.com
 L: netdev@vger.kernel.org
 S: Supported

@@ -5724,14 +5724,12 @@ bnx2_reset_task(struct work_struct *work)
 	if (!netif_running(bp->dev))
 		return;
 
-	bp->in_reset_task = 1;
 	bnx2_netif_stop(bp);
 
 	bnx2_init_nic(bp);
 
 	atomic_set(&bp->intr_sem, 1);
 	bnx2_netif_start(bp);
-	bp->in_reset_task = 0;
 }
 
 static void

@@ -5907,12 +5905,7 @@ bnx2_close(struct net_device *dev)
 	struct bnx2 *bp = netdev_priv(dev);
 	u32 reset_code;
 
-	/* Calling flush_scheduled_work() may deadlock because
-	 * linkwatch_event() may be on the workqueue and it will try to get
-	 * the rtnl_lock which we are holding.
-	 */
-	while (bp->in_reset_task)
-		msleep(1);
+	cancel_work_sync(&bp->reset_task);
 
 	bnx2_disable_int_sync(bp);
 	bnx2_napi_disable(bp);

@@ -6656,7 +6656,6 @@ struct bnx2 {
 	int			current_interval;
 	struct timer_list	timer;
 	struct work_struct	reset_task;
-	int			in_reset_task;
 
 	/* Used to synchronize phy accesses. */
 	spinlock_t		phy_lock;

@@ -6,7 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Written by: Eliezer Tamir <eliezert@broadcom.com>
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
  * Slowpath rework by Vladislav Zolotarov

@@ -74,7 +75,7 @@ static char version[] __devinitdata =
 	"Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
 	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
-MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
+MODULE_AUTHOR("Eliezer Tamir");
 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);

@@ -6,7 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Written by: Eliezer Tamir <eliezert@broadcom.com>
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  */
 
@@ -6,7 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Written by: Eliezer Tamir <eliezert@broadcom.com>
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
  */
 
 #ifndef BNX2X_INIT_H

@@ -2605,7 +2605,8 @@ static int ehea_stop(struct net_device *dev)
 	if (netif_msg_ifdown(port))
 		ehea_info("disabling port %s", dev->name);
 
-	flush_scheduled_work();
+	cancel_work_sync(&port->reset_task);
+
 	mutex_lock(&port->port_lock);
 	netif_stop_queue(dev);
 	port_napi_disable(port);

@@ -78,7 +78,7 @@ module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
 MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe");
 
 #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
-		NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFDOWN )
+		NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
 static int debug = -1;	/* the above default */
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "debugging messages level");

@@ -959,7 +959,7 @@ static int epp_close(struct net_device *dev)
 	unsigned char tmp[1];
 
 	bc->work_running = 0;
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&bc->run_work);
 	bc->stat = EPP_DCDBIT;
 	tmp[0] = 0;
 	pp->ops->epp_write_addr(pp, tmp, 1, 0);

@@ -577,12 +577,12 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
 		/* NIC to be configured in promiscuous mode. */
 		receivemode = IPG_RM_RECEIVEALLFRAMES;
 	} else if ((dev->flags & IFF_ALLMULTI) ||
-		   (dev->flags & IFF_MULTICAST &
+		   ((dev->flags & IFF_MULTICAST) &&
 		    (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
 		/* NIC to be configured to receive all multicast
 		 * frames. */
 		receivemode |= IPG_RM_RECEIVEMULTICAST;
-	} else if (dev->flags & IFF_MULTICAST & (dev->mc_count > 0)) {
+	} else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
 		/* NIC to be configured to receive selected
 		 * multicast addresses. */
 		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;

@@ -733,7 +733,7 @@ static void r6040_timer(unsigned long data)
 	}
 
 	/* Timer active again */
-	mod_timer(&lp->timer, jiffies + round_jiffies(HZ));
+	mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
 }
 
 /* Read/set MAC address routines */

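round_jiffies() rounds an absolute jiffies value to a whole second so that timers from unrelated drivers expire together. The old r6040 code rounded the constant interval HZ instead of the actual deadline. A toy model of the difference (the HZ value and the rounding rule are simplified stand-ins for the kernel's implementation, which also applies a per-CPU skew):

#include <stdio.h>

#define HZ 1000UL

/* Round an absolute tick count up to the next whole-second boundary,
 * roughly what round_jiffies() does. */
static unsigned long toy_round_jiffies(unsigned long j)
{
	return j - (j % HZ) + HZ;
}

int main(void)
{
	unsigned long jiffies = 5678;	/* pretend current time */

	/* Buggy: rounds the constant HZ, not the deadline. */
	unsigned long buggy = jiffies + toy_round_jiffies(HZ);
	/* Fixed: rounds the real deadline jiffies + HZ. */
	unsigned long fixed = toy_round_jiffies(jiffies + HZ);

	printf("buggy expiry: %lu (not second-aligned)\n", buggy);	/* 7678 */
	printf("fixed expiry: %lu (second-aligned)\n", fixed);		/* 7000 */
	return 0;
}
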
@@ -136,7 +136,6 @@ struct smc911x_local {
 
 	/* work queue */
 	struct work_struct phy_configure;
-	int work_pending;
 
 	int tx_throttle;
 	spinlock_t lock;

@@ -960,11 +959,11 @@ static void smc911x_phy_configure(struct work_struct *work)
 	 * We should not be called if phy_type is zero.
 	 */
 	if (lp->phy_type == 0)
-		goto smc911x_phy_configure_exit_nolock;
+		return;
 
 	if (smc911x_phy_reset(dev, phyaddr)) {
 		printk("%s: PHY reset timed out\n", dev->name);
-		goto smc911x_phy_configure_exit_nolock;
+		return;
 	}
 	spin_lock_irqsave(&lp->lock, flags);
 
@@ -1033,8 +1032,6 @@ static void smc911x_phy_configure(struct work_struct *work)
 
 smc911x_phy_configure_exit:
 	spin_unlock_irqrestore(&lp->lock, flags);
-smc911x_phy_configure_exit_nolock:
-	lp->work_pending = 0;
 }
 
 /*

@@ -1356,11 +1353,8 @@ static void smc911x_timeout(struct net_device *dev)
 	 * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
 	 * which calls schedule(). Hence we use a work queue.
 	 */
-	if (lp->phy_type != 0) {
-		if (schedule_work(&lp->phy_configure)) {
-			lp->work_pending = 1;
-		}
-	}
+	if (lp->phy_type != 0)
+		schedule_work(&lp->phy_configure);
 
 	/* We can accept TX packets again */
 	dev->trans_start = jiffies;

@@ -1531,16 +1525,8 @@ static int smc911x_close(struct net_device *dev)
 	if (lp->phy_type != 0) {
 		/* We need to ensure that no calls to
 		 * smc911x_phy_configure are pending.
-
-		 * flush_scheduled_work() cannot be called because we
-		 * are running with the netlink semaphore held (from
-		 * devinet_ioctl()) and the pending work queue
-		 * contains linkwatch_event() (scheduled by
-		 * netif_carrier_off() above). linkwatch_event() also
-		 * wants the netlink semaphore.
 		 */
-		while (lp->work_pending)
-			schedule();
+		cancel_work_sync(&lp->phy_configure);
 		smc911x_phy_powerdown(dev, lp->mii.phy_id);
 	}
 
@@ -1016,15 +1016,8 @@ static void smc_phy_powerdown(struct net_device *dev)
 
 	/* We need to ensure that no calls to smc_phy_configure are
 	   pending.
-
-	   flush_scheduled_work() cannot be called because we are
-	   running with the netlink semaphore held (from
-	   devinet_ioctl()) and the pending work queue contains
-	   linkwatch_event() (scheduled by netif_carrier_off()
-	   above). linkwatch_event() also wants the netlink semaphore.
 	*/
-	while(lp->work_pending)
-		yield();
+	cancel_work_sync(&lp->phy_configure);
 
 	bmcr = smc_phy_read(dev, phy, MII_BMCR);
 	smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);

@@ -1161,7 +1154,6 @@ static void smc_phy_configure(struct work_struct *work)
 smc_phy_configure_exit:
 	SMC_SELECT_BANK(lp, 2);
 	spin_unlock_irq(&lp->lock);
-	lp->work_pending = 0;
 }
 
 /*

@@ -1389,11 +1381,8 @@ static void smc_timeout(struct net_device *dev)
 	 * smc_phy_configure() calls msleep() which calls schedule_timeout()
 	 * which calls schedule(). Hence we use a work queue.
 	 */
-	if (lp->phy_type != 0) {
-		if (schedule_work(&lp->phy_configure)) {
-			lp->work_pending = 1;
-		}
-	}
+	if (lp->phy_type != 0)
+		schedule_work(&lp->phy_configure);
 
 	/* We can accept TX packets again */
 	dev->trans_start = jiffies;

@@ -731,7 +731,7 @@ static void tulip_down (struct net_device *dev)
 	void __iomem *ioaddr = tp->base_addr;
 	unsigned long flags;
 
-	flush_scheduled_work();
+	cancel_work_sync(&tp->media_work);
 
 #ifdef CONFIG_TULIP_NAPI
 	napi_disable(&tp->napi);

@@ -706,7 +706,7 @@ static void kaweth_kill_urbs(struct kaweth_device *kaweth)
 	usb_kill_urb(kaweth->rx_urb);
 	usb_kill_urb(kaweth->tx_urb);
 
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&kaweth->lowmem_work);
 
 	/* a scheduled work may have resubmitted,
 	   we hit them again */

@@ -682,7 +682,13 @@ static int prism2_close(struct net_device *dev)
 		netif_device_detach(dev);
 	}
 
-	flush_scheduled_work();
+	cancel_work_sync(&local->reset_queue);
+	cancel_work_sync(&local->set_multicast_list_queue);
+	cancel_work_sync(&local->set_tim_queue);
+#ifndef PRISM2_NO_STATION_MODES
+	cancel_work_sync(&local->info_queue);
+#endif
+	cancel_work_sync(&local->comms_qual_update);
 
 	module_put(local->hw_module);
 
@@ -239,11 +239,6 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
 	return (struct tcp_request_sock *)req;
 }
 
-struct tcp_deferred_accept_info {
-	struct sock *listen_sk;
-	struct request_sock *request;
-};
-
 struct tcp_sock {
 	/* inet_connection_sock has to be the first member of tcp_sock */
 	struct inet_connection_sock	inet_conn;

@@ -379,8 +374,6 @@ struct tcp_sock {
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
 	int			linger2;
 
-	struct tcp_deferred_accept_info defer_tcp_accept;
-
 	unsigned long last_synq_overflow;
 
 	u32	tso_deferred;

@@ -115,8 +115,8 @@ struct request_sock_queue {
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
 	rwlock_t		syn_wait_lock;
-	u16			rskq_defer_accept;
-	/* 2 bytes hole, try to pack */
+	u8			rskq_defer_accept;
+	/* 3 bytes hole, try to pack */
 	struct listen_sock	*listen_opt;
 };
 
@@ -139,7 +139,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define MAX_TCP_KEEPINTVL	32767
 #define MAX_TCP_KEEPCNT		127
 #define MAX_TCP_SYNCNT		127
-#define MAX_TCP_ACCEPT_DEFERRED	65535
 
 #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
 
@@ -290,12 +290,12 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
 
 	while (1) {
 		const u8 len = dccp_ackvec_len(av, index);
-		const u8 state = dccp_ackvec_state(av, index);
+		const u8 av_state = dccp_ackvec_state(av, index);
 		/*
 		 * valid packets not yet in av_buf have a reserved
 		 * entry, with a len equal to 0.
 		 */
-		if (state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
+		if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
 		    len == 0 && delta == 0) {	/* Found our
 						   reserved seat! */
 			dccp_pr_debug("Found %llu reserved seat!\n",

@@ -325,31 +325,6 @@ out_duplicate:
 	return -EILSEQ;
 }
 
-#ifdef CONFIG_IP_DCCP_DEBUG
-void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len)
-{
-	dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len,
-			  (unsigned long long)ackno);
-
-	while (len--) {
-		const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6;
-		const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
-
-		dccp_pr_debug_cat("%d,%d|", state, rl);
-		++vector;
-	}
-
-	dccp_pr_debug_cat("\n");
-}
-
-void dccp_ackvec_print(const struct dccp_ackvec *av)
-{
-	dccp_ackvector_print(av->av_buf_ackno,
-			     av->av_buf + av->av_buf_head,
-			     av->av_vec_len);
-}
-#endif
-
 static void dccp_ackvec_throw_record(struct dccp_ackvec *av,
 				     struct dccp_ackvec_record *avr)
 {

@@ -159,8 +159,8 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
 	} else if (ktime_us_delta(now, hctx->ccid3hctx_t_ld)
 				- (s64)hctx->ccid3hctx_rtt >= 0) {
 
-		hctx->ccid3hctx_x =
-			max(min(2 * hctx->ccid3hctx_x, min_rate),
+		hctx->ccid3hctx_x = min(2 * hctx->ccid3hctx_x, min_rate);
+		hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
 			    scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
 				       hctx->ccid3hctx_rtt));
 		hctx->ccid3hctx_t_ld = now;

@@ -329,8 +329,14 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 			hctx->ccid3hctx_x = rfc3390_initial_rate(sk);
 			hctx->ccid3hctx_t_ld = now;
 		} else {
-			/* Sender does not have RTT sample: X_pps = 1 pkt/sec */
-			hctx->ccid3hctx_x = hctx->ccid3hctx_s;
+			/*
+			 * Sender does not have RTT sample:
+			 * - set fallback RTT (RFC 4340, 3.4) since a RTT value
+			 *   is needed in several parts (e.g. window counter);
+			 * - set sending rate X_pps = 1pps as per RFC 3448, 4.2.
+			 */
+			hctx->ccid3hctx_rtt = DCCP_FALLBACK_RTT;
+			hctx->ccid3hctx_x   = hctx->ccid3hctx_s;
 			hctx->ccid3hctx_x <<= 6;
 		}
 		ccid3_update_send_interval(hctx);

@@ -14,14 +14,6 @@ module_param(tfrc_debug, bool, 0444);
 MODULE_PARM_DESC(tfrc_debug, "Enable debug messages");
 #endif
 
-extern int  tfrc_tx_packet_history_init(void);
-extern void tfrc_tx_packet_history_exit(void);
-extern int  tfrc_rx_packet_history_init(void);
-extern void tfrc_rx_packet_history_exit(void);
-
-extern int  tfrc_li_init(void);
-extern void tfrc_li_exit(void);
-
 static int __init tfrc_module_init(void)
 {
 	int rc = tfrc_li_init();

@@ -15,7 +15,7 @@
  * (at your option) any later version.
  */
 #include <linux/types.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 #include "../../dccp.h"
 /* internal includes that this module exports: */
 #include "loss_interval.h"

@@ -29,21 +29,19 @@ extern int tfrc_debug;
 #endif
 
 /* integer-arithmetic divisions of type (a * 1000000)/b */
-static inline u64 scaled_div(u64 a, u32 b)
+static inline u64 scaled_div(u64 a, u64 b)
 {
-	BUG_ON(b==0);
-	a *= 1000000;
-	do_div(a, b);
-	return a;
+	return div64_u64(a * 1000000, b);
 }
 
-static inline u32 scaled_div32(u64 a, u32 b)
+static inline u32 scaled_div32(u64 a, u64 b)
 {
 	u64 result = scaled_div(a, b);
 
 	if (result > UINT_MAX) {
-		DCCP_CRIT("Overflow: a(%llu)/b(%u) > ~0U",
-			  (unsigned long long)a, b);
+		DCCP_CRIT("Overflow: %llu/%llu > UINT_MAX",
+			  (unsigned long long)a, (unsigned long long)b);
 		return UINT_MAX;
 	}
 	return result;

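With the divisor widened to u64, scaled_div() can defer to div64_u64() and scaled_div32() only has to clamp the result. A user-space sketch of the reworked helpers, modeling div64_u64() with plain 64-bit division (which is what it computes; callers must still avoid b == 0 now that the BUG_ON is gone):

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static uint64_t scaled_div(uint64_t a, uint64_t b)
{
	return (a * 1000000) / b;	/* (a * 10^6) / b, 64-bit throughout */
}

static uint32_t scaled_div32(uint64_t a, uint64_t b)
{
	uint64_t result = scaled_div(a, b);

	if (result > UINT_MAX) {
		fprintf(stderr, "overflow: %llu/%llu > UINT_MAX\n",
			(unsigned long long)a, (unsigned long long)b);
		return UINT_MAX;
	}
	return (uint32_t)result;
}

int main(void)
{
	/* 5000000 * 10^6 / 1000 = 5 * 10^9 > UINT_MAX: clamped. */
	printf("%u\n", scaled_div32(5000000, 1000));	/* 4294967295 */
	/* Small case stays exact: 3 * 10^6 / 2 = 1500000. */
	printf("%u\n", scaled_div32(3, 2));
	return 0;
}
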
@@ -58,7 +56,14 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
 	return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
 }
 
-extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
-extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
+extern u32  tfrc_calc_x(u16 s, u32 R, u32 p);
+extern u32  tfrc_calc_x_reverse_lookup(u32 fvalue);
+
+extern int  tfrc_tx_packet_history_init(void);
+extern void tfrc_tx_packet_history_exit(void);
+extern int  tfrc_rx_packet_history_init(void);
+extern void tfrc_rx_packet_history_exit(void);
+
+extern int  tfrc_li_init(void);
+extern void tfrc_li_exit(void);
 #endif /* _TFRC_H_ */

@@ -661,7 +661,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
 
 EXPORT_SYMBOL_GPL(tfrc_calc_x);
 
-/*
+/**
  * tfrc_calc_x_reverse_lookup - try to find p given f(p)
  *
  * @fvalue: function value to match, scaled by 1000000

@@ -676,11 +676,11 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
 
 	/* Error cases. */
 	if (fvalue < tfrc_calc_x_lookup[0][1]) {
-		DCCP_WARN("fvalue %d smaller than resolution\n", fvalue);
-		return tfrc_calc_x_lookup[0][1];
+		DCCP_WARN("fvalue %u smaller than resolution\n", fvalue);
+		return TFRC_SMALLEST_P;
 	}
 	if (fvalue > tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][0]) {
-		DCCP_WARN("fvalue %d exceeds bounds!\n", fvalue);
+		DCCP_WARN("fvalue %u exceeds bounds!\n", fvalue);
 		return 1000000;
 	}
 
@@ -165,12 +165,12 @@ out_free:
 	/* See dccp_v4_conn_request */
 	newdmsk->dccpms_sequence_window = req->rcv_wnd;
 
-	newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr;
-	dccp_update_gsr(newsk, dreq->dreq_isr);
-
-	newdp->dccps_iss = dreq->dreq_iss;
+	newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss;
 	dccp_update_gss(newsk, dreq->dreq_iss);
 
+	newdp->dccps_isr = dreq->dreq_isr;
+	dccp_update_gsr(newsk, dreq->dreq_isr);
+
 	/*
 	 * SWL and AWL are initially adjusted so that they are not less than
 	 * the initial Sequence Numbers received and sent, respectively:

@@ -107,9 +107,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
 		 *
 		 * CCID-specific options are ignored during connection setup, as
 		 * negotiation may still be in progress (see RFC 4340, 10.3).
+		 * The same applies to Ack Vectors, as these depend on the CCID.
 		 *
 		 */
-		if (dreq != NULL && opt >= 128)
+		if (dreq != NULL && (opt >= 128 ||
+		    opt == DCCPO_ACK_VECTOR_0 || opt == DCCPO_ACK_VECTOR_1))
 			goto ignore_option;
 
 		switch (opt) {

@@ -508,6 +508,7 @@ void dccp_send_ack(struct sock *sk)
 
 EXPORT_SYMBOL_GPL(dccp_send_ack);
 
+#if 0
 /* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
 void dccp_send_delayed_ack(struct sock *sk)
 {

@@ -538,6 +539,7 @@ void dccp_send_delayed_ack(struct sock *sk)
 	icsk->icsk_ack.timeout = timeout;
 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
 }
+#endif
 
 void dccp_send_sync(struct sock *sk, const u64 ackno,
 		    const enum dccp_pkt_type pkt_type)

@@ -42,7 +42,7 @@ static int bufsize = 64 * 1024;
 
 static const char procname[] = "dccpprobe";
 
-struct {
+static struct {
 	struct kfifo	*fifo;
 	spinlock_t	lock;
 	wait_queue_head_t wait;

@@ -419,7 +419,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 	struct inet_connection_sock *icsk = inet_csk(parent);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 	struct listen_sock *lopt = queue->listen_opt;
-	int thresh = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+	int thresh = max_retries;
 	unsigned long now = jiffies;
 	struct request_sock **reqp, *req;
 	int i, budget;

@@ -455,6 +456,9 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 		}
 	}
 
+	if (queue->rskq_defer_accept)
+		max_retries = queue->rskq_defer_accept;
+
 	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
 	i = lopt->clock_hand;
 
@@ -462,8 +466,9 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 		reqp=&lopt->syn_table[i];
 		while ((req = *reqp) != NULL) {
 			if (time_after_eq(now, req->expires)) {
-				if (req->retrans < thresh &&
-				    !req->rsk_ops->rtx_syn_ack(parent, req)) {
+				if ((req->retrans < (inet_rsk(req)->acked ? max_retries : thresh)) &&
+				    (inet_rsk(req)->acked ||
+				     !req->rsk_ops->rtx_syn_ack(parent, req))) {
 					unsigned long timeo;
 
 					if (req->retrans++ == 0)

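After the revert, a request whose final ACK has already been seen (inet_rsk(req)->acked, set by TCP_DEFER_ACCEPT) is kept alive up to max_retries without actually retransmitting a SYN-ACK, while ordinary requests give up at thresh. A standalone restatement of just the counting decision (the thresh and max_retries values are illustrative):

#include <stdio.h>

/* Keep a pending request alive for another prune interval? */
static int keep_request(int retrans, int acked, int thresh, int max_retries)
{
	return retrans < (acked ? max_retries : thresh);
}

int main(void)
{
	/* thresh defaults to sysctl_tcp_synack_retries (5); max_retries is
	 * raised to rskq_defer_accept when TCP_DEFER_ACCEPT is in use. */
	int thresh = 5, max_retries = 8;

	printf("plain request, 5 retrans: %s\n",
	       keep_request(5, 0, thresh, max_retries) ? "keep" : "drop");
	printf("acked request, 5 retrans: %s\n",
	       keep_request(5, 1, thresh, max_retries) ? "keep" : "drop");
	return 0;
}
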
@@ -2112,12 +2112,15 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_DEFER_ACCEPT:
-		if (val < 0) {
-			err = -EINVAL;
-		} else {
-			if (val > MAX_TCP_ACCEPT_DEFERRED)
-				val = MAX_TCP_ACCEPT_DEFERRED;
-			icsk->icsk_accept_queue.rskq_defer_accept = val;
-		}
+		icsk->icsk_accept_queue.rskq_defer_accept = 0;
+		if (val > 0) {
+			/* Translate value in seconds to number of
+			 * retransmits */
+			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
+			       val > ((TCP_TIMEOUT_INIT / HZ) <<
+				       icsk->icsk_accept_queue.rskq_defer_accept))
+				icsk->icsk_accept_queue.rskq_defer_accept++;
+			icsk->icsk_accept_queue.rskq_defer_accept++;
+		}
 		break;
 
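rskq_defer_accept now stores a retransmission count rather than seconds: setsockopt finds the smallest count whose cumulative SYN-ACK backoff covers the requested period, and getsockopt maps the count back to seconds. A sketch of both directions, assuming TCP_TIMEOUT_INIT/HZ == 3, the 2.6 value:

#include <stdio.h>

#define TIMEOUT_INIT 3	/* seconds, i.e. TCP_TIMEOUT_INIT / HZ */

static unsigned char defer_accept_from_seconds(int val)
{
	unsigned char retrans = 0;

	if (val > 0) {
		/* SYN-ACK retransmits back off exponentially (3s, 6s,
		 * 12s, ...), so find the first count covering the
		 * requested period, then add one. */
		while (retrans < 32 && val > (TIMEOUT_INIT << retrans))
			retrans++;
		retrans++;
	}
	return retrans;
}

static int defer_accept_to_seconds(unsigned char retrans)
{
	return retrans ? TIMEOUT_INIT << (retrans - 1) : 0;
}

int main(void)
{
	int secs[] = { 0, 1, 3, 10, 30 };

	for (int i = 0; i < 5; i++) {
		unsigned char r = defer_accept_from_seconds(secs[i]);
		printf("%2d s -> %u retransmits -> reported as %d s\n",
		       secs[i], r, defer_accept_to_seconds(r));
	}
	return 0;
}
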
@@ -2299,7 +2302,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		val = (val ? : sysctl_tcp_fin_timeout) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
-		val = icsk->icsk_accept_queue.rskq_defer_accept;
+		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
+		      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
 		break;
 	case TCP_WINDOW_CLAMP:
 		val = tp->window_clamp;

@@ -4541,49 +4541,6 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
 	}
 }
 
-static int tcp_defer_accept_check(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (tp->defer_tcp_accept.request) {
-		int queued_data = tp->rcv_nxt - tp->copied_seq;
-		int hasfin = !skb_queue_empty(&sk->sk_receive_queue) ?
-			tcp_hdr((struct sk_buff *)
-				sk->sk_receive_queue.prev)->fin : 0;
-
-		if (queued_data && hasfin)
-			queued_data--;
-
-		if (queued_data &&
-		    tp->defer_tcp_accept.listen_sk->sk_state == TCP_LISTEN) {
-			if (sock_flag(sk, SOCK_KEEPOPEN)) {
-				inet_csk_reset_keepalive_timer(sk,
-						keepalive_time_when(tp));
-			} else {
-				inet_csk_delete_keepalive_timer(sk);
-			}
-
-			inet_csk_reqsk_queue_add(
-					tp->defer_tcp_accept.listen_sk,
-					tp->defer_tcp_accept.request,
-					sk);
-
-			tp->defer_tcp_accept.listen_sk->sk_data_ready(
-					tp->defer_tcp_accept.listen_sk, 0);
-
-			sock_put(tp->defer_tcp_accept.listen_sk);
-			sock_put(sk);
-			tp->defer_tcp_accept.listen_sk = NULL;
-			tp->defer_tcp_accept.request = NULL;
-		} else if (hasfin ||
-			   tp->defer_tcp_accept.listen_sk->sk_state != TCP_LISTEN) {
-			tcp_reset(sk);
-			return -1;
-		}
-	}
-	return 0;
-}
-
 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
 {
 	struct tcp_sock *tp = tcp_sk(sk);

@@ -4944,8 +4901,6 @@ step5:
 
 	tcp_data_snd_check(sk);
 	tcp_ack_snd_check(sk);
-
-	tcp_defer_accept_check(sk);
 	return 0;
 
 csum_error:

@@ -1918,14 +1918,6 @@ int tcp_v4_destroy_sock(struct sock *sk)
 		sk->sk_sndmsg_page = NULL;
 	}
 
-	if (tp->defer_tcp_accept.request) {
-		reqsk_free(tp->defer_tcp_accept.request);
-		sock_put(tp->defer_tcp_accept.listen_sk);
-		sock_put(sk);
-		tp->defer_tcp_accept.listen_sk = NULL;
-		tp->defer_tcp_accept.request = NULL;
-	}
-
 	atomic_dec(&tcp_sockets_allocated);
 
 	return 0;

@@ -571,8 +571,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	   does sequence test, SYN is truncated, and thus we consider
 	   it a bare ACK.
 
-	   Both ends (listening sockets) accept the new incoming
-	   connection and try to talk to each other. 8-)
+	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
+	   bare ACK.  Otherwise, we create an established connection.  Both
+	   ends (listening sockets) accept the new incoming connection and try
+	   to talk to each other. 8-)
 
 	   Note: This case is both harmless, and rare.  Possibility is about the
 	   same as us discovering intelligent life on another plant tomorrow.

@@ -640,6 +642,13 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	if (!(flg & TCP_FLAG_ACK))
 		return NULL;
 
+	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
+	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+		inet_rsk(req)->acked = 1;
+		return NULL;
+	}
+
 	/* OK, ACK is valid, create big socket and
 	 * feed this segment to it. It will repeat all
 	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO

@@ -678,24 +687,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	inet_csk_reqsk_queue_unlink(sk, req, prev);
 	inet_csk_reqsk_queue_removed(sk, req);
 
-	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
-	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-
-		/* the accept queue handling is done is est recv slow
-		 * path so lets make sure to start there
-		 */
-		tcp_sk(child)->pred_flags = 0;
-		sock_hold(sk);
-		sock_hold(child);
-		tcp_sk(child)->defer_tcp_accept.listen_sk = sk;
-		tcp_sk(child)->defer_tcp_accept.request = req;
-
-		inet_csk_reset_keepalive_timer(child,
-			inet_csk(sk)->icsk_accept_queue.rskq_defer_accept * HZ);
-	} else {
-		inet_csk_reqsk_queue_add(sk, req, child);
-	}
-
+	inet_csk_reqsk_queue_add(sk, req, child);
 	return child;
 
 listen_overflow:

@@ -489,11 +489,6 @@ static void tcp_keepalive_timer (unsigned long data)
 		goto death;
 	}
 
-	if (tp->defer_tcp_accept.request && sk->sk_state == TCP_ESTABLISHED) {
-		tcp_send_active_reset(sk, GFP_ATOMIC);
-		goto death;
-	}
-
 	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
 		goto out;
 
@@ -705,6 +705,11 @@ int datagram_send_ctl(struct net *net,
 			}
 
 			*hlimit = *(int *)CMSG_DATA(cmsg);
+			if (*hlimit < -1 || *hlimit > 0xff) {
+				err = -EINVAL;
+				goto exit_f;
+			}
+
 			break;
 
 		case IPV6_TCLASS:

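The added check rejects ancillary-data hop limits outside -1..255, where -1 selects the route default. A minimal sketch of the same validation:

#include <stdio.h>

static int check_hlimit(int hlimit)
{
	if (hlimit < -1 || hlimit > 0xff)
		return -1;	/* the kernel returns -EINVAL here */
	return 0;
}

int main(void)
{
	int vals[] = { -2, -1, 0, 64, 255, 256 };

	for (int i = 0; i < 6; i++)
		printf("hlimit %4d -> %s\n", vals[i],
		       check_hlimit(vals[i]) ? "EINVAL" : "ok");
	return 0;
}
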
@@ -67,7 +67,7 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *))
 
 	/* RA packet may be delivered ONLY to IPPROTO_RAW socket */
 	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW)
-		return -EINVAL;
+		return -ENOPROTOOPT;
 
 	new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
@@ -446,7 +446,7 @@ done:
 
 	case IPV6_MULTICAST_HOPS:
 		if (sk->sk_type == SOCK_STREAM)
-			goto e_inval;
+			break;
 		if (optlen < sizeof(int))
 			goto e_inval;
 		if (val > 255 || val < -1)

@@ -458,13 +458,15 @@ done:
 	case IPV6_MULTICAST_LOOP:
 		if (optlen < sizeof(int))
 			goto e_inval;
+		if (val != valbool)
+			goto e_inval;
 		np->mc_loop = valbool;
 		retv = 0;
 		break;
 
 	case IPV6_MULTICAST_IF:
 		if (sk->sk_type == SOCK_STREAM)
-			goto e_inval;
+			break;
 		if (optlen < sizeof(int))
 			goto e_inval;
 
@@ -860,7 +862,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 		if (sk->sk_protocol != IPPROTO_UDP &&
 		    sk->sk_protocol != IPPROTO_UDPLITE &&
 		    sk->sk_protocol != IPPROTO_TCP)
-			return -EINVAL;
+			return -ENOPROTOOPT;
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -ENOTCONN;
 		val = sk->sk_family;

@@ -874,6 +876,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 			return -EINVAL;
 		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0)))
 			return -EFAULT;
+		if (gsf.gf_group.ss_family != AF_INET6)
+			return -EADDRNOTAVAIL;
 		lock_sock(sk);
 		err = ip6_mc_msfget(sk, &gsf,
 			(struct group_filter __user *)optval, optlen);

@@ -1169,7 +1169,8 @@ static int raw6_destroy(struct sock *sk)
 	lock_sock(sk);
 	ip6_flush_pending_frames(sk);
 	release_sock(sk);
-	return 0;
+
+	return inet6_destroy_sock(sk);
 }
 
 static int rawv6_init_sk(struct sock *sk)

@@ -1200,7 +1201,6 @@ struct proto rawv6_prot = {
 	.disconnect	   = udp_disconnect,
 	.ioctl		   = rawv6_ioctl,
 	.init		   = rawv6_init_sk,
-	.destroy	   = inet6_destroy_sock,
 	.setsockopt	   = rawv6_setsockopt,
 	.getsockopt	   = rawv6_getsockopt,
 	.sendmsg	   = rawv6_sendmsg,

@@ -2196,8 +2196,12 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 
 	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
 
-	expires = (rt->rt6i_flags & RTF_EXPIRES) ?
-			rt->rt6i_expires - jiffies : 0;
+	if (!(rt->rt6i_flags & RTF_EXPIRES))
+		expires = 0;
+	else if (rt->rt6i_expires - jiffies < INT_MAX)
+		expires = rt->rt6i_expires - jiffies;
+	else
+		expires = INT_MAX;
 
 	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
 			       expires, rt->u.dst.error) < 0)

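The route-lifetime fix clamps the jiffies delta before it is handed to rtnl_put_cacheinfo(), whose expires argument is carried in a 32-bit netlink field. A sketch of the clamping logic (assumes a 64-bit unsigned long; the values are illustrative):

#include <stdio.h>
#include <limits.h>

static long route_expires(int has_rtf_expires, unsigned long rt6i_expires,
			  unsigned long jiffies)
{
	long expires;

	if (!has_rtf_expires)
		expires = 0;	/* permanent route */
	else if (rt6i_expires - jiffies < INT_MAX)
		expires = rt6i_expires - jiffies;
	else
		expires = INT_MAX;	/* avoid wrapping the 32-bit field */

	return expires;
}

int main(void)
{
	/* A permanent route, a short-lived one, and a huge lifetime. */
	printf("%ld\n", route_expires(0, 0, 1000));
	printf("%ld\n", route_expires(1, 6000, 1000));		/* 5000 */
	printf("%ld\n", route_expires(1, 0x100000000UL, 0));	/* clamped */
	return 0;
}
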
@@ -196,8 +196,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	if (l4proto && l4proto->destroy)
 		l4proto->destroy(ct);
 
-	nf_ct_ext_destroy(ct);
-
 	rcu_read_unlock();
 
 	spin_lock_bh(&nf_conntrack_lock);

@@ -520,6 +518,7 @@ static void nf_conntrack_free_rcu(struct rcu_head *head)
 
 void nf_conntrack_free(struct nf_conn *ct)
 {
+	nf_ct_ext_destroy(ct);
 	call_rcu(&ct->rcu, nf_conntrack_free_rcu);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);

@@ -92,10 +92,6 @@ void nf_log_packet(int pf,
 		vsnprintf(prefix, sizeof(prefix), fmt, args);
 		va_end(args);
 		logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix);
-	} else if (net_ratelimit()) {
-		printk(KERN_WARNING "nf_log_packet: can\'t log since "
-		       "no backend logging module loaded in! Please either "
-		       "load one, or disable logging explicitly\n");
 	}
 	rcu_read_unlock();
 }