mirror of https://github.com/adulau/aha.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6:
  bnx2: Fix the behavior of ethtool when ONBOOT=no
  qla3xxx: Don't sleep while holding lock.
  qla3xxx: Give the PHY time to come out of reset.
  ipv4 routing: Ensure that route cache entries are usable and reclaimable with caching is off
  net: Move rx skb_orphan call to where needed
  ipv6: Use correct data types for ICMPv6 type and code
  net: let KS8842 driver depend on HAS_IOMEM
  can: let SJA1000 driver depend on HAS_IOMEM
  netxen: fix firmware init handshake
  netxen: fix build with without CONFIG_PM
  netfilter: xt_rateest: fix comparison with self
  netfilter: xt_quota: fix incomplete initialization
  netfilter: nf_log: fix direct userspace memory access in proc handler
  netfilter: fix some sparse endianess warnings
  netfilter: nf_conntrack: fix conntrack lookup race
  netfilter: nf_conntrack: fix confirmation race condition
  netfilter: nf_conntrack: death_by_timeout() fix
commit 09ce42d316
39 changed files with 150 additions and 89 deletions
@@ -1725,6 +1725,7 @@ config TLAN
 
 config KS8842
 	tristate "Micrel KSZ8842"
+	depends on HAS_IOMEM
 	help
 	  This platform driver is for Micrel KSZ8842 chip.
 

@@ -6825,6 +6825,14 @@ bnx2_nway_reset(struct net_device *dev)
 	return 0;
 }
 
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	return bp->link_up;
+}
+
 static int
 bnx2_get_eeprom_len(struct net_device *dev)
 {

@@ -7392,7 +7400,7 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
 	.get_wol = bnx2_get_wol,
 	.set_wol = bnx2_set_wol,
 	.nway_reset = bnx2_nway_reset,
-	.get_link = ethtool_op_get_link,
+	.get_link = bnx2_get_link,
 	.get_eeprom_len = bnx2_get_eeprom_len,
 	.get_eeprom = bnx2_get_eeprom,
 	.set_eeprom = bnx2_set_eeprom,

@@ -36,7 +36,7 @@ config CAN_CALC_BITTIMING
 	  If unsure, say Y.
 
 config CAN_SJA1000
-	depends on CAN_DEV
+	depends on CAN_DEV && HAS_IOMEM
 	tristate "Philips SJA1000"
 	---help---
 	  Driver for the SJA1000 CAN controllers from Philips or NXP

@@ -944,28 +944,31 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
 	u32 val = 0;
 	int retries = 60;
 
-	if (!pegtune_val) {
-		do {
-			val = NXRD32(adapter, CRB_CMDPEG_STATE);
+	if (pegtune_val)
+		return 0;
 
-			if (val == PHAN_INITIALIZE_COMPLETE ||
-				val == PHAN_INITIALIZE_ACK)
-				return 0;
+	do {
+		val = NXRD32(adapter, CRB_CMDPEG_STATE);
 
-			msleep(500);
-
-		} while (--retries);
-
-		if (!retries) {
-			pegtune_val = NXRD32(adapter,
-					NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
-			printk(KERN_WARNING "netxen_phantom_init: init failed, "
-					"pegtune_val=%x\n", pegtune_val);
-			return -1;
+		switch (val) {
+		case PHAN_INITIALIZE_COMPLETE:
+		case PHAN_INITIALIZE_ACK:
+			return 0;
+		case PHAN_INITIALIZE_FAILED:
+			goto out_err;
+		default:
+			break;
 		}
-	}
 
-	return 0;
+		msleep(500);
+
+	} while (--retries);
+
+	NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+	dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+	return -EIO;
 }
 
 static int

@@ -705,7 +705,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
 	first_driver = (adapter->ahw.pci_func == 0);
 
 	if (!first_driver)
-		return 0;
+		goto wait_init;
 
 	first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
 

@@ -752,6 +752,7 @@ netxen_start_firmware(struct netxen_adapter *adapter, int request_fw)
 		| (_NETXEN_NIC_LINUX_SUBVERSION);
 	NXWR32(adapter, CRB_DRIVER_VERSION, val);
 
+wait_init:
 	/* Handshake with the card before we register the devices. */
 	err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
 	if (err) {

@@ -1178,6 +1179,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 	free_netdev(netdev);
 }
 
+#ifdef CONFIG_PM
 static int
 netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
 {

@@ -1242,6 +1244,7 @@ netxen_nic_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+#endif
 
 static int netxen_nic_open(struct net_device *netdev)
 {

@@ -1771,8 +1774,10 @@ static struct pci_driver netxen_driver = {
 	.id_table = netxen_pci_tbl,
 	.probe = netxen_nic_probe,
 	.remove = __devexit_p(netxen_nic_remove),
+#ifdef CONFIG_PM
 	.suspend = netxen_nic_suspend,
 	.resume = netxen_nic_resume
+#endif
 };
 
 /* Driver Registration on NetXen card */

@@ -3142,6 +3142,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 		(void __iomem *)port_regs;
 	u32 delay = 10;
 	int status = 0;
+	unsigned long hw_flags = 0;
 
 	if(ql_mii_setup(qdev))
 		return -1;

@@ -3150,7 +3151,8 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			(ISP_SERIAL_PORT_IF_WE |
 			 (ISP_SERIAL_PORT_IF_WE << 16)));
-
+	/* Give the PHY time to come out of reset. */
+	mdelay(100);
 	qdev->port_link_state = LS_DOWN;
 	netif_carrier_off(qdev->ndev);
 

@@ -3350,7 +3352,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
 		if (value & PORT_STATUS_IC)
 			break;
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		msleep(500);
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 	} while (--delay);
 
 	if (delay == 0) {

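The two qla3xxx hunks above address separate problems: ql_adapter_initialize() now drops qdev->hw_lock around the msleep() in its polling loop instead of sleeping with the spinlock held, and the serial-port write is followed by an explicit mdelay(100) so the PHY has time to come out of reset. The unlock/sleep/relock polling idiom generalizes beyond this driver; below is a minimal user-space sketch of it, with a pthread mutex standing in for the spinlock and usleep() for msleep(). Every name in it (wait_for_port_ready(), port_ready, hw_lock) is invented for the illustration and is not part of the qla3xxx code.

/*
 * Minimal user-space sketch of the "never sleep with the lock held"
 * polling pattern: read the state under the lock, drop the lock before
 * sleeping, retake it before re-reading. Names are illustrative only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool port_ready;	/* stands in for the PORT_STATUS_IC bit */

static int wait_for_port_ready(int max_tries)
{
	int delay = max_tries;

	pthread_mutex_lock(&hw_lock);
	do {
		if (port_ready)			/* check "hardware" state under the lock */
			break;
		pthread_mutex_unlock(&hw_lock);	/* drop the lock before sleeping */
		usleep(500 * 1000);		/* the msleep(500) equivalent */
		pthread_mutex_lock(&hw_lock);	/* retake it before re-reading */
	} while (--delay);
	pthread_mutex_unlock(&hw_lock);

	return delay ? 0 : -1;
}

int main(void)
{
	port_ready = true;	/* pretend the PHY already finished resetting */
	printf("port ready: %s\n", wait_for_port_ready(10) == 0 ? "yes" : "no");
	return 0;
}
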
@@ -175,16 +175,16 @@ struct icmp6_filter {
 
 
 extern void		icmpv6_send(struct sk_buff *skb,
-				    int type, int code,
+				    u8 type, u8 code,
 				    __u32 info,
 				    struct net_device *dev);
 
 extern int		icmpv6_init(void);
-extern int		icmpv6_err_convert(int type, int code,
+extern int		icmpv6_err_convert(u8 type, u8 code,
 				    int *err);
 extern void		icmpv6_cleanup(void);
 extern void		icmpv6_param_prob(struct sk_buff *skb,
-				    int code, int pos);
+				    u8 code, int pos);
 
 struct flowi;
 struct in6_addr;

@@ -53,7 +53,7 @@ struct inet6_protocol
 
 	void	(*err_handler)(struct sk_buff *skb,
 			       struct inet6_skb_parm *opt,
-			       int type, int code, int offset,
+			       u8 type, u8 code, int offset,
 			       __be32 info);
 
 	int	(*gso_send_check)(struct sk_buff *skb);

@@ -6,7 +6,7 @@
 #include <net/protocol.h>
 
 void raw6_icmp_error(struct sk_buff *, int nexthdr,
-		int type, int code, int inner_offset, __be32);
+		u8 type, u8 code, int inner_offset, __be32);
 int raw6_local_deliver(struct sk_buff *, int);
 
 extern int rawv6_rcv(struct sock *sk,

@@ -448,6 +448,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 {
 	struct sctp_ulpevent *event = sctp_skb2event(skb);
 
+	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sctp_sock_rfree;
 	atomic_add(event->rmem_len, &sk->sk_rmem_alloc);

@@ -1252,6 +1252,7 @@ static inline int sk_has_allocations(const struct sock *sk)
 
 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 {
+	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sock_wfree;
 	/*

@@ -1264,6 +1265,7 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 
 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 {
+	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sock_rfree;
 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);

@@ -1274,7 +1274,7 @@ struct xfrm_tunnel {
 struct xfrm6_tunnel {
 	int (*handler)(struct sk_buff *skb);
 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			   int type, int code, int offset, __be32 info);
+			   u8 type, u8 code, int offset, __be32 info);
 	struct xfrm6_tunnel *next;
 	int priority;
 };

@@ -437,8 +437,7 @@ free:
 int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type *ptype, struct net_device *orig_dev)
 {
-	skb->sk = NULL;		/* Initially we don't know who it's for */
-	skb->destructor = NULL;	/* Who initializes this, dammit?! */
+	skb_orphan(skb);
 
 	if (!net_eq(dev_net(dev), &init_net)) {
 		kfree_skb(skb);

@@ -2310,8 +2310,6 @@ ncls:
 	if (!skb)
 		goto out;
 
-	skb_orphan(skb);
-
 	type = skb->protocol;
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {

@@ -85,7 +85,7 @@ static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
 }
 
 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			int type, int code, int offset, __be32 info)
+			u8 type, u8 code, int offset, __be32 info)
 {
 	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
 	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);

@@ -1093,8 +1093,27 @@ restart:
 		 * If we drop it here, the callers have no way to resolve routes
 		 * when we're not caching. Instead, just point *rp at rt, so
 		 * the caller gets a single use out of the route
+		 * Note that we do rt_free on this new route entry, so that
+		 * once its refcount hits zero, we are still able to reap it
+		 * (Thanks Alexey)
+		 * Note also the rt_free uses call_rcu. We don't actually
+		 * need rcu protection here, this is just our path to get
+		 * on the route gc list.
 		 */
-		goto report_and_exit;
+
+		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+			int err = arp_bind_neighbour(&rt->u.dst);
+			if (err) {
+				if (net_ratelimit())
+					printk(KERN_WARNING
+					    "Neighbour table failure & not caching routes.\n");
+				rt_drop(rt);
+				return err;
+			}
+		}
+
+		rt_free(rt);
+		goto skip_hashing;
 	}
 
 	rthp = &rt_hash_table[hash].chain;

@@ -1211,7 +1230,8 @@ restart:
 #if RT_CACHE_DEBUG >= 2
 	if (rt->u.dst.rt_next) {
 		struct rtable *trt;
-		printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
+		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
+		       hash, &rt->rt_dst);
 		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
 			printk(" . %pI4", &trt->rt_dst);
 		printk("\n");

@@ -1226,7 +1246,7 @@ restart:
 
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 
-report_and_exit:
+skip_hashing:
 	if (rp)
 		*rp = rt;
 	else

@@ -405,7 +405,7 @@ out:
 }
 
 static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		    int type, int code, int offset, __be32 info)
+		    u8 type, u8 code, int offset, __be32 info)
 {
 	struct net *net = dev_net(skb->dev);
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;

@@ -354,7 +354,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		     int type, int code, int offset, __be32 info)
+		     u8 type, u8 code, int offset, __be32 info)
 {
 	struct net *net = dev_net(skb->dev);
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;

@@ -117,7 +117,7 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
 /*
  * Slightly more convenient version of icmpv6_send.
  */
-void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
+void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
 {
 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
 	kfree_skb(skb);

@@ -161,7 +161,7 @@ static int is_ineligible(struct sk_buff *skb)
 /*
  * Check the ICMP output rate limit
  */
-static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
+static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
 				     struct flowi *fl)
 {
 	struct dst_entry *dst;

@@ -305,7 +305,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
 /*
  * Send an ICMP message in response to a packet in error
  */
-void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
+void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 		 struct net_device *dev)
 {
 	struct net *net = dev_net(skb->dev);

@@ -590,7 +590,7 @@ out:
 	icmpv6_xmit_unlock(sk);
 }
 
-static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
+static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 {
 	struct inet6_protocol *ipprot;
 	int inner_offset;

@@ -643,7 +643,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
 	struct in6_addr *saddr, *daddr;
 	struct ipv6hdr *orig_hdr;
 	struct icmp6hdr *hdr;
-	int type;
+	u8 type;
 
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 		struct sec_path *sp = skb_sec_path(skb);

@@ -914,7 +914,7 @@ static const struct icmp6_err {
 	},
 };
 
-int icmpv6_err_convert(int type, int code, int *err)
+int icmpv6_err_convert(u8 type, u8 code, int *err)
 {
 	int fatal = 0;
 

@@ -394,13 +394,13 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
 
 static int
 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
-	    int *type, int *code, int *msg, __u32 *info, int offset)
+	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
 {
 	struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
 	struct ip6_tnl *t;
 	int rel_msg = 0;
-	int rel_type = ICMPV6_DEST_UNREACH;
-	int rel_code = ICMPV6_ADDR_UNREACH;
+	u8 rel_type = ICMPV6_DEST_UNREACH;
+	u8 rel_code = ICMPV6_ADDR_UNREACH;
 	__u32 rel_info = 0;
 	__u16 len;
 	int err = -ENOENT;

@@ -488,11 +488,11 @@ out:
 
 static int
 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-	   int type, int code, int offset, __be32 info)
+	   u8 type, u8 code, int offset, __be32 info)
 {
 	int rel_msg = 0;
-	int rel_type = type;
-	int rel_code = code;
+	u8 rel_type = type;
+	u8 rel_code = code;
 	__u32 rel_info = ntohl(info);
 	int err;
 	struct sk_buff *skb2;

@@ -586,11 +586,11 @@ out:
 
 static int
 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-	   int type, int code, int offset, __be32 info)
+	   u8 type, u8 code, int offset, __be32 info)
 {
 	int rel_msg = 0;
-	int rel_type = type;
-	int rel_code = code;
+	u8 rel_type = type;
+	u8 rel_code = code;
 	__u32 rel_info = ntohl(info);
 	int err;
 

@@ -51,7 +51,7 @@
 #include <linux/mutex.h>
 
 static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-				int type, int code, int offset, __be32 info)
+				u8 type, u8 code, int offset, __be32 info)
 {
 	__be32 spi;
 	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;

@@ -54,7 +54,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
 	return data + padlen;
 }
 
-static inline void mip6_param_prob(struct sk_buff *skb, int code, int pos)
+static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
 {
 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
 }

@@ -310,7 +310,7 @@ out:
 
 static void rawv6_err(struct sock *sk, struct sk_buff *skb,
 	       struct inet6_skb_parm *opt,
-	       int type, int code, int offset, __be32 info)
+	       u8 type, u8 code, int offset, __be32 info)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);

@@ -343,7 +343,7 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
 }
 
 void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
-		int type, int code, int inner_offset, __be32 info)
+		u8 type, u8 code, int inner_offset, __be32 info)
 {
 	struct sock *sk;
 	int hash;

@@ -1865,7 +1865,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
  *	Drop the packet on the floor
  */
 
-static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes)
+static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
 {
 	int type;
 	struct dst_entry *dst = skb_dst(skb);

@@ -317,7 +317,7 @@ failure:
 }
 
 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		int type, int code, int offset, __be32 info)
+		u8 type, u8 code, int offset, __be32 info)
 {
 	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);

@@ -124,7 +124,7 @@ drop:
 }
 
 static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			int type, int code, int offset, __be32 info)
+			u8 type, u8 code, int offset, __be32 info)
 {
 	struct xfrm6_tunnel *handler;
 

@@ -312,7 +312,7 @@ csum_copy_err:
 }
 
 void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		    int type, int code, int offset, __be32 info,
+		    u8 type, u8 code, int offset, __be32 info,
 		    struct udp_table *udptable)
 {
 	struct ipv6_pinfo *np;

@@ -346,8 +346,8 @@ out:
 }
 
 static __inline__ void udpv6_err(struct sk_buff *skb,
-				 struct inet6_skb_parm *opt, int type,
-				 int code, int offset, __be32 info )
+				 struct inet6_skb_parm *opt, u8 type,
+				 u8 code, int offset, __be32 info )
 {
 	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 }

@@ -9,7 +9,7 @@
 
 extern int	__udp6_lib_rcv(struct sk_buff *, struct udp_table *, int );
 extern void	__udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
-			       int , int , int , __be32 , struct udp_table *);
+			       u8 , u8 , int , __be32 , struct udp_table *);
 
 extern int	udp_v6_get_port(struct sock *sk, unsigned short snum);
 

@@ -20,7 +20,7 @@ static int udplitev6_rcv(struct sk_buff *skb)
 
 static void udplitev6_err(struct sk_buff *skb,
 			  struct inet6_skb_parm *opt,
-			  int type, int code, int offset, __be32 info)
+			  u8 type, u8 code, int offset, __be32 info)
 {
 	__udp6_lib_err(skb, opt, type, code, offset, info, &udplite_table);
 }

@@ -262,7 +262,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			    int type, int code, int offset, __be32 info)
+			    u8 type, u8 code, int offset, __be32 info)
 {
 	/* xfrm6_tunnel native err handling */
 	switch (type) {

@@ -913,9 +913,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
 	/* Clean up the original one to keep it in listen state */
 	irttp_listen(self->tsap);
 
-	/* Wow ! What is that ? Jean II */
-	skb->sk = NULL;
-	skb->destructor = NULL;
 	kfree_skb(skb);
 	sk->sk_ack_backlog--;
 

@@ -196,6 +196,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self,
 	/* Don't forget to refcount it - see ircomm_tty_do_softint() */
 	skb_get(skb);
 
+	skb_orphan(skb);
 	skb->destructor = ircomm_lmp_flow_control;
 
 	if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {

@@ -335,7 +335,8 @@ begin:
 	h = __nf_conntrack_find(net, tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
-		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		if (unlikely(nf_ct_is_dying(ct) ||
+			     !atomic_inc_not_zero(&ct->ct_general.use)))
 			h = NULL;
 		else {
 			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {

@@ -425,7 +426,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* Remove from unconfirmed list */
 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 
-	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	/* Timer relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */

@@ -433,8 +433,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	add_timer(&ct->timeout);
 	atomic_inc(&ct->ct_general.use);
 	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
+	/* Since the lookup is lockless, hash insertion must be done after
+	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
+	 * guarantee that no other CPU can find the conntrack before the above
+	 * stores are visible.
+	 */
+	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	NF_CT_STAT_INC(net, insert);
 	spin_unlock_bh(&nf_conntrack_lock);
+
 	help = nfct_help(ct);
 	if (help && help->helper)
 		nf_conntrack_event_cache(IPCT_HELPER, ct);

@@ -503,7 +511,8 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 			cnt++;
 		}
 
-		if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		if (ct && unlikely(nf_ct_is_dying(ct) ||
+				   !atomic_inc_not_zero(&ct->ct_general.use)))
 			ct = NULL;
 		if (ct || cnt >= NF_CT_EVICTION_RANGE)
 			break;

@@ -1267,13 +1276,19 @@ err_cache:
 	return ret;
 }
 
+/*
+ * We need to use special "null" values, not used in hash table
+ */
+#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
+#define DYING_NULLS_VAL		((1<<30)+1)
+
 static int nf_conntrack_init_net(struct net *net)
 {
 	int ret;
 
 	atomic_set(&net->ct.count, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
+	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
+	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat) {
 		ret = -ENOMEM;

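The conntrack hunks above (the lookup change at line 335 and the early_drop() change at line 503) both rely on the same lockless-lookup idiom: under RCU, take a reference only while the refcount is still non-zero and the entry is not already dying, then revalidate the entry (tuple equality in the lookup path) once the reference is held, retrying if it turned out to be stale or reused. The sketch below is a rough user-space analogue of that idiom using C11 atomics; struct entry, entry_get_unless_zero() and lookup() are invented names for illustration and none of this is the kernel's nf_conntrack API.

/*
 * User-space sketch (not the kernel API) of the take-reference-then-
 * revalidate pattern: only succeed while the object is live, then
 * re-check identity/state after the reference is held, dropping the
 * reference and retrying the lookup if the check fails.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	atomic_uint refcnt;	/* 0 means the entry is being freed */
	atomic_bool dying;	/* set when the entry is scheduled for removal */
	unsigned int key;	/* identity we must revalidate after the get */
};

/* Equivalent in spirit to atomic_inc_not_zero(): only succeed while live. */
static bool entry_get_unless_zero(struct entry *e)
{
	unsigned int old = atomic_load(&e->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&e->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

/* Lookup under an RCU-like read side (elided): find, get, revalidate. */
static struct entry *lookup(struct entry *candidate, unsigned int key)
{
	if (!candidate)
		return NULL;

	/* Refuse entries already dying, and entries whose refcount hit zero. */
	if (atomic_load(&candidate->dying) || !entry_get_unless_zero(candidate))
		return NULL;

	/* The entry may have been reused for a different key in between:
	 * revalidate after the reference is held, drop it if stale. */
	if (candidate->key != key) {
		atomic_fetch_sub(&candidate->refcnt, 1);
		return NULL;	/* caller would retry the whole lookup */
	}
	return candidate;
}

int main(void)
{
	struct entry e = { .refcnt = 1, .dying = false, .key = 42 };

	printf("hit: %s\n", lookup(&e, 42) ? "yes" : "no");
	atomic_store(&e.dying, true);
	printf("after dying is set: %s\n", lookup(&e, 42) ? "yes" : "no");
	return 0;
}
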
@@ -47,7 +47,6 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
 	mutex_lock(&nf_log_mutex);
 
 	if (pf == NFPROTO_UNSPEC) {
-		int i;
 		for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
 			list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
 	} else {

@@ -216,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
 #endif /* PROC_FS */
 
 #ifdef CONFIG_SYSCTL
-struct ctl_path nf_log_sysctl_path[] = {
+static struct ctl_path nf_log_sysctl_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "netfilter", .ctl_name = NET_NETFILTER, },
 	{ .procname = "nf_log", .ctl_name = CTL_UNNUMBERED, },

@@ -228,19 +227,26 @@ static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
 static struct ctl_table_header *nf_log_dir_header;
 
 static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
-			 void *buffer, size_t *lenp, loff_t *ppos)
+			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	const struct nf_logger *logger;
+	char buf[NFLOGGER_NAME_LEN];
+	size_t size = *lenp;
 	int r = 0;
 	int tindex = (unsigned long)table->extra1;
 
 	if (write) {
-		if (!strcmp(buffer, "NONE")) {
+		if (size > sizeof(buf))
+			size = sizeof(buf);
+		if (copy_from_user(buf, buffer, size))
+			return -EFAULT;
+
+		if (!strcmp(buf, "NONE")) {
 			nf_log_unbind_pf(tindex);
 			return 0;
 		}
 		mutex_lock(&nf_log_mutex);
-		logger = __find_logger(tindex, buffer);
+		logger = __find_logger(tindex, buf);
 		if (logger == NULL) {
 			mutex_unlock(&nf_log_mutex);
 			return -ENOENT;

@@ -40,12 +40,12 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
 static u32 hash_v4(const struct sk_buff *skb)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	u32 ipaddr;
+	__be32 ipaddr;
 
 	/* packets in either direction go into same queue */
 	ipaddr = iph->saddr ^ iph->daddr;
 
-	return jhash_2words(ipaddr, iph->protocol, jhash_initval);
+	return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
 }
 
 static unsigned int

@@ -63,14 +63,14 @@ nfqueue_tg4_v1(struct sk_buff *skb, const struct xt_target_param *par)
 static u32 hash_v6(const struct sk_buff *skb)
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	u32 addr[4];
+	__be32 addr[4];
 
 	addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
 	addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
 	addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
 	addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];
 
-	return jhash2(addr, ARRAY_SIZE(addr), jhash_initval);
+	return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
 }
 
 static unsigned int

@@ -15,14 +15,14 @@
 #include <net/netfilter/nf_conntrack.h>
 #include <linux/netfilter/xt_cluster.h>
 
-static inline u_int32_t nf_ct_orig_ipv4_src(const struct nf_conn *ct)
+static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct)
 {
-	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+	return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
 }
 
-static inline const void *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
+static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
 {
-	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
+	return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
 }
 
 static inline u_int32_t

@@ -54,6 +54,7 @@ static bool quota_mt_check(const struct xt_mtchk_param *par)
 	if (q->master == NULL)
 		return -ENOMEM;
 
+	q->master->quota = q->quota;
 	return true;
 }
 

@@ -66,7 +66,7 @@ xt_rateest_mt(const struct sk_buff *skb, const struct xt_match_param *par)
 		if (info->flags & XT_RATEEST_MATCH_BPS)
 			ret &= bps1 == bps2;
 		if (info->flags & XT_RATEEST_MATCH_PPS)
-			ret &= pps2 == pps2;
+			ret &= pps1 == pps2;
 		break;
 	}
 

@@ -133,7 +133,7 @@ static struct notifier_block sctp_inet6addr_notifier = {
 
 /* ICMP error handler. */
 SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			     int type, int code, int offset, __be32 info)
+			     u8 type, u8 code, int offset, __be32 info)
 {
 	struct inet6_dev *idev;
 	struct sock *sk;