Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [IRDA] IRNET: Fix build when TCGETS2 is defined.
  [NET]: docbook fixes for netif_ functions
  [NET]: Hide the net_ns kmem cache
  [NET]: Mark the setup_net as __net_init
  [NET]: Hide the dead code in the net_namespace.c
  [NET]: Relax the reference counting of init_net_ns
  [NETNS]: Make the init/exit hooks checks outside the loop
  [NET]: Forget the zero_it argument of sk_alloc()
  [NET]: Remove bogus zero_it argument from sk_alloc
  [NET]: Make the sk_clone() lighter
  [NET]: Move some core sock setup into sk_prot_alloc
  [NET]: Auto-zero the allocated sock object
  [NET]: Cleanup the allocation/freeing of the sock object
  [NET]: Move the get_net() from sock_copy()
  [NET]: Move the sock_copy() from the header
  [TCP]: Another TAGBITS -> SACKED_ACKED|LOST conversion
  [TCP]: Process DSACKs that reside within a SACK block
commit 54866f0323
39 changed files with 295 additions and 191 deletions
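Most of the per-protocol hunks below are the mechanical side of the sk_alloc() change: the zero_it argument is gone, so every caller simply drops the trailing 1. A minimal sketch of the pattern, with a made-up protocol name standing in for the real call sites shown further down:

	/* before: the caller asked sk_alloc() to zero the new sock */
	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto, 1);

	/* after: zeroing is implicit (sk_prot_alloc() adds __GFP_ZERO) */
	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);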
@@ -491,7 +491,7 @@ static int pppoe_create(struct net *net, struct socket *sock)
 	int error = -ENOMEM;
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, 1);
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
 	if (!sk)
 		goto out;
 
@@ -1416,7 +1416,7 @@ static int pppol2tp_create(struct net *net, struct socket *sock)
 	int error = -ENOMEM;
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1);
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
 	if (!sk)
 		goto out;
 
@@ -739,6 +739,16 @@ static inline void *netdev_priv(const struct net_device *dev)
  */
 #define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
 
+/**
+ *	netif_napi_add - initialize a napi context
+ *	@dev:  network device
+ *	@napi: napi context
+ *	@poll: polling function
+ *	@weight: default weight
+ *
+ * netif_napi_add() must be used to initialize a napi context prior to calling
+ * *any* of the other napi related functions.
+ */
 static inline void netif_napi_add(struct net_device *dev,
 				  struct napi_struct *napi,
 				  int (*poll)(struct napi_struct *, int),
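The kernel-doc added above says netif_napi_add() must run before any other NAPI call. A hedged sketch of the usual driver-side pattern under that rule; my_priv, my_poll and the weight of 64 are illustrative, not taken from this diff:

	struct my_priv {
		struct napi_struct napi;
		/* ... device state ... */
	};

	/* poll callback: reap up to 'budget' packets, return the number handled */
	static int my_poll(struct napi_struct *napi, int budget)
	{
		return 0;
	}

	static void my_setup(struct net_device *dev, struct my_priv *priv)
	{
		netif_napi_add(dev, &priv->napi, my_poll, 64);
	}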
@@ -51,13 +51,12 @@ static inline struct net *copy_net_ns(unsigned long flags, struct net *net_ns)
 }
 #endif
 
+#ifdef CONFIG_NET_NS
 extern void __put_net(struct net *net);
 
 static inline struct net *get_net(struct net *net)
 {
-#ifdef CONFIG_NET
 	atomic_inc(&net->count);
-#endif
 	return net;
 }
 
@@ -75,26 +74,44 @@ static inline struct net *maybe_get_net(struct net *net)
 
 static inline void put_net(struct net *net)
 {
-#ifdef CONFIG_NET
 	if (atomic_dec_and_test(&net->count))
 		__put_net(net);
-#endif
 }
 
 static inline struct net *hold_net(struct net *net)
 {
-#ifdef CONFIG_NET
 	atomic_inc(&net->use_count);
-#endif
 	return net;
 }
 
 static inline void release_net(struct net *net)
 {
-#ifdef CONFIG_NET
 	atomic_dec(&net->use_count);
-#endif
 }
+#else
+static inline struct net *get_net(struct net *net)
+{
+	return net;
+}
+
+static inline void put_net(struct net *net)
+{
+}
+
+static inline struct net *hold_net(struct net *net)
+{
+	return net;
+}
+
+static inline void release_net(struct net *net)
+{
+}
+
+static inline struct net *maybe_get_net(struct net *net)
+{
+	return net;
+}
+#endif
 
 #define for_each_net(VAR)				\
 	list_for_each_entry(VAR, &net_namespace_list, list)
 
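With CONFIG_NET_NS the helpers above are plain reference counts on struct net: count drives teardown through __put_net(), while use_count is the hold_net()/release_net() usage counter; without CONFIG_NET_NS they collapse to no-ops. A hedged usage sketch, the surrounding function being hypothetical:

	static void example_use(struct net *net)
	{
		struct net *held = get_net(net);	/* pin the namespace */

		/* ... dereference 'held' safely here ... */

		put_net(held);	/* last put triggers __put_net() cleanup */
	}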
@@ -779,7 +779,7 @@ extern void FASTCALL(release_sock(struct sock *sk));
 
 extern struct sock		*sk_alloc(struct net *net, int family,
 					  gfp_t priority,
-					  struct proto *prot, int zero_it);
+					  struct proto *prot);
 extern void			sk_free(struct sock *sk);
 extern struct sock		*sk_clone(const struct sock *sk,
 					  const gfp_t priority);
@@ -993,20 +993,6 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
-static inline void sock_copy(struct sock *nsk, const struct sock *osk)
-{
-#ifdef CONFIG_SECURITY_NETWORK
-	void *sptr = nsk->sk_security;
-#endif
-
-	memcpy(nsk, osk, osk->sk_prot->obj_size);
-	get_net(nsk->sk_net);
-#ifdef CONFIG_SECURITY_NETWORK
-	nsk->sk_security = sptr;
-	security_sk_clone(osk, nsk);
-#endif
-}
-
 extern int sock_i_uid(struct sock *sk);
 extern unsigned long sock_i_ino(struct sock *sk);
 
@@ -1044,7 +1044,7 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol)
 	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
 		goto out;
 	rc = -ENOMEM;
-	sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, 1);
+	sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);
 	if (!sk)
 		goto out;
 	rc = 0;
@@ -133,7 +133,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
 	sock->sk = NULL;
 	if (sock->type == SOCK_STREAM)
 		return -EINVAL;
-	sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, 1);
+	sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
 	if (!sk)
 		return -ENOMEM;
 	sock_init_data(sock, sk);
@@ -836,7 +836,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol)
 		return -ESOCKTNOSUPPORT;
 	}
 
-	if ((sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, 1)) == NULL)
+	sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto);
+	if (sk == NULL)
 		return -ENOMEM;
 
 	ax25 = sk->sk_protinfo = ax25_create_cb();
@@ -861,7 +862,8 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
 	struct sock *sk;
 	ax25_cb *ax25, *oax25;
 
-	if ((sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot, 1)) == NULL)
+	sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot);
+	if (sk == NULL)
 		return NULL;
 
 	if ((ax25 = ax25_create_cb()) == NULL) {
@@ -213,7 +213,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol)
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, 1);
+	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
 	if (!sk)
 		return -ENOMEM;
 
@@ -204,7 +204,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol)
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, 1);
+	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto);
 	if (!sk)
 		return -ENOMEM;
 
@@ -645,7 +645,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol)
 
 	sock->ops = &hci_sock_ops;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1);
+	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
 	if (!sk)
 		return -ENOMEM;
 
@@ -255,7 +255,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol)
 	if (sock->type != SOCK_RAW)
 		return -ESOCKTNOSUPPORT;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, 1);
+	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);
 	if (!sk)
 		return -ENOMEM;
 
@@ -607,7 +607,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
 {
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1);
+	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
 	if (!sk)
 		return NULL;
 
@@ -287,7 +287,7 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int
 	struct rfcomm_dlc *d;
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, 1);
+	sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
 	if (!sk)
 		return NULL;
 
@@ -421,7 +421,7 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
 {
 	struct sock *sk;
 
-	sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, 1);
+	sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
 	if (!sk)
 		return NULL;
 
@@ -1751,9 +1751,6 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
  *
  *	return values:
  *	NET_RX_SUCCESS	(no congestion)
- *	NET_RX_CN_LOW     (low congestion)
- *	NET_RX_CN_MOD     (moderate congestion)
- *	NET_RX_CN_HIGH    (high congestion)
  *	NET_RX_DROP     (packet was dropped)
  *
  */
@@ -2001,6 +1998,21 @@ out:
 }
 #endif
 
+/**
+ *	netif_receive_skb - process receive buffer from network
+ *	@skb: buffer to process
+ *
+ *	netif_receive_skb() is the main receive data processing function.
+ *	It always succeeds. The buffer may be dropped during processing
+ *	for congestion control or by the protocol layers.
+ *
+ *	This function may only be called from softirq context and interrupts
+ *	should be enabled.
+ *
+ *	Return values (usually ignored):
+ *	NET_RX_SUCCESS: no congestion
+ *	NET_RX_DROP: packet was dropped
+ */
 int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
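A hedged sketch of the call site the new comment describes: a NAPI poll routine handing buffers to netif_receive_skb() from softirq context. my_poll and my_fetch_rx_skb are placeholders for driver-specific code:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;
		struct sk_buff *skb;

		while (done < budget && (skb = my_fetch_rx_skb(napi)) != NULL) {
			netif_receive_skb(skb);	/* return value usually ignored */
			done++;
		}
		return done;
	}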
@@ -17,16 +17,89 @@ static DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 
-static struct kmem_cache *net_cachep;
-
 struct net init_net;
 EXPORT_SYMBOL_GPL(init_net);
 
+/*
+ * setup_net runs the initializers for the network namespace object.
+ */
+static __net_init int setup_net(struct net *net)
+{
+	/* Must be called with net_mutex held */
+	struct pernet_operations *ops;
+	int error;
+
+	atomic_set(&net->count, 1);
+	atomic_set(&net->use_count, 0);
+
+	error = 0;
+	list_for_each_entry(ops, &pernet_list, list) {
+		if (ops->init) {
+			error = ops->init(net);
+			if (error < 0)
+				goto out_undo;
+		}
+	}
+out:
+	return error;
+
+out_undo:
+	/* Walk through the list backwards calling the exit functions
+	 * for the pernet modules whose init functions did not fail.
+	 */
+	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
+		if (ops->exit)
+			ops->exit(net);
+	}
+
+	rcu_barrier();
+	goto out;
+}
+
+#ifdef CONFIG_NET_NS
+static struct kmem_cache *net_cachep;
+
 static struct net *net_alloc(void)
 {
 	return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
 }
 
+struct net *copy_net_ns(unsigned long flags, struct net *old_net)
+{
+	struct net *new_net = NULL;
+	int err;
+
+	get_net(old_net);
+
+	if (!(flags & CLONE_NEWNET))
+		return old_net;
+
+	err = -ENOMEM;
+	new_net = net_alloc();
+	if (!new_net)
+		goto out;
+
+	mutex_lock(&net_mutex);
+	err = setup_net(new_net);
+	if (err)
+		goto out_unlock;
+
+	rtnl_lock();
+	list_add_tail(&new_net->list, &net_namespace_list);
+	rtnl_unlock();
+
+
+out_unlock:
+	mutex_unlock(&net_mutex);
+out:
+	put_net(old_net);
+	if (err) {
+		net_free(new_net);
+		new_net = ERR_PTR(err);
+	}
+	return new_net;
+}
+
 static void net_free(struct net *net)
 {
 	if (!net)
@@ -72,7 +145,6 @@ static void cleanup_net(struct work_struct *work)
 	net_free(net);
 }
 
-
 void __put_net(struct net *net)
 {
 	/* Cleanup the network namespace in process context */
@@ -81,90 +153,25 @@ void __put_net(struct net *net)
 }
 EXPORT_SYMBOL_GPL(__put_net);
 
-/*
- * setup_net runs the initializers for the network namespace object.
- */
-static int setup_net(struct net *net)
-{
-	/* Must be called with net_mutex held */
-	struct pernet_operations *ops;
-	int error;
-
-	atomic_set(&net->count, 1);
-	atomic_set(&net->use_count, 0);
-
-	error = 0;
-	list_for_each_entry(ops, &pernet_list, list) {
-		if (ops->init) {
-			error = ops->init(net);
-			if (error < 0)
-				goto out_undo;
-		}
-	}
-out:
-	return error;
-
-out_undo:
-	/* Walk through the list backwards calling the exit functions
-	 * for the pernet modules whose init functions did not fail.
-	 */
-	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
-		if (ops->exit)
-			ops->exit(net);
-	}
-
-	rcu_barrier();
-	goto out;
-}
-
+#else
 struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 {
-	struct net *new_net = NULL;
-	int err;
-
-	get_net(old_net);
-
-	if (!(flags & CLONE_NEWNET))
-		return old_net;
-
-#ifndef CONFIG_NET_NS
-	return ERR_PTR(-EINVAL);
-#endif
-
-	err = -ENOMEM;
-	new_net = net_alloc();
-	if (!new_net)
-		goto out;
-
-	mutex_lock(&net_mutex);
-	err = setup_net(new_net);
-	if (err)
-		goto out_unlock;
-
-	rtnl_lock();
-	list_add_tail(&new_net->list, &net_namespace_list);
-	rtnl_unlock();
-
-
-out_unlock:
-	mutex_unlock(&net_mutex);
-out:
-	put_net(old_net);
-	if (err) {
-		net_free(new_net);
-		new_net = ERR_PTR(err);
-	}
-	return new_net;
+	if (flags & CLONE_NEWNET)
+		return ERR_PTR(-EINVAL);
+	return old_net;
 }
+#endif
 
 static int __init net_ns_init(void)
 {
 	int err;
 
 	printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net));
+#ifdef CONFIG_NET_NS
 	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
 					SMP_CACHE_BYTES,
 					SLAB_PANIC, NULL);
+#endif
 	mutex_lock(&net_mutex);
 	err = setup_net(&init_net);
 
@@ -187,29 +194,28 @@ static int register_pernet_operations(struct list_head *list,
 	struct net *net, *undo_net;
 	int error;
 
-	error = 0;
 	list_add_tail(&ops->list, list);
-	for_each_net(net) {
-		if (ops->init) {
+	if (ops->init) {
+		for_each_net(net) {
 			error = ops->init(net);
 			if (error)
 				goto out_undo;
 		}
 	}
-out:
-	return error;
+	return 0;
 
 out_undo:
 	/* If I have an error cleanup all namespaces I initialized */
 	list_del(&ops->list);
-	for_each_net(undo_net) {
-		if (undo_net == net)
-			goto undone;
-		if (ops->exit)
+	if (ops->exit) {
+		for_each_net(undo_net) {
+			if (undo_net == net)
+				goto undone;
 			ops->exit(undo_net);
+		}
 	}
 undone:
-	goto out;
+	return error;
 }
 
 static void unregister_pernet_operations(struct pernet_operations *ops)
@@ -217,8 +223,8 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
 	struct net *net;
 
 	list_del(&ops->list);
-	for_each_net(net)
-		if (ops->exit)
+	if (ops->exit)
+		for_each_net(net)
 			ops->exit(net);
 }
 
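The two hunks above only hoist the ops->init / ops->exit NULL checks out of the per-namespace loops; the registration interface itself is unchanged. A hedged sketch of a subsystem plugging into it, with made-up names:

	static int __net_init example_net_init(struct net *net)
	{
		/* allocate state for this namespace */
		return 0;
	}

	static void __net_exit example_net_exit(struct net *net)
	{
		/* tear down the per-namespace state */
	}

	static struct pernet_operations example_net_ops = {
		.init = example_net_init,
		.exit = example_net_exit,
	};

	/* typically registered via register_pernet_subsys(&example_net_ops); */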
net/core/sock.c | 117
@@ -857,6 +857,67 @@ static inline void sock_lock_init(struct sock *sk)
 			af_family_keys + sk->sk_family);
 }
 
+static void sock_copy(struct sock *nsk, const struct sock *osk)
+{
+#ifdef CONFIG_SECURITY_NETWORK
+	void *sptr = nsk->sk_security;
+#endif
+
+	memcpy(nsk, osk, osk->sk_prot->obj_size);
+#ifdef CONFIG_SECURITY_NETWORK
+	nsk->sk_security = sptr;
+	security_sk_clone(osk, nsk);
+#endif
+}
+
+static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
+		int family)
+{
+	struct sock *sk;
+	struct kmem_cache *slab;
+
+	slab = prot->slab;
+	if (slab != NULL)
+		sk = kmem_cache_alloc(slab, priority);
+	else
+		sk = kmalloc(prot->obj_size, priority);
+
+	if (sk != NULL) {
+		if (security_sk_alloc(sk, family, priority))
+			goto out_free;
+
+		if (!try_module_get(prot->owner))
+			goto out_free_sec;
+	}
+
+	return sk;
+
+out_free_sec:
+	security_sk_free(sk);
+out_free:
+	if (slab != NULL)
+		kmem_cache_free(slab, sk);
+	else
+		kfree(sk);
+	return NULL;
+}
+
+static void sk_prot_free(struct proto *prot, struct sock *sk)
+{
+	struct kmem_cache *slab;
+	struct module *owner;
+
+	owner = prot->owner;
+	slab = prot->slab;
+
+	security_sk_free(sk);
+	if (slab != NULL)
+		kmem_cache_free(slab, sk);
+	else
+		kfree(sk);
+	module_put(owner);
+}
+
 /**
  *	sk_alloc - All socket objects are allocated here
  *	@net: the applicable net namespace
@@ -866,49 +927,28 @@ static inline void sock_lock_init(struct sock *sk)
- *	@zero_it: if we should zero the newly allocated sock
  */
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
-		      struct proto *prot, int zero_it)
+		      struct proto *prot)
 {
-	struct sock *sk = NULL;
-	struct kmem_cache *slab = prot->slab;
-
-	if (slab != NULL)
-		sk = kmem_cache_alloc(slab, priority);
-	else
-		sk = kmalloc(prot->obj_size, priority);
+	struct sock *sk;
 
+	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
 	if (sk) {
-		if (zero_it) {
-			memset(sk, 0, prot->obj_size);
-			sk->sk_family = family;
-			/*
-			 * See comment in struct sock definition to understand
-			 * why we need sk_prot_creator -acme
-			 */
-			sk->sk_prot = sk->sk_prot_creator = prot;
-			sock_lock_init(sk);
-			sk->sk_net = get_net(net);
-		}
-
-		if (security_sk_alloc(sk, family, priority))
-			goto out_free;
-
-		if (!try_module_get(prot->owner))
-			goto out_free;
+		sk->sk_family = family;
+		/*
+		 * See comment in struct sock definition to understand
+		 * why we need sk_prot_creator -acme
+		 */
+		sk->sk_prot = sk->sk_prot_creator = prot;
+		sock_lock_init(sk);
+		sk->sk_net = get_net(net);
 	}
-	return sk;
 
-out_free:
-	if (slab != NULL)
-		kmem_cache_free(slab, sk);
-	else
-		kfree(sk);
-	return NULL;
+	return sk;
 }
 
 void sk_free(struct sock *sk)
 {
 	struct sk_filter *filter;
-	struct module *owner = sk->sk_prot_creator->owner;
 
 	if (sk->sk_destruct)
 		sk->sk_destruct(sk);
@@ -925,25 +965,22 @@ void sk_free(struct sock *sk)
 		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
 		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
 
-	security_sk_free(sk);
 	put_net(sk->sk_net);
-	if (sk->sk_prot_creator->slab != NULL)
-		kmem_cache_free(sk->sk_prot_creator->slab, sk);
-	else
-		kfree(sk);
-	module_put(owner);
+	sk_prot_free(sk->sk_prot_creator, sk);
 }
 
 struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 {
-	struct sock *newsk = sk_alloc(sk->sk_net, sk->sk_family, priority, sk->sk_prot, 0);
+	struct sock *newsk;
 
+	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
 	if (newsk != NULL) {
 		struct sk_filter *filter;
 
 		sock_copy(newsk, sk);
 
 		/* SANITY */
+		get_net(newsk->sk_net);
 		sk_node_init(&newsk->sk_node);
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
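sk_prot_alloc() and sk_prot_free() above pick between the per-protocol slab and kmalloc() purely from fields of struct proto, and pin the owning module. A hedged sketch of the fields involved, for an invented protocol:

	struct example_sock {
		struct sock sk;		/* struct sock must come first */
		int example_state;
	};

	static struct proto example_proto = {
		.name	  = "EXAMPLE",
		.owner	  = THIS_MODULE,		  /* held via try_module_get() */
		.obj_size = sizeof(struct example_sock),  /* size for the kmalloc() path */
		/* .slab is set up by proto_register() when a slab cache is requested */
	};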
@@ -474,7 +474,7 @@ static struct proto dn_proto = {
 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
 {
 	struct dn_scp *scp;
-	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, 1);
+	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);
 
 	if (!sk)
 		goto out;
@@ -624,7 +624,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol)
 	sock->state = SS_UNCONNECTED;
 
 	err = -ENOBUFS;
-	sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto, 1);
+	sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
 	if (sk == NULL)
 		goto out;
 
@@ -323,7 +323,7 @@ lookup_protocol:
 	BUG_TRAP(answer_prot->slab != NULL);
 
 	err = -ENOBUFS;
-	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, 1);
+	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
 	if (sk == NULL)
 		goto out;
 
@@ -1330,12 +1330,15 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			cached_fack_count = 0;
 	}
 
-	for (i=0; i<num_sacks; i++, sp++) {
+	for (i = 0; i < num_sacks; i++) {
 		struct sk_buff *skb;
 		__u32 start_seq = ntohl(sp->start_seq);
 		__u32 end_seq = ntohl(sp->end_seq);
 		int fack_count;
 		int dup_sack = (found_dup_sack && (i == first_sack_index));
+		int next_dup = (found_dup_sack && (i+1 == first_sack_index));
 
+		sp++;
+
 		if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
 			if (dup_sack) {
@@ -1361,7 +1364,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			flag |= FLAG_DATA_LOST;
 
 		tcp_for_write_queue_from(skb, sk) {
-			int in_sack;
+			int in_sack = 0;
 			u8 sacked;
 
 			if (skb == tcp_send_head(sk))
@@ -1380,7 +1383,23 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			if (!before(TCP_SKB_CB(skb)->seq, end_seq))
 				break;
 
-			in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
+			dup_sack = (found_dup_sack && (i == first_sack_index));
+
+			/* Due to sorting DSACK may reside within this SACK block! */
+			if (next_dup) {
+				u32 dup_start = ntohl(sp->start_seq);
+				u32 dup_end = ntohl(sp->end_seq);
+
+				if (before(TCP_SKB_CB(skb)->seq, dup_end)) {
+					in_sack = tcp_match_skb_to_sack(sk, skb, dup_start, dup_end);
+					if (in_sack > 0)
+						dup_sack = 1;
+				}
+			}
+
+			/* DSACK info lost if out-of-mem, try SACK still */
+			if (in_sack <= 0)
+				in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
 			if (in_sack < 0)
 				break;
 
@@ -2059,7 +2078,7 @@ static void tcp_update_scoreboard(struct sock *sk)
 			if (!tcp_skb_timedout(sk, skb))
 				break;
 
-			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
+			if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 				tp->lost_out += tcp_skb_pcount(skb);
 				tcp_verify_retransmit_hint(tp, skb);
@@ -162,7 +162,7 @@ lookup_protocol:
 	BUG_TRAP(answer_prot->slab != NULL);
 
 	err = -ENOBUFS;
-	sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, 1);
+	sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
 	if (sk == NULL)
 		goto out;
 
@@ -1381,7 +1381,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol)
 		goto out;
 
 	rc = -ENOMEM;
-	sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, 1);
+	sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto);
 	if (!sk)
 		goto out;
 #ifdef IPX_REFCNT_DEBUG
@@ -1078,7 +1078,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
 	}
 
 	/* Allocate networking socket */
-	sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto, 1);
+	sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto);
 	if (sk == NULL)
 		return -ENOMEM;
 
@@ -731,15 +731,25 @@ dev_irnet_ioctl(struct inode * inode,
       /* Get termios */
     case TCGETS:
       DEBUG(FS_INFO, "Get termios.\n");
+#ifndef TCGETS2
       if(kernel_termios_to_user_termios((struct termios __user *)argp, &ap->termios))
 	break;
+#else
+      if(kernel_termios_to_user_termios_1((struct termios __user *)argp, &ap->termios))
+	break;
+#endif
       err = 0;
       break;
       /* Set termios */
     case TCSETSF:
       DEBUG(FS_INFO, "Set termios.\n");
+#ifndef TCGETS2
       if(user_termios_to_kernel_termios(&ap->termios, (struct termios __user *)argp))
 	break;
+#else
+      if(user_termios_to_kernel_termios_1(&ap->termios, (struct termios __user *)argp))
+	break;
+#endif
       err = 0;
       break;
 
@@ -216,7 +216,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 {
 	struct sock *sk;
 
-	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1);
+	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
 	if (!sk)
 		return NULL;
 
@@ -152,7 +152,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol)
 		return -EPROTONOSUPPORT;
 
 	err = -ENOMEM;
-	sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, 1);
+	sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto);
 	if (sk == NULL)
 		goto out;
 
@@ -869,7 +869,7 @@ static void llc_sk_init(struct sock* sk)
  */
 struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot)
 {
-	struct sock *sk = sk_alloc(net, family, priority, prot, 1);
+	struct sock *sk = sk_alloc(net, family, priority, prot);
 
 	if (!sk)
 		goto out;
@@ -396,7 +396,7 @@ static int __netlink_create(struct net *net, struct socket *sock,
 
 	sock->ops = &netlink_ops;
 
-	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
+	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
 	if (!sk)
 		return -ENOMEM;
 
@@ -423,7 +423,8 @@ static int nr_create(struct net *net, struct socket *sock, int protocol)
 	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 		return -ESOCKTNOSUPPORT;
 
-	if ((sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, 1)) == NULL)
+	sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
+	if (sk == NULL)
 		return -ENOMEM;
 
 	nr = nr_sk(sk);
@@ -465,7 +466,8 @@ static struct sock *nr_make_new(struct sock *osk)
 	if (osk->sk_type != SOCK_SEQPACKET)
 		return NULL;
 
-	if ((sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot, 1)) == NULL)
+	sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot);
+	if (sk == NULL)
 		return NULL;
 
 	nr = nr_sk(sk);
@@ -995,7 +995,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
 	sock->state = SS_UNCONNECTED;
 
 	err = -ENOBUFS;
-	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, 1);
+	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
 	if (sk == NULL)
 		goto out;
 
@@ -513,7 +513,8 @@ static int rose_create(struct net *net, struct socket *sock, int protocol)
 	if (sock->type != SOCK_SEQPACKET || protocol != 0)
 		return -ESOCKTNOSUPPORT;
 
-	if ((sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
+	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
+	if (sk == NULL)
 		return -ENOMEM;
 
 	rose = rose_sk(sk);
@@ -551,7 +552,8 @@ static struct sock *rose_make_new(struct sock *osk)
 	if (osk->sk_type != SOCK_SEQPACKET)
 		return NULL;
 
-	if ((sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
+	sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto);
+	if (sk == NULL)
 		return NULL;
 
 	rose = rose_sk(sk);
@@ -627,7 +627,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol)
 	sock->ops = &rxrpc_rpc_ops;
 	sock->state = SS_UNCONNECTED;
 
-	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1);
+	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
 	if (!sk)
 		return -ENOMEM;
 
@@ -631,7 +631,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct sctp6_sock *newsctp6sk;
 
-	newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot, 1);
+	newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot);
 	if (!newsk)
 		goto out;
 
@@ -552,7 +552,8 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct inet_sock *newinet;
-	struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, sk->sk_prot, 1);
+	struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL,
+			sk->sk_prot);
 
 	if (!newsk)
 		goto out;
@@ -201,7 +201,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
 		return -EPROTOTYPE;
 	}
 
-	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
+	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
 	if (!sk) {
 		tipc_deleteport(ref);
 		return -ENOMEM;
@@ -602,7 +602,7 @@ static struct sock * unix_create1(struct net *net, struct socket *sock)
 	if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
 		goto out;
 
-	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, 1);
+	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
 	if (!sk)
 		goto out;
 
@@ -472,7 +472,7 @@ static struct proto x25_proto = {
 static struct sock *x25_alloc_socket(struct net *net)
 {
 	struct x25_sock *x25;
-	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, 1);
+	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);
 
 	if (!sk)
 		goto out;
 