netfilter: nf_conntrack: nf_conntrack_alloc() fixes

When a slab cache uses SLAB_DESTROY_BY_RCU, we must be careful when allocating
objects, since the slab allocator may return a freed object that is still being
used by lockless readers.

In particular, nf_conntrack RCU lookups rely on ct->tuplehash[xxx].hnnode.next
always being valid (i.e. containing either a valid 'nulls' value or a valid
pointer to the next object in the hash chain).

kmem_cache_zalloc() sets up the object with NULL values, but NULL is not a valid
value for ct->tuplehash[xxx].hnnode.next; see the reader-side sketch below.
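
For illustration, here is a condensed sketch of the reader side, modeled on
__nf_conntrack_find()/nf_conntrack_find_get() (hashing, zone handling and
statistics are omitted; the function name and the 'bucket' parameter are made
up). The unlocked traversal follows hnnode.next with no protection against
concurrent frees, which is why that field must never be NULL:

	static struct nf_conntrack_tuple_hash *
	find_get_sketch(struct net *net, const struct nf_conntrack_tuple *tuple,
			unsigned int bucket)
	{
		struct nf_conntrack_tuple_hash *h;
		struct hlist_nulls_node *n;
		struct nf_conn *ct;

		rcu_read_lock();
	begin:
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
			if (!nf_ct_tuple_equal(tuple, &h->tuple))
				continue;
			ct = nf_ct_tuplehash_to_ctrack(h);
			/* refcnt 0 means the object is being freed: skip it */
			if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
				continue;
			/* the slab may have recycled the object: re-check the key */
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
				nf_ct_put(ct);
				goto begin;
			}
			rcu_read_unlock();
			return h;
		}
		/* a foreign nulls value means we drifted into another chain: restart */
		if (get_nulls_value(n) != bucket)
			goto begin;
		rcu_read_unlock();
		return NULL;
	}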

The fix is to call kmem_cache_alloc() and do the zeroing ourselves, leaving
both hnnode.next fields untouched.

As spotted by Patrick, we also need to make sure the lookup keys are committed
to memory before setting the refcount to 1, or a lockless reader could take a
reference on a recycled object while still observing its old keys; its key
re-check would then wrongly succeed.
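
Concretely, an illustrative interleaving (not verbatim kernel code) of the
race the barrier closes. The object was freed and recycled for a new tuple
while a reader looking up the OLD tuple still holds a pointer to it:

	writer (nf_conntrack_alloc)         reader (lockless lookup)
	---------------------------         ------------------------
	atomic_set(&use, 1)
	  [store made visible before the
	   tuple stores, absent a barrier]
	                                    atomic_inc_not_zero(&use)
	                                      succeeds: sees use == 1
	                                    key re-check still reads the
	                                      OLD tuple -> wrongly passes
	tuplehash[..].tuple = *orig

The smp_wmb() forces the tuple stores to become visible before the refcount
store; it pairs with the full barrier implied by a successful
atomic_inc_not_zero() on the reader side.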

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
commit 941297f443 (parent aa6a03eb0a)
Author: Eric Dumazet, 2009-07-16 14:03:40 +02:00
Committed by: Patrick McHardy
2 changed files with 24 additions and 4 deletions

Documentation/RCU/rculist_nulls.txt

@@ -83,11 +83,12 @@ not detect it missed following items in original chain.
 
 obj = kmem_cache_alloc(...);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
-atomic_inc(&obj->refcnt);
 /*
  * we need to make sure obj->key is updated before obj->next
+ * or obj->refcnt
  */
 smp_wmb();
+atomic_set(&obj->refcnt, 1);
 hlist_add_head_rcu(&obj->obj_node, list);
 unlock_chain(); // typically a spin_unlock()

@@ -159,6 +160,10 @@ out:
 obj = kmem_cache_alloc(cachep);
 lock_chain(); // typically a spin_lock()
 obj->key = key;
+/*
+ * changes to obj->key must be visible before refcnt one
+ */
+smp_wmb();
 atomic_set(&obj->refcnt, 1);
 /*
  * insert obj in RCU way (readers might be traversing chain)

net/netfilter/nf_conntrack_core.c

@@ -561,23 +561,38 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 		}
 	}
 
-	ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
+	/*
+	 * Do not use kmem_cache_zalloc(), as this cache uses
+	 * SLAB_DESTROY_BY_RCU.
+	 */
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
 	if (ct == NULL) {
 		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
 		atomic_dec(&net->ct.count);
 		return ERR_PTR(-ENOMEM);
 	}
-
+	/*
+	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
+	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
+	 */
+	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
+	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
 	spin_lock_init(&ct->lock);
-	atomic_set(&ct->ct_general.use, 1);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
 #ifdef CONFIG_NET_NS
 	ct->ct_net = net;
 #endif
 
+	/*
+	 * changes to lookup keys must be done before setting refcnt to 1
+	 */
+	smp_wmb();
+	atomic_set(&ct->ct_general.use, 1);
 	return ct;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
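
Note on the partial memset(): it relies on ct_general, lock and tuplehash[]
sitting at the head of struct nf_conn. Everything after the tuplehash[] array
is zeroed, while the head fields are re-initialized explicitly above.
Crucially, tuplehash[..].hnnode.next is never written at all, so a lockless
reader chasing a recycled object always finds either a valid pointer or a
valid 'nulls' value there.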