ipsec: Use RCU-like construct for saved state within a walk

Now that we save states within a walk, we need synchronisation so that
the list the saved state is on doesn't disappear from under us.

As it stands, this is done by keeping the state on the list, which is
bad because it gets in the way of managing the state's life-cycle.

An alternative is to make our own pseudo-RCU system: counters indicate
which states can't be freed immediately, because an ongoing walk may
still reference them when it resumes.
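
For reference, a minimal user-space sketch of that counter scheme: two
monotonically increasing generation counters acting as a poor man's grace
period. All names below are illustrative, not the kernel's, and the locking
the patch puts around the counters (xfrm_state_lock, xfrm_cfg_mutex) is
omitted:

#include <stdbool.h>
#include <stdlib.h>

/* Two monotonically increasing generation counters. */
static unsigned long walk_ongoing;	/* bumped when a walk suspends */
static unsigned long walk_completed;	/* bumped when a walk finishes */

struct obj {
	unsigned long death_gen;	/* walk_ongoing at unlink time */
	struct obj *gc_next;
};

static struct obj *gc_list;

/* Unlink an object: stamp it with the current generation and defer
 * freeing it instead of freeing right away. */
static void obj_unlink(struct obj *o)
{
	o->death_gen = walk_ongoing;
	o->gc_next = gc_list;
	gc_list = o;
}

/* An object is safe to free once every walk that might have seen it
 * has completed; the subtraction is wrap-safe, like time_after(). */
static bool obj_protected(const struct obj *o)
{
	return (long)(o->death_gen - walk_completed) > 0;
}

static void gc_run(void)
{
	struct obj **p = &gc_list;

	while (*p) {
		struct obj *o = *p;

		if (obj_protected(o)) {
			p = &o->gc_next;	/* keep for a later pass */
		} else {
			*p = o->gc_next;
			free(o);
		}
	}
}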

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Herbert Xu on 2008-09-09 19:58:29 -07:00; committed by David S. Miller
parent dacc62dbf5
commit abb81c4f3c
2 changed files with 44 additions and 23 deletions

--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -120,9 +120,11 @@ extern struct mutex xfrm_cfg_mutex;
 /* Full description of state of transformer. */
 struct xfrm_state
 {
-	/* Note: bydst is re-used during gc */
 	struct list_head	all;
-	struct hlist_node	bydst;
+	union {
+		struct list_head	gclist;
+		struct hlist_node	bydst;
+	};
 	struct hlist_node	bysrc;
 	struct hlist_node	byspi;
 
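The union is safe because the two members are never live at the same time:
bydst links a state into the destination hash while it is alive, and gclist
links it onto the GC list only once it is dead and unhashed. A cut-down
illustration (not patch code):

/* Illustration: the hash linkage and the GC linkage are mutually
 * exclusive in time, so they can share the same bytes. */
struct state_links {
	union {
		struct list_head	gclist;	/* dead: queued for GC */
		struct hlist_node	bydst;	/* alive: in the bydst hash */
	};
};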
@@ -1286,16 +1288,9 @@ static inline void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
 	walk->count = 0;
 }
 
-static inline void xfrm_state_walk_done(struct xfrm_state_walk *walk)
-{
-	if (walk->state != NULL) {
-		xfrm_state_put(walk->state);
-		walk->state = NULL;
-	}
-}
-
 extern int xfrm_state_walk(struct xfrm_state_walk *walk,
 			   int (*func)(struct xfrm_state *, int, void*), void *);
+extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
 extern struct xfrm_state *xfrm_state_alloc(void);
 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 					  struct flowi *fl, struct xfrm_tmpl *tmpl,
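xfrm_state_walk_done() can no longer be a static inline here because it now
has to touch xfrm_state_walk_completed, which is private to xfrm_state.c;
hence the new extern declaration. Walkers keep the same init/walk/done
lifecycle. A hypothetical caller (the callback body is illustrative; zero as
the proto argument acts as a wildcard in xfrm_id_proto_match()):

static int dump_one(struct xfrm_state *x, int count, void *data)
{
	/* a real dumper would marshal x into a message here */
	return 0;
}

static void dump_states(void)
{
	struct xfrm_state_walk walk;

	xfrm_state_walk_init(&walk, 0);		/* 0: match any protocol */
	xfrm_state_walk(&walk, dump_one, NULL);	/* may suspend and bump
						 * xfrm_state_walk_ongoing */
	xfrm_state_walk_done(&walk);		/* drops any saved state and
						 * bumps ..._completed */
}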

--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -59,6 +59,11 @@ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
 static unsigned int xfrm_state_num;
 static unsigned int xfrm_state_genid;
 
+/* Counter indicating ongoing walk, protected by xfrm_state_lock. */
+static unsigned long xfrm_state_walk_ongoing;
+/* Counter indicating walk completion, protected by xfrm_cfg_mutex. */
+static unsigned long xfrm_state_walk_completed;
+
 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
@@ -191,7 +196,8 @@ static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
 
 static struct work_struct xfrm_state_gc_work;
-static HLIST_HEAD(xfrm_state_gc_list);
+static LIST_HEAD(xfrm_state_gc_leftovers);
+static LIST_HEAD(xfrm_state_gc_list);
 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
 
 int __xfrm_state_delete(struct xfrm_state *x);
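Garbage collection is now two-staged: xfrm_state_gc_list collects states as
they die, while xfrm_state_gc_leftovers accumulates the ones the GC task
could not destroy yet because a suspended walk might still reach them.
Schematically (an illustration, not patch code):

/*
 * __xfrm_state_destroy()                 xfrm_state_gc_task()
 *          |                                      |
 *          v            splice                    v
 *  xfrm_state_gc_list  ------->  xfrm_state_gc_leftovers --> destroy
 *                                (entries stamped after the last
 *                                 completed walk are kept back)
 */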
@@ -403,17 +409,22 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
 
 static void xfrm_state_gc_task(struct work_struct *data)
 {
-	struct xfrm_state *x;
-	struct hlist_node *entry, *tmp;
-	struct hlist_head gc_list;
+	struct xfrm_state *x, *tmp;
+	unsigned long completed;
 
+	mutex_lock(&xfrm_cfg_mutex);
 	spin_lock_bh(&xfrm_state_gc_lock);
-	gc_list.first = xfrm_state_gc_list.first;
-	INIT_HLIST_HEAD(&xfrm_state_gc_list);
+	list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers);
 	spin_unlock_bh(&xfrm_state_gc_lock);
 
-	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
+	completed = xfrm_state_walk_completed;
+	mutex_unlock(&xfrm_cfg_mutex);
+
+	list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) {
+		if ((long)(x->lastused - completed) > 0)
+			break;
 		xfrm_state_gc_destroy(x);
+	}
 
 	wake_up(&km_waitq);
 }
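The stamp test is wrap-safe in the same style as the kernel's time_after():
subtract the unsigned counters and look at the sign of the signed result.
The early break expects entries to reach the leftovers list in roughly
non-decreasing stamp order, which list_add_tail() and
list_splice_tail_init() preserve. A hypothetical helper making the idiom
explicit:

/* Not in the patch: true while some walk that was ongoing when @stamp
 * was taken has not yet completed, i.e. the state carrying @stamp must
 * not be destroyed.  Correct across counter wraparound, exactly like
 * time_after(). */
static inline bool state_still_protected(unsigned long stamp,
					 unsigned long completed)
{
	return (long)(stamp - completed) > 0;
}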
@@ -540,12 +551,8 @@ void __xfrm_state_destroy(struct xfrm_state *x)
 {
 	WARN_ON(x->km.state != XFRM_STATE_DEAD);
 
-	spin_lock_bh(&xfrm_state_lock);
-	list_del(&x->all);
-	spin_unlock_bh(&xfrm_state_lock);
-
 	spin_lock_bh(&xfrm_state_gc_lock);
-	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
+	list_add_tail(&x->gclist, &xfrm_state_gc_list);
 	spin_unlock_bh(&xfrm_state_gc_lock);
 	schedule_work(&xfrm_state_gc_work);
 }
@@ -558,6 +565,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
 	if (x->km.state != XFRM_STATE_DEAD) {
 		x->km.state = XFRM_STATE_DEAD;
 		spin_lock(&xfrm_state_lock);
+		x->lastused = xfrm_state_walk_ongoing;
+		list_del_rcu(&x->all);
 		hlist_del(&x->bydst);
 		hlist_del(&x->bysrc);
 		if (x->id.spi)
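Note the double duty of x->lastused: the field has no further use once the
state is marked dead, so the patch reuses it as the walk-generation stamp
that the GC task reads. A hypothetical helper isolating that step:

/* Not in the patch: what the delete path above does, in isolation.
 * Record the newest walk generation that may still reference x, then
 * unlink with list_del_rcu() so a suspended walker can keep going. */
static void xfrm_state_stamp_dead(struct xfrm_state *x)
{
	x->lastused = xfrm_state_walk_ongoing;	/* under xfrm_state_lock */
	list_del_rcu(&x->all);
}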
@@ -1574,6 +1583,7 @@ int xfrm_state_walk(struct xfrm_state_walk *walk,
 			if (err) {
 				xfrm_state_hold(last);
 				walk->state = last;
+				xfrm_state_walk_ongoing++;
 				goto out;
 			}
 		}
@@ -1588,12 +1598,28 @@ int xfrm_state_walk(struct xfrm_state_walk *walk,
 		err = func(last, 0, data);
 
 out:
 	spin_unlock_bh(&xfrm_state_lock);
-	if (old != NULL)
+	if (old != NULL) {
 		xfrm_state_put(old);
+		xfrm_state_walk_completed++;
+		if (!list_empty(&xfrm_state_gc_leftovers))
+			schedule_work(&xfrm_state_gc_work);
+	}
 	return err;
 }
 EXPORT_SYMBOL(xfrm_state_walk);
 
+void xfrm_state_walk_done(struct xfrm_state_walk *walk)
+{
+	if (walk->state != NULL) {
+		xfrm_state_put(walk->state);
+		walk->state = NULL;
+		xfrm_state_walk_completed++;
+		if (!list_empty(&xfrm_state_gc_leftovers))
+			schedule_work(&xfrm_state_gc_work);
+	}
+}
+EXPORT_SYMBOL(xfrm_state_walk_done);
+
 void xfrm_replay_notify(struct xfrm_state *x, int event)
 {
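
Taken together, the counters pair up exactly once per suspension:
xfrm_state_walk() bumps ..._ongoing when it parks a saved state, and either
the resume path (the old != NULL branch) or xfrm_state_walk_done() bumps
..._completed when that saved state is dropped. The difference therefore
counts currently suspended walks, which is what the GC task's stamp test
relies on. A toy user-space model of that pairing (all names illustrative):

#include <assert.h>

static unsigned long ongoing, completed, suspended;

static void walk_suspend(void) { ongoing++; suspended++; }
static void walk_finish(void)  { completed++; suspended--; }

int main(void)
{
	walk_suspend();
	assert(ongoing - completed == suspended);	/* one suspended walk */
	walk_finish();
	assert(ongoing - completed == suspended);	/* back to zero */
	return 0;
}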