mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 11:46:19 +00:00
[PATCH] file: kill unnecessary timer in fdtable_defer
free_fdtable_rcu() schedules a timer to reschedule fddef->wq if schedule_work() on it returns 0. However, schedule_work() guarantees that the target work is executed at least once after the scheduling regardless of its return value. A 0 return simply means that the work was already pending and thus no further action was required. Another problem is that it used the constant '5' as the @expires argument to mod_timer(). Kill the unnecessary fddef->timer. Signed-off-by: Tejun Heo <htejun@gmail.com> Cc: Dipankar Sarma <dipankar@in.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
e59e2ae2c2
commit
593be07ae8
1 changed file with 2 additions and 27 deletions
29
fs/file.c
29
fs/file.c
|
@ -21,7 +21,6 @@
|
|||
struct fdtable_defer {
|
||||
spinlock_t lock;
|
||||
struct work_struct wq;
|
||||
struct timer_list timer;
|
||||
struct fdtable *next;
|
||||
};
|
||||
|
||||
|
@ -75,22 +74,6 @@ static void __free_fdtable(struct fdtable *fdt)
|
|||
kfree(fdt);
|
||||
}
|
||||
|
||||
static void fdtable_timer(unsigned long data)
|
||||
{
|
||||
struct fdtable_defer *fddef = (struct fdtable_defer *)data;
|
||||
|
||||
spin_lock(&fddef->lock);
|
||||
/*
|
||||
* If someone already emptied the queue return.
|
||||
*/
|
||||
if (!fddef->next)
|
||||
goto out;
|
||||
if (!schedule_work(&fddef->wq))
|
||||
mod_timer(&fddef->timer, 5);
|
||||
out:
|
||||
spin_unlock(&fddef->lock);
|
||||
}
|
||||
|
||||
static void free_fdtable_work(struct work_struct *work)
|
||||
{
|
||||
struct fdtable_defer *f =
|
||||
|
@ -144,13 +127,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
|
|||
spin_lock(&fddef->lock);
|
||||
fdt->next = fddef->next;
|
||||
fddef->next = fdt;
|
||||
/*
|
||||
* vmallocs are handled from the workqueue context.
|
||||
* If the per-cpu workqueue is running, then we
|
||||
* defer work scheduling through a timer.
|
||||
*/
|
||||
if (!schedule_work(&fddef->wq))
|
||||
mod_timer(&fddef->timer, 5);
|
||||
/* vmallocs are handled from the workqueue context */
|
||||
schedule_work(&fddef->wq);
|
||||
spin_unlock(&fddef->lock);
|
||||
put_cpu_var(fdtable_defer_list);
|
||||
}
|
||||
|
@ -354,9 +332,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
|
|||
struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
|
||||
spin_lock_init(&fddef->lock);
|
||||
INIT_WORK(&fddef->wq, free_fdtable_work);
|
||||
init_timer(&fddef->timer);
|
||||
fddef->timer.data = (unsigned long)fddef;
|
||||
fddef->timer.function = fdtable_timer;
|
||||
fddef->next = NULL;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue