mm: dirty balancing for tasks
Based on ideas of Andrew: http://marc.info/?l=linux-kernel&m=102912915020543&w=2

Scale the bdi dirty limit inversely with the task's dirty rate. This makes heavy writers have a lower dirty limit than the occasional writer.

Andrea proposed something similar: http://lwn.net/Articles/152277/

The main disadvantage of his patch is that it uses an unrelated quantity to measure time, which leaves him with a workload-dependent tunable. Other than that, the two approaches appear quite similar.

[akpm@linux-foundation.org: fix warning]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
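In other words, the per-task limit is derived from the bdi dirty limit as dirty -= (dirty/8) * p_t, where p_t is the task's share of recently dirtied pages, clamped so no task ever drops below half the original limit. Below is a minimal user-space sketch of the arithmetic that the task_dirty_limit() hunk in this patch performs with do_div(); the helper name and sample numbers are illustrative only and not part of the patch:

    #include <stdio.h>

    /*
     * Sketch of the per-task dirty-limit scaling: the limit is reduced by
     * (dirty / 8) scaled by the task's fraction of recent dirtyings
     * (numerator / denominator), and clamped to at least half of the
     * original limit.  Illustrative only, not kernel code.
     */
    static long task_dirty_limit_sketch(long dirty, long numerator, long denominator)
    {
        long inv = (dirty >> 3) * numerator / denominator;
        long scaled = dirty - inv;

        return scaled < dirty / 2 ? dirty / 2 : scaled;
    }

    int main(void)
    {
        /* an 800-page limit, task responsible for half of the dirtying -> 750 */
        printf("%ld\n", task_dirty_limit_sketch(800, 1, 2));
        /* same limit, task responsible for nearly all of it -> 701 */
        printf("%ld\n", task_dirty_limit_sketch(800, 99, 100));
        return 0;
    }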
commit 3e26c149c3
parent 04fbfdc14e

4 changed files with 62 additions and 1 deletion
include/linux/init_task.h

@@ -171,6 +171,7 @@ extern struct group_info init_groups;
         [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
         [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
         }, \
+        .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
         INIT_TRACE_IRQFLAGS \
         INIT_LOCKDEP \
 }
include/linux/sched.h

@@ -74,6 +74,7 @@ struct sched_param {
 #include <linux/pid.h>
 #include <linux/percpu.h>
 #include <linux/topology.h>
+#include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
@@ -1149,6 +1150,7 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
         int make_it_fail;
 #endif
+        struct prop_local_single dirties;
 };

 /*
kernel/fork.c

@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;

 void free_task(struct task_struct *tsk)
 {
+        prop_local_destroy_single(&tsk->dirties);
         free_thread_info(tsk->stack);
         rt_mutex_debug_task_free(tsk);
         free_task_struct(tsk);
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
         struct task_struct *tsk;
         struct thread_info *ti;
+        int err;

         prepare_to_copy(orig);

@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)

         *tsk = *orig;
         tsk->stack = ti;
+
+        err = prop_local_init_single(&tsk->dirties);
+        if (err) {
+                free_thread_info(ti);
+                free_task_struct(tsk);
+                return NULL;
+        }
+
         setup_thread_stack(tsk, orig);

 #ifdef CONFIG_CC_STACKPROTECTOR
mm/page-writeback.c

@@ -118,6 +118,7 @@ static void background_writeout(unsigned long _min_pages);
  *
  */
 static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;

 static unsigned long determine_dirtyable_memory(void);

@@ -146,6 +147,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
         if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
                 int shift = calc_period_shift();
                 prop_change_shift(&vm_completions, shift);
+                prop_change_shift(&vm_dirties, shift);
         }
         return ret;
 }
@@ -159,6 +161,11 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
         __prop_inc_percpu(&vm_completions, &bdi->completions);
 }

+static inline void task_dirty_inc(struct task_struct *tsk)
+{
+        prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
@@ -198,6 +205,37 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
         *pbdi_dirty = min(*pbdi_dirty, avail_dirty);
 }

+static inline void task_dirties_fraction(struct task_struct *tsk,
+                long *numerator, long *denominator)
+{
+        prop_fraction_single(&vm_dirties, &tsk->dirties,
+                                numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ *   dirty -= (dirty/8) * p_{t}
+ */
+void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+        long numerator, denominator;
+        long dirty = *pdirty;
+        u64 inv = dirty >> 3;
+
+        task_dirties_fraction(tsk, &numerator, &denominator);
+        inv *= numerator;
+        do_div(inv, denominator);
+
+        dirty -= inv;
+        if (dirty < *pdirty/2)
+                dirty = *pdirty/2;
+
+        *pdirty = dirty;
+}
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -304,6 +342,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,

                 *pbdi_dirty = bdi_dirty;
                 clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+                task_dirty_limit(current, pbdi_dirty);
         }
 }

@@ -720,6 +759,7 @@ void __init page_writeback_init(void)

         shift = calc_period_shift();
         prop_descriptor_init(&vm_completions, shift);
+        prop_descriptor_init(&vm_dirties, shift);
 }

 /**
@@ -998,7 +1038,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-int fastcall set_page_dirty(struct page *page)
+static int __set_page_dirty(struct page *page)
 {
         struct address_space *mapping = page_mapping(page);

@@ -1016,6 +1056,14 @@ int fastcall set_page_dirty(struct page *page)
         }
         return 0;
 }
+
+int fastcall set_page_dirty(struct page *page)
+{
+        int ret = __set_page_dirty(page);
+        if (ret)
+                task_dirty_inc(current);
+        return ret;
+}
 EXPORT_SYMBOL(set_page_dirty);

 /*