/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/fs.h>

struct backing_dev_info;

extern spinlock_t inode_lock;
extern struct list_head inode_in_use;
extern struct list_head inode_unused;

/*
 * Yes, writeback.h requires sched.h: task_is_pdflush() below pokes at
 * task->flags, which is why <linux/sched.h> is included at the top of
 * this file.
 */
static inline int task_is_pdflush(struct task_struct *task)
{
	return task->flags & PF_FLUSHER;
}

#define current_is_pdflush()	task_is_pdflush(current)
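
/*
 * Illustrative use (not from the original header): code on a writeout
 * path that must not stall a pdflush thread can test for that context:
 *
 *	if (current_is_pdflush())
 *		return 0;
 *
 * The early return is a hypothetical caller's policy, not a rule set here.
 */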

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
	WB_SYNC_HOLD,	/* Hold the inode on sb_dirty for sys_sync() */
};
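
/*
 * Roughly (a summary, not original header text): background and kupdate
 * writeback run with WB_SYNC_NONE; data-integrity paths wait with
 * WB_SYNC_ALL; sys_sync() uses WB_SYNC_HOLD so inodes stay on sb_dirty
 * for its later wait pass.
 */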

/*
 * A control structure which tells the writeback code what to do. These are
 * always on the stack, and hence need no locking. They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	struct backing_dev_info *bdi;	/* If !NULL, only write back this
					   queue */
	enum writeback_sync_modes sync_mode;
	unsigned long *older_than_this;	/* If !NULL, only write back inodes
					   older than this */
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if range_start or range_end is non-zero,
	 * it is a hint that the filesystem need only write out the pages
	 * inside that byte range.  The byte at `range_end' is included in
	 * the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	unsigned nonblocking:1;		/* Don't get stuck on request queues */
	unsigned encountered_congestion:1; /* An output: a queue is full */
	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned for_writepages:1;	/* This is a writepages() call */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned more_io:1;		/* more io to be dispatched */
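	/*
	 * If set, ->writepages() leaves range_start pointing just past the
	 * last page it wrote, so a follow-up call can continue the range
	 * (an assumption inferred from write_cache_pages(), not original
	 * header text).
	 */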
	unsigned range_cont:1;
};

/*
 * fs/fs-writeback.c
 */
void writeback_inodes(struct writeback_control *wbc);
int inode_wait(void *);
void sync_inodes_sb(struct super_block *, int wait);
void sync_inodes(int wait);

/* writeback.h requires fs.h (struct inode); it is included at the top of this file. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
							TASK_UNINTERRUPTIBLE);
}
static inline void inode_sync_wait(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
							TASK_UNINTERRUPTIBLE);
}

/*
 * mm/page-writeback.c
 */
int wakeup_pdflush(long nr_pages);
void laptop_io_completion(void);
void laptop_sync_completion(void);
void throttle_vm_writeout(gfp_t gfp_mask);

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern int vm_dirty_ratio;
extern int dirty_writeback_interval;
extern int dirty_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

extern unsigned long determine_dirtyable_memory(void);

extern int dirty_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos);

struct ctl_table;
struct file;
int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
				      void __user *, size_t *, loff_t *);

void get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
		struct backing_dev_info *bdi);

void page_writeback_init(void);
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied);

static inline void
balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	balance_dirty_pages_ratelimited_nr(mapping, 1);
}
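
/*
 * Illustrative (not original header text): buffered-write paths call this
 * after dirtying pages so heavy dirtiers get throttled once the dirty
 * limits are crossed:
 *
 *	set_page_dirty(page);
 *	balance_dirty_pages_ratelimited(mapping);
 */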

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
				void *data);

int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
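
/*
 * Minimal sketch with hypothetical myfs_* names (not from this header):
 * a filesystem's ->writepages() commonly wraps write_cache_pages() with
 * its own writepage_t callback:
 *
 *	static int myfs_writepage(struct page *page,
 *			struct writeback_control *wbc, void *data)
 *	{
 *		... write the page out, unlock it, return 0 or -errno ...
 *	}
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return write_cache_pages(mapping, wbc, myfs_writepage, NULL);
 *	}
 */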
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
int sync_page_range(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count);
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count);
void set_page_dirty_balance(struct page *page, int page_mkwrite);
void writeback_set_ratelimit(void);

/* pdflush.c */
extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
				   read-only. */

#endif /* WRITEBACK_H */