mm: make read_cache_page synchronous
Ensure pages are uptodate after returning from read_cache_page, which allows us to cut out most of the filesystem-internal PageUptodate calls.

I didn't have a great look down the call chains, but this appears to fix 7 possible use-before-uptodate bugs in hfs, 2 in hfsplus, 1 in jfs, a few in ecryptfs, 1 in jffs2, and a possible case of cleared data being overwritten with readpage in block2mtd. All depend on whether the filler is async and/or can return with a !uptodate page.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 714b8171af
commit 6fe6900e1e
24 changed files with 71 additions and 146 deletions
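Before this change, read_cache_page()'s filler could return a locked, not-yet-uptodate page, so every caller had to wait and re-check by hand. A minimal caller-side sketch of the convention change (illustrative, not lifted verbatim from any one file in the patch):

	/* Old convention: wait, then verify the read actually succeeded. */
	page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}

	/* New convention: a single IS_ERR() check suffices; a page that
	 * failed to become uptodate now comes back as ERR_PTR(-EIO). */
	page = read_mapping_page(mapping, n, NULL);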
@@ -40,13 +40,11 @@ struct block2mtd_dev {
 static LIST_HEAD(blkmtd_device_list);
 
 
-static struct page* page_read(struct address_space *mapping, int index)
+static struct page *page_read(struct address_space *mapping, int index)
 {
-	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
-	return read_cache_page(mapping, index, filler, NULL);
+	return read_mapping_page(mapping, index, NULL);
 }
 
-
 /* erase a specified part of the device */
 static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
 {
@@ -194,10 +194,7 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index,
 
 	page = read_mapping_page(dir->i_mapping, index, &file);
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
-		if (!PageUptodate(page))
-			goto fail;
 		if (!PageChecked(page))
 			afs_dir_check_page(dir, page);
 		if (PageError(page))
@@ -68,13 +68,11 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
 	}
 
 	ret = -EIO;
-	wait_on_page_locked(page);
-	buf = kmap(page);
-	if (!PageUptodate(page))
-		goto out_free;
 	if (PageError(page))
 		goto out_free;
 
+	buf = kmap(page);
+
 	/* examine the symlink's contents */
 	size = vnode->status.size;
 	_debug("symlink to %*.*s", (int) size, (int) size, buf);
@@ -91,8 +89,8 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
 
 	ret = 0;
 
-out_free:
 	kunmap(page);
+out_free:
 	page_cache_release(page);
 out:
 	_leave(" = %d", ret);
@@ -171,8 +169,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 	}
 
 	ret = -EIO;
-	wait_on_page_locked(page);
-	if (!PageUptodate(page) || PageError(page))
+	if (PageError(page))
 		goto error;
 
 	buf = kmap(page);
@@ -180,7 +180,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 			struct page *page = NULL;
 
 			if (blocknr + i < devsize) {
-				page = read_mapping_page(mapping, blocknr + i, NULL);
+				page = read_mapping_page_async(mapping, blocknr + i,
+								NULL);
 				/* synchronous error? */
 				if (IS_ERR(page))
 					page = NULL;
@@ -46,7 +46,6 @@ struct kmem_cache *ecryptfs_lower_page_cache;
  */
 static struct page *ecryptfs_get1page(struct file *file, int index)
 {
-	struct page *page;
 	struct dentry *dentry;
 	struct inode *inode;
 	struct address_space *mapping;
@@ -54,14 +53,7 @@ static struct page *ecryptfs_get1page(struct file *file, int index)
 	dentry = file->f_path.dentry;
 	inode = dentry->d_inode;
 	mapping = inode->i_mapping;
-	page = read_cache_page(mapping, index,
-			       (filler_t *)mapping->a_ops->readpage,
-			       (void *)file);
-	if (IS_ERR(page))
-		goto out;
-	wait_on_page_locked(page);
-out:
-	return page;
+	return read_mapping_page(mapping, index, (void *)file);
 }
 
 static
@@ -233,7 +225,6 @@ int ecryptfs_do_readpage(struct file *file, struct page *page,
 		ecryptfs_printk(KERN_ERR, "Error reading from page cache\n");
 		goto out;
 	}
-	wait_on_page_locked(lower_page);
 	page_data = kmap_atomic(page, KM_USER0);
 	lower_page_data = kmap_atomic(lower_page, KM_USER1);
 	memcpy(page_data, lower_page_data, PAGE_CACHE_SIZE);
@@ -161,10 +161,7 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n)
 	struct address_space *mapping = dir->i_mapping;
 	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
-		if (!PageUptodate(page))
-			goto fail;
 		if (!PageChecked(page))
 			ext2_check_page(page);
 		if (PageError(page))
@@ -74,10 +74,7 @@ vxfs_get_page(struct address_space *mapping, u_long n)
 	pp = read_mapping_page(mapping, n, NULL);
 
 	if (!IS_ERR(pp)) {
-		wait_on_page_locked(pp);
 		kmap(pp);
-		if (!PageUptodate(pp))
-			goto fail;
 		/** if (!PageChecked(pp)) **/
 		/**	vxfs_check_page(pp); **/
 		if (PageError(pp))
@@ -65,7 +65,6 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n)
 	struct address_space *mapping = dir->i_mapping;
 	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
 		if (!PageUptodate(page))
 			goto fail;
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2671,19 +2671,9 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage)
 	struct address_space *mapping = dentry->d_inode->i_mapping;
 	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
-		goto sync_fail;
-	wait_on_page_locked(page);
-	if (!PageUptodate(page))
-		goto async_fail;
+		return (char*)page;
 	*ppage = page;
 	return kmap(page);
-
-async_fail:
-	page_cache_release(page);
-	return ERR_PTR(-EIO);
-
-sync_fail:
-	return (char*)page;
 }
 
 int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
@@ -334,8 +334,6 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
 		status = PTR_ERR(page);
 		goto out;
 	}
-	if (!PageUptodate(page))
-		goto read_error;
 
 	/* NOTE: Someone else may have changed the READDIRPLUS flag */
 	desc->page = page;
@@ -349,9 +347,6 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
 out:
 	dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __FUNCTION__, status);
 	return status;
-read_error:
-	page_cache_release(page);
-	return -EIO;
 }
 
 /*
@@ -61,15 +61,9 @@ static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 		err = page;
 		goto read_failed;
 	}
-	if (!PageUptodate(page)) {
-		err = ERR_PTR(-EIO);
-		goto getlink_read_error;
-	}
 	nd_set_link(nd, kmap(page));
 	return page;
 
-getlink_read_error:
-	page_cache_release(page);
 read_failed:
 	nd_set_link(nd, err);
 	return NULL;
@@ -89,9 +89,8 @@ static inline struct page *ntfs_map_page(struct address_space *mapping,
 	struct page *page = read_mapping_page(mapping, index, NULL);
 
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
-		if (PageUptodate(page) && !PageError(page))
+		if (!PageError(page))
 			return page;
 		ntfs_unmap_page(page);
 		return ERR_PTR(-EIO);
@@ -2532,14 +2532,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		page = read_mapping_page(mapping, idx, NULL);
 		if (IS_ERR(page)) {
 			ntfs_error(vol->sb, "Failed to read first partial "
-					"page (sync error, index 0x%lx).", idx);
-			return PTR_ERR(page);
-		}
-		wait_on_page_locked(page);
-		if (unlikely(!PageUptodate(page))) {
-			ntfs_error(vol->sb, "Failed to read first partial page "
-					"(async error, index 0x%lx).", idx);
-			page_cache_release(page);
+					"page (error, index 0x%lx).", idx);
 			return PTR_ERR(page);
 		}
 		/*
@@ -2602,14 +2595,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		page = read_mapping_page(mapping, idx, NULL);
 		if (IS_ERR(page)) {
 			ntfs_error(vol->sb, "Failed to read last partial page "
-					"(sync error, index 0x%lx).", idx);
-			return PTR_ERR(page);
-		}
-		wait_on_page_locked(page);
-		if (unlikely(!PageUptodate(page))) {
-			ntfs_error(vol->sb, "Failed to read last partial page "
-					"(async error, index 0x%lx).", idx);
-			page_cache_release(page);
+					"(error, index 0x%lx).", idx);
 			return PTR_ERR(page);
 		}
 		kaddr = kmap_atomic(page, KM_USER0);
@@ -236,8 +236,7 @@ do_non_resident_extend:
 			err = PTR_ERR(page);
 			goto init_err_out;
 		}
-		wait_on_page_locked(page);
-		if (unlikely(!PageUptodate(page) || PageError(page))) {
+		if (unlikely(PageError(page))) {
 			page_cache_release(page);
 			err = -EIO;
 			goto init_err_out;
@@ -2471,7 +2471,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 	s64 nr_free = vol->nr_clusters;
 	u32 *kaddr;
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
-	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
 	struct page *page;
 	pgoff_t index, max_index;
 
@@ -2494,24 +2493,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
 		 */
-		page = read_cache_page(mapping, index, (filler_t*)readpage,
-				NULL);
+		page = read_mapping_page(mapping, index, NULL);
 		/* Ignore pages which errored synchronously. */
 		if (IS_ERR(page)) {
-			ntfs_debug("Sync read_cache_page() error. Skipping "
+			ntfs_debug("read_mapping_page() error. Skipping "
 					"page (index 0x%lx).", index);
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		wait_on_page_locked(page);
-		/* Ignore pages which errored asynchronously. */
-		if (!PageUptodate(page)) {
-			ntfs_debug("Async read_cache_page() error. Skipping "
-					"page (index 0x%lx).", index);
-			page_cache_release(page);
-			nr_free -= PAGE_CACHE_SIZE * 8;
-			continue;
-		}
 		kaddr = (u32*)kmap_atomic(page, KM_USER0);
 		/*
 		 * For each 4 bytes, subtract the number of set bits. If this
@@ -2562,7 +2551,6 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 {
 	u32 *kaddr;
 	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
-	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
 	struct page *page;
 	pgoff_t index;
 
@@ -2576,24 +2564,14 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
 		 */
-		page = read_cache_page(mapping, index, (filler_t*)readpage,
-				NULL);
+		page = read_mapping_page(mapping, index, NULL);
 		/* Ignore pages which errored synchronously. */
 		if (IS_ERR(page)) {
-			ntfs_debug("Sync read_cache_page() error. Skipping "
+			ntfs_debug("read_mapping_page() error. Skipping "
 					"page (index 0x%lx).", index);
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		wait_on_page_locked(page);
-		/* Ignore pages which errored asynchronously. */
-		if (!PageUptodate(page)) {
-			ntfs_debug("Async read_cache_page() error. Skipping "
-					"page (index 0x%lx).", index);
-			page_cache_release(page);
-			nr_free -= PAGE_CACHE_SIZE * 8;
-			continue;
-		}
 		kaddr = (u32*)kmap_atomic(page, KM_USER0);
 		/*
 		 * For each 4 bytes, subtract the number of set bits. If this
@@ -67,16 +67,9 @@ static char *ocfs2_page_getlink(struct dentry * dentry,
 	page = read_mapping_page(mapping, 0, NULL);
 	if (IS_ERR(page))
 		goto sync_fail;
-	wait_on_page_locked(page);
-	if (!PageUptodate(page))
-		goto async_fail;
 	*ppage = page;
 	return kmap(page);
 
-async_fail:
-	page_cache_release(page);
-	return ERR_PTR(-EIO);
-
 sync_fail:
 	return (char*)page;
 }
@@ -569,9 +569,6 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 	page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
 				 NULL);
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
-		if (!PageUptodate(page))
-			goto fail;
 		if (PageError(page))
 			goto fail;
 		p->v = page;
@@ -410,11 +410,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
 	page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
-		if (!PageUptodate(page))
-			goto fail;
-
 		if (PageError(page))
 			goto fail;
 	}
@@ -54,17 +54,9 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
 	struct page *page = read_mapping_page(mapping, n, NULL);
-	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
+	if (!IS_ERR(page))
 		kmap(page);
-		if (!PageUptodate(page))
-			goto fail;
-	}
 	return page;
-
-fail:
-	dir_put_page(page);
-	return ERR_PTR(-EIO);
 }
 
 static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
@@ -180,13 +180,9 @@ fail:
 static struct page *ufs_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
-	struct page *page = read_cache_page(mapping, n,
-				(filler_t*)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
-		if (!PageUptodate(page))
-			goto fail;
 		if (!PageChecked(page))
 			ufs_check_page(page);
 		if (PageError(page))
@@ -251,13 +251,11 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 
 	page = find_lock_page(mapping, index);
 	if (!page) {
-		page = read_cache_page(mapping, index,
-				       (filler_t*)mapping->a_ops->readpage,
-				       NULL);
+		page = read_mapping_page(mapping, index, NULL);
 
 		if (IS_ERR(page)) {
 			printk(KERN_ERR "ufs_change_blocknr: "
-			       "read_cache_page error: ino %lu, index: %lu\n",
+			       "read_mapping_page error: ino %lu, index: %lu\n",
 			       mapping->host->i_ino, index);
 			goto out;
 		}
@@ -95,12 +95,23 @@ static inline struct page *grab_cache_page(struct address_space *mapping, unsign
 
 extern struct page * grab_cache_page_nowait(struct address_space *mapping,
 				unsigned long index);
+extern struct page * read_cache_page_async(struct address_space *mapping,
+				unsigned long index, filler_t *filler,
+				void *data);
 extern struct page * read_cache_page(struct address_space *mapping,
 				unsigned long index, filler_t *filler,
 				void *data);
 extern int read_cache_pages(struct address_space *mapping,
 			struct list_head *pages, filler_t *filler, void *data);
 
+static inline struct page *read_mapping_page_async(
+				struct address_space *mapping,
+				unsigned long index, void *data)
+{
+	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+	return read_cache_page_async(mapping, index, filler, data);
+}
+
 static inline struct page *read_mapping_page(struct address_space *mapping,
 				unsigned long index, void *data)
 {
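The split above lets callers pick their guarantee; a short sketch of when each helper applies (cramfs earlier in the diff is the one caller converted to the async variant):

	/* Synchronous: directory, symlink and bitmap readers need the
	 * data immediately; the returned page is uptodate or an error. */
	page = read_mapping_page(mapping, n, NULL);

	/* Asynchronous: fire off the read and return; the caller must
	 * later check PageUptodate()/PageError() itself. */
	page = read_mapping_page_async(mapping, n, NULL);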
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1726,7 +1726,7 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
 EXPORT_SYMBOL(generic_file_mmap);
 EXPORT_SYMBOL(generic_file_readonly_mmap);
 
-static inline struct page *__read_cache_page(struct address_space *mapping,
+static struct page *__read_cache_page(struct address_space *mapping,
 				unsigned long index,
 				int (*filler)(void *,struct page*),
 				void *data)
@@ -1763,17 +1763,11 @@ repeat:
 	return page;
 }
 
-/**
- * read_cache_page - read into page cache, fill it if needed
- * @mapping:	the page's address_space
- * @index:	the page index
- * @filler:	function to perform the read
- * @data:	destination for read data
- *
- * Read into the page cache. If a page already exists,
- * and PageUptodate() is not set, try to fill the page.
+/*
+ * Same as read_cache_page, but don't wait for page to become unlocked
+ * after submitting it to the filler.
  */
-struct page *read_cache_page(struct address_space *mapping,
+struct page *read_cache_page_async(struct address_space *mapping,
 				unsigned long index,
 				int (*filler)(void *,struct page*),
 				void *data)
@@ -1804,6 +1798,39 @@ retry:
 		page_cache_release(page);
 		page = ERR_PTR(err);
 	}
+out:
+	mark_page_accessed(page);
+	return page;
+}
+EXPORT_SYMBOL(read_cache_page_async);
+
+/**
+ * read_cache_page - read into page cache, fill it if needed
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @filler:	function to perform the read
+ * @data:	destination for read data
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page then wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page(struct address_space *mapping,
+				unsigned long index,
+				int (*filler)(void *,struct page*),
+				void *data)
+{
+	struct page *page;
+
+	page = read_cache_page_async(mapping, index, filler, data);
+	if (IS_ERR(page))
+		goto out;
+	wait_on_page_locked(page);
+	if (!PageUptodate(page)) {
+		page_cache_release(page);
+		page = ERR_PTR(-EIO);
+	}
 out:
 	return page;
 }
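With the wrapper above, a caller that passes its own filler gets the same guarantee as read_mapping_page(); a minimal sketch (the readpage filler shown is the common default, as in the hunks above):

	page = read_cache_page(mapping, index,
			(filler_t *)mapping->a_ops->readpage, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* No wait_on_page_locked() or PageUptodate() check is needed here:
	 * an asynchronous filler failure already surfaced as ERR_PTR(-EIO). */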
@@ -1531,9 +1531,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		error = PTR_ERR(page);
 		goto bad_swap;
 	}
-	wait_on_page_locked(page);
-	if (!PageUptodate(page))
-		goto bad_swap;
 	kmap(page);
 	swap_header = page_address(page);
 