mirror of
https://github.com/adulau/aha.git
synced 2024-12-29 04:06:22 +00:00
NFS: Optimise append writes with holes
If a file is being extended, and we're creating a hole, we might as well declare the entire page to be up to date. This patch significantly improves the write performance for sparse files in the case where lseek(SEEK_END) is used to append several non-contiguous writes at intervals of < PAGE_SIZE. Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
This commit is contained in:
parent
b390c2b55c
commit
efc91ed019
2 changed files with 23 additions and 9 deletions
|
@@ -344,6 +344,26 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
|
|||
unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
|
||||
int status;
|
||||
|
||||
/*
|
||||
* Zero any uninitialised parts of the page, and then mark the page
|
||||
* as up to date if it turns out that we're extending the file.
|
||||
*/
|
||||
if (!PageUptodate(page)) {
|
||||
unsigned pglen = nfs_page_length(page);
|
||||
unsigned end = offset + len;
|
||||
|
||||
if (pglen == 0) {
|
||||
zero_user_segments(page, 0, offset,
|
||||
end, PAGE_CACHE_SIZE);
|
||||
SetPageUptodate(page);
|
||||
} else if (end >= pglen) {
|
||||
zero_user_segment(page, end, PAGE_CACHE_SIZE);
|
||||
if (offset == 0)
|
||||
SetPageUptodate(page);
|
||||
} else
|
||||
zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
|
||||
}
|
||||
|
||||
lock_kernel();
|
||||
status = nfs_updatepage(file, page, offset, copied);
|
||||
unlock_kernel();
|
||||
|
|
|
@@ -616,7 +616,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
|
|||
spin_unlock(&inode->i_lock);
|
||||
radix_tree_preload_end();
|
||||
req = new;
|
||||
goto zero_page;
|
||||
goto out;
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
|
||||
|
@@ -649,19 +649,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
|
|||
req->wb_offset = offset;
|
||||
req->wb_pgbase = offset;
|
||||
req->wb_bytes = max(end, rqend) - req->wb_offset;
|
||||
goto zero_page;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (end > rqend)
|
||||
req->wb_bytes = end - req->wb_offset;
|
||||
|
||||
return req;
|
||||
zero_page:
|
||||
/* If this page might potentially be marked as up to date,
|
||||
* then we need to zero any uninitalised data. */
|
||||
if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
|
||||
&& !PageUptodate(req->wb_page))
|
||||
zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
|
||||
out:
|
||||
return req;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue