mm: pagecache insertion fewer atomics
Setting and clearing PG_locked when inserting a page into the swap cache or page cache, at a point where the page has no other references, can use non-atomic page-flags operations, because no other CPU can be operating on the page at that time. This saves one atomic operation per page-cache insertion.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit f45840b5c1
parent 9978ad583e
2 changed files with 9 additions and 9 deletions
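Why this is safe: set_bit() and clear_bit() are atomic read-modify-write operations, while __set_bit() and __clear_bit() compile to plain loads and stores. A page that has just been allocated and not yet inserted into the page cache or swap cache is invisible to every other CPU, so the plain variants suffice. Below is a minimal, compilable userspace sketch of that invariant, assuming the GCC/Clang __atomic builtins; struct fake_page, FP_LOCKED and the two helper functions are illustrative stand-ins for the kernel bitops, not kernel code.

#include <stdio.h>

struct fake_page {
	unsigned long flags;
};

#define FP_LOCKED 0	/* bit number, standing in for PG_locked */

/* Atomic set, analogous to the kernel's set_bit(): required whenever
 * another CPU might touch ->flags concurrently. */
static void atomic_set_flag(struct fake_page *p, int bit)
{
	__atomic_fetch_or(&p->flags, 1UL << bit, __ATOMIC_RELAXED);
}

/* Non-atomic set, analogous to __set_bit(): a plain read-modify-write,
 * correct only while this thread is the sole owner of the page. */
static void nonatomic_set_flag(struct fake_page *p, int bit)
{
	p->flags |= 1UL << bit;
}

int main(void)
{
	struct fake_page page = { .flags = 0 };

	/* Freshly allocated: nobody else holds a reference, so the
	 * cheap non-atomic variant is enough.  This is the window the
	 * commit exploits in add_to_page_cache() and
	 * read_swap_cache_async(). */
	nonatomic_set_flag(&page, FP_LOCKED);

	/* Once the page is published and other CPUs can reach it,
	 * ->flags may be raced on, so the atomic variant is needed. */
	atomic_set_flag(&page, FP_LOCKED);

	printf("flags = %#lx\n", page.flags);
	return 0;
}

Note that unlock_page() is untouched by this commit: by the time a page is unlocked it has been published (e.g. inserted into the radix tree), other CPUs may already be operating on it, and the atomic path is still required there.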
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -299,14 +299,14 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
 {
-	set_bit(PG_locked, &page->flags);
+	__set_bit(PG_locked, &page->flags);
 }
 
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
 {
-	clear_bit(PG_locked, &page->flags);
+	__clear_bit(PG_locked, &page->flags);
 }
 
 static inline int trylock_page(struct page *page)
@@ -438,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
  */
 static inline int add_to_page_cache(struct page *page,
 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
 {
 	int error;
 
-	set_page_locked(page);
+	__set_page_locked(page);
 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 	if (unlikely(error))
-		clear_page_locked(page);
+		__clear_page_locked(page);
 	return error;
 }
 
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -303,7 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * re-using the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		set_page_locked(new_page);
+		__set_page_locked(new_page);
 		SetPageSwapBacked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
 		if (likely(!err)) {
@@ -315,7 +315,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			return new_page;
 		}
 		ClearPageSwapBacked(new_page);
-		clear_page_locked(new_page);
+		__clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
 