mm: spinlock tree_lock
mapping->tree_lock has no read lockers.  convert the lock from an rwlock
to a spinlock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a60637c858
commit 19fd623127

12 changed files with 38 additions and 39 deletions
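The first line of the log is the whole rationale: after the lockless page cache work in parent a60637c858 moved lookups off the lock, no caller takes mapping->tree_lock for reading, so an rwlock's reader/writer machinery is pure overhead and every call site can be substituted one-for-one. As a rough illustration of the pattern only — a minimal userspace sketch with POSIX threads, not kernel code; "struct cache" and "cache_insert" are invented names:

    /*
     * Userspace analogue of the rwlock -> spinlock conversion.
     * If every accessor takes the lock for writing (no read-side
     * lockers), an rwlock buys nothing over a plain lock, so the
     * cheaper primitive can be substituted call-for-call:
     *
     *   pthread_rwlock_wrlock(&c->lock)  ->  pthread_spin_lock(&c->lock)
     *   pthread_rwlock_unlock(&c->lock)  ->  pthread_spin_unlock(&c->lock)
     */
    #include <pthread.h>
    #include <stdio.h>

    struct cache {
            pthread_spinlock_t lock;   /* was: pthread_rwlock_t */
            int nr_items;              /* toy stand-in for mapping->nrpages */
    };

    static void cache_insert(struct cache *c)
    {
            pthread_spin_lock(&c->lock);    /* was: pthread_rwlock_wrlock() */
            c->nr_items++;
            pthread_spin_unlock(&c->lock);  /* was: pthread_rwlock_unlock() */
    }

    int main(void)
    {
            struct cache c = { .nr_items = 0 };

            pthread_spin_init(&c.lock, PTHREAD_PROCESS_PRIVATE);
            cache_insert(&c);
            printf("items: %d\n", c.nr_items);
            pthread_spin_destroy(&c.lock);
            return 0;
    }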
fs/buffer.c

@@ -706,7 +706,7 @@ static int __set_page_dirty(struct page *page,
 	if (TestSetPageDirty(page))
 		return 0;
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
@@ -719,7 +719,7 @@ static int __set_page_dirty(struct page *page,
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
 	return 1;
fs/inode.c

@@ -209,7 +209,7 @@ void inode_init_once(struct inode *inode)
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	rwlock_init(&inode->i_data.tree_lock);
+	spin_lock_init(&inode->i_data.tree_lock);
 	spin_lock_init(&inode->i_data.i_mmap_lock);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
include/asm-arm/cacheflush.h

@@ -424,9 +424,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define flush_dcache_mmap_lock(mapping) \
-	write_lock_irq(&(mapping)->tree_lock)
+	spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
-	write_unlock_irq(&(mapping)->tree_lock)
+	spin_unlock_irq(&(mapping)->tree_lock)
 
 #define flush_icache_user_range(vma,page,addr,len) \
 	flush_dcache_page(page)
include/asm-parisc/cacheflush.h

@@ -45,9 +45,9 @@ void flush_cache_mm(struct mm_struct *mm);
 extern void flush_dcache_page(struct page *page);
 
 #define flush_dcache_mmap_lock(mapping) \
-	write_lock_irq(&(mapping)->tree_lock)
+	spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
-	write_unlock_irq(&(mapping)->tree_lock)
+	spin_unlock_irq(&(mapping)->tree_lock)
 
 #define flush_icache_page(vma,page) do { \
 	flush_kernel_dcache_page(page); \
include/linux/fs.h

@@ -499,7 +499,7 @@ struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	rwlock_t		tree_lock;	/* and rwlock protecting it */
+	spinlock_t		tree_lock;	/* and lock protecting it */
 	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
 	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
mm/filemap.c

@@ -109,7 +109,7 @@
 /*
  * Remove a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
+ * is safe.  The caller must hold the mapping's tree_lock.
  */
 void __remove_from_page_cache(struct page *page)
 {
@@ -141,9 +141,9 @@ void remove_from_page_cache(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 }
 
 static int sync_page(void *word)
@@ -469,7 +469,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		page->mapping = mapping;
 		page->index = offset;
 
-		write_lock_irq(&mapping->tree_lock);
+		spin_lock_irq(&mapping->tree_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
 		if (likely(!error)) {
 			mapping->nrpages++;
@@ -480,7 +480,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 			page_cache_release(page);
 		}
 
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
 	} else
 		mem_cgroup_uncharge_cache_page(page);
mm/migrate.c

@@ -323,7 +323,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		return 0;
 	}
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
@@ -331,12 +331,12 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	expected_count = 2 + !!PagePrivate(page);
 	if (page_count(page) != expected_count ||
 			(struct page *)radix_tree_deref_slot(pslot) != page) {
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
 	if (!page_freeze_refs(page, expected_count)) {
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
@@ -373,10 +373,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	__inc_zone_page_state(newpage, NR_FILE_PAGES);
 
-	write_unlock_irq(&mapping->tree_lock);
-	if (!PageSwapCache(newpage)) {
+	spin_unlock_irq(&mapping->tree_lock);
+	if (!PageSwapCache(newpage))
 		mem_cgroup_uncharge_cache_page(page);
-	}
 
 	return 0;
 }
mm/page-writeback.c

@@ -1088,7 +1088,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		if (!mapping)
 			return 1;
 
-		write_lock_irq(&mapping->tree_lock);
+		spin_lock_irq(&mapping->tree_lock);
 		mapping2 = page_mapping(page);
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
@@ -1102,7 +1102,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		if (mapping->host) {
 			/* !PageAnon && !swapper_space */
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1258,7 +1258,7 @@ int test_clear_page_writeback(struct page *page)
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
 
-		write_lock_irqsave(&mapping->tree_lock, flags);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		ret = TestClearPageWriteback(page);
 		if (ret) {
 			radix_tree_tag_clear(&mapping->page_tree,
@@ -1269,7 +1269,7 @@ int test_clear_page_writeback(struct page *page)
 				__bdi_writeout_inc(bdi);
 			}
 		}
-		write_unlock_irqrestore(&mapping->tree_lock, flags);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
@@ -1287,7 +1287,7 @@ int test_set_page_writeback(struct page *page)
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
 
-		write_lock_irqsave(&mapping->tree_lock, flags);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		ret = TestSetPageWriteback(page);
 		if (!ret) {
 			radix_tree_tag_set(&mapping->page_tree,
@@ -1300,7 +1300,7 @@ int test_set_page_writeback(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-		write_unlock_irqrestore(&mapping->tree_lock, flags);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestSetPageWriteback(page);
 	}
mm/swap_state.c

@@ -39,7 +39,7 @@ static struct backing_dev_info swap_backing_dev_info = {
 
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
+	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
 	.a_ops		= &swap_aops,
 	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
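The swapper_space hunk above is the one place the conversion touches a compile-time initializer rather than a call site: a lock embedded in a statically defined object must switch initializers along with its type. A hedged userspace parallel follows; POSIX spinlocks have no static initializer, so a statically initialized mutex stands in for the spinlock here, and "struct registry" is an invented name, not a kernel API.

    #include <pthread.h>
    #include <stdio.h>

    struct registry {
            pthread_mutex_t lock;   /* was: pthread_rwlock_t */
            int entries;
    };

    /* was: .lock = PTHREAD_RWLOCK_INITIALIZER */
    static struct registry global_registry = {
            .lock    = PTHREAD_MUTEX_INITIALIZER,
            .entries = 0,
    };

    int main(void)
    {
            pthread_mutex_lock(&global_registry.lock);
            global_registry.entries++;
            pthread_mutex_unlock(&global_registry.lock);
            printf("entries: %d\n", global_registry.entries);
            return 0;
    }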
@@ -80,7 +80,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 		SetPageSwapCache(page);
 		set_page_private(page, entry.val);
 
-		write_lock_irq(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		error = radix_tree_insert(&swapper_space.page_tree,
 						entry.val, page);
 		if (likely(!error)) {
@@ -88,7 +88,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 			__inc_zone_page_state(page, NR_FILE_PAGES);
 			INC_CACHE_INFO(add_total);
 		}
-		write_unlock_irq(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
 
 		if (unlikely(error)) {
@@ -182,9 +182,9 @@ void delete_from_swap_cache(struct page *page)
 
 	entry.val = page_private(page);
 
-	write_lock_irq(&swapper_space.tree_lock);
+	spin_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
-	write_unlock_irq(&swapper_space.tree_lock);
+	spin_unlock_irq(&swapper_space.tree_lock);
 
 	swap_free(entry);
 	page_cache_release(page);
mm/swapfile.c

@@ -369,13 +369,13 @@ int remove_exclusive_swap_page(struct page *page)
 	retval = 0;
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the swapcache lock held.. */
-		write_lock_irq(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		if ((page_count(page) == 2) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
 		}
-		write_unlock_irq(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
 	}
 	spin_unlock(&swap_lock);
mm/truncate.c

@@ -349,18 +349,18 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	if (PageDirty(page))
 		goto failed;
 
 	BUG_ON(PagePrivate(page));
 	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
 failed:
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	return 0;
 }
 
mm/vmscan.c

@@ -399,7 +399,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	/*
 	 * The non racy check for a busy page.
 	 *
@@ -436,17 +436,17 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		__delete_from_swap_cache(page);
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		swap_free(swap);
 	} else {
 		__remove_from_page_cache(page);
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 	}
 
 	return 1;
 
 cannot_free:
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	return 0;
 }