e9995ef978
A side-effect of making ksm pages swappable is that they have to be placed on the LRUs: which then exposes them to isolate_lru_page() and hence to page migration.

Add rmap_walk() for remove_migration_ptes() to use: rmap_walk_anon() and rmap_walk_file() in rmap.c, but rmap_walk_ksm() in ksm.c. Perhaps some consolidation with existing code is possible, but don't attempt that yet (try_to_unmap needs to handle nonlinears, but migration pte removal does not).

rmap_walk() is sadly less general than it appears: rmap_walk_anon(), like remove_anon_migration_ptes() which it replaces, avoids calling page_lock_anon_vma(), because that includes a page_mapped() test which fails when all migration ptes are in place. That was valid when NUMA page migration was introduced (holding mmap_sem provided the missing guarantee that anon_vma's slab had not already been destroyed), but I believe not valid in the memory hotremove case added since.

For now do the same as before, and consider the best way to fix that unlikely race later on. When fixed, we can probably use rmap_walk() on hwpoisoned ksm pages too: for now, they remain among hwpoison's various exceptions (its PageKsm test comes before the page is locked, but its page_lock_anon_vma fails safely if an anon gets upgraded).

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
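To make the new interface concrete, here is a minimal sketch, not code from this patch, of how a migration-pte remover can plug into the rmap_walk() declared near the end of this header. The callback name and body below are hypothetical; only rmap_walk()'s signature comes from the header. In the mm/rmap.c implementation this commit adds, the walk visits every vma mapping the page and continues for as long as the callback returns SWAP_AGAIN:

        static int remove_one_migration_pte(struct page *page,
                        struct vm_area_struct *vma, unsigned long addr,
                        void *arg)
        {
                struct page *new = arg;   /* the page to map back in */

                /*
                 * Locate the migration pte at addr and replace it with
                 * a pte pointing at new ... (elided in this sketch).
                 */
                return SWAP_AGAIN;        /* keep walking the other vmas */
        }

        /* 'page' and 'new' are assumed in scope at the call site */
        rmap_walk(page, remove_one_migration_pte, new);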
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H

/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        spinlock_t lock;        /* Serialize access to vma list */
#ifdef CONFIG_KSM
        atomic_t ksm_refcount;
#endif
        /*
         * NOTE: the LSB of the head.next is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * head must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct list_head head;  /* List of private "related" vmas */
};

#ifdef CONFIG_MMU
#ifdef CONFIG_KSM
static inline void ksm_refcount_init(struct anon_vma *anon_vma)
{
        atomic_set(&anon_vma->ksm_refcount, 0);
}

static inline int ksm_refcount(struct anon_vma *anon_vma)
{
        return atomic_read(&anon_vma->ksm_refcount);
}
#else
static inline void ksm_refcount_init(struct anon_vma *anon_vma)
{
}

static inline int ksm_refcount(struct anon_vma *anon_vma)
{
        return 0;
}
#endif /* CONFIG_KSM */

static inline struct anon_vma *page_anon_vma(struct page *page)
{
        if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
                                            PAGE_MAPPING_ANON)
                return NULL;
        return page_rmapping(page);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_unlock(&anon_vma->lock);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
void anon_vma_free(struct anon_vma *);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

static inline void page_dup_rmap(struct page *page)
{
        atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                        struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
        unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

enum ttu_flags {
        TTU_UNMAP = 0,                  /* unmap mode */
        TTU_MIGRATION = 1,              /* migration mode */
        TTU_MUNLOCK = 2,                /* munlock mode */
        TTU_ACTION_MASK = 0xff,

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
                        unsigned long address, enum ttu_flags flags);
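
/*
 * Illustrative sketch, not part of this header: how a caller such as
 * mm/vmscan.c might combine the ttu_flags above with the SWAP_*
 * return values defined at the bottom of this file.  The call site
 * is paraphrased, not copied:
 *
 *      switch (try_to_unmap(page, TTU_UNMAP)) {
 *      case SWAP_FAIL:         give up on this page
 *      case SWAP_AGAIN:        retry the page later
 *      case SWAP_MLOCK:        page is mlocked, keep it
 *      case SWAP_SUCCESS:      all ptes gone, page can be reclaimed
 *      }
 */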

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
                                unsigned long, spinlock_t **, int);

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
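
/*
 * Illustrative sketch, not part of this header: the usual writeback
 * pattern is to write-protect via page_mkclean() before clearing the
 * dirty flag, so that a racing writer refaults and redirties the page,
 * e.g. (paraphrased from clear_page_dirty_for_io() in
 * mm/page-writeback.c):
 *
 *      if (page_mkclean(page))
 *              set_page_dirty(page);
 */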

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
                struct vm_area_struct *, unsigned long, void *), void *arg);

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *cnt,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return TestClearPageReferenced(page);
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3

#endif  /* _LINUX_RMAP_H */