memcg: atomic ops for page_cgroup->flags

This patch converts page_cgroup->flags to be manipulated with atomic ops and
defines functions (and macros) to access them.

Before trying to modify the memory resource controller, these atomic
operations on flags are necessary.  Most of the flags in this patch are for
LRU and are modified under mz->lru_lock, but we'll soon add other flags
which are not for LRU.  For example, we'll place a LOCK bit in the flags
field.  We need atomic operations to modify the LRU bits without holding
LOCK.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
KAMEZAWA Hiroyuki 2008-10-18 20:28:11 -07:00 committed by Linus Torvalds
parent addb9efebb
commit c05555b572

View file

@ -157,12 +157,46 @@ struct page_cgroup {
struct list_head lru; /* per cgroup LRU list */ struct list_head lru; /* per cgroup LRU list */
struct page *page; struct page *page;
struct mem_cgroup *mem_cgroup; struct mem_cgroup *mem_cgroup;
int flags; unsigned long flags;
}; };
#define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */ enum {
#define PAGE_CGROUP_FLAG_FILE (0x4) /* page is file system backed */ /* flags for mem_cgroup */
#define PAGE_CGROUP_FLAG_UNEVICTABLE (0x8) /* page is unevictableable */ PCG_CACHE, /* charged as cache */
/* flags for LRU placement */
PCG_ACTIVE, /* page is active in this cgroup */
PCG_FILE, /* page is file system backed */
PCG_UNEVICTABLE, /* page is unevictableable */
};
#define TESTPCGFLAG(uname, lname) \
static inline int PageCgroup##uname(struct page_cgroup *pc) \
{ return test_bit(PCG_##lname, &pc->flags); }
#define SETPCGFLAG(uname, lname) \
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
{ set_bit(PCG_##lname, &pc->flags); }
#define CLEARPCGFLAG(uname, lname) \
static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
{ clear_bit(PCG_##lname, &pc->flags); }
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
/* LRU management flags (from global-lru definition) */
TESTPCGFLAG(File, FILE)
SETPCGFLAG(File, FILE)
CLEARPCGFLAG(File, FILE)
TESTPCGFLAG(Active, ACTIVE)
SETPCGFLAG(Active, ACTIVE)
CLEARPCGFLAG(Active, ACTIVE)
TESTPCGFLAG(Unevictable, UNEVICTABLE)
SETPCGFLAG(Unevictable, UNEVICTABLE)
CLEARPCGFLAG(Unevictable, UNEVICTABLE)
static int page_cgroup_nid(struct page_cgroup *pc) static int page_cgroup_nid(struct page_cgroup *pc)
{ {
@ -177,15 +211,25 @@ static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
enum charge_type { enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0, MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
MEM_CGROUP_CHARGE_TYPE_MAPPED, MEM_CGROUP_CHARGE_TYPE_MAPPED,
MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */ MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
NR_CHARGE_TYPE,
};
static const unsigned long
pcg_default_flags[NR_CHARGE_TYPE] = {
((1 << PCG_CACHE) | (1 << PCG_FILE)),
((1 << PCG_ACTIVE)),
((1 << PCG_ACTIVE) | (1 << PCG_CACHE)),
0,
}; };
/* /*
* Always modified under lru lock. Then, not necessary to preempt_disable() * Always modified under lru lock. Then, not necessary to preempt_disable()
*/ */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags, static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
bool charge) struct page_cgroup *pc,
bool charge)
{ {
int val = (charge)? 1 : -1; int val = (charge)? 1 : -1;
struct mem_cgroup_stat *stat = &mem->stat; struct mem_cgroup_stat *stat = &mem->stat;
@ -194,7 +238,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
VM_BUG_ON(!irqs_disabled()); VM_BUG_ON(!irqs_disabled());
cpustat = &stat->cpustat[smp_processor_id()]; cpustat = &stat->cpustat[smp_processor_id()];
if (flags & PAGE_CGROUP_FLAG_CACHE) if (PageCgroupCache(pc))
__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val); __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
else else
__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val); __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
@ -295,18 +339,18 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
{ {
int lru = LRU_BASE; int lru = LRU_BASE;
if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE) if (PageCgroupUnevictable(pc))
lru = LRU_UNEVICTABLE; lru = LRU_UNEVICTABLE;
else { else {
if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE) if (PageCgroupActive(pc))
lru += LRU_ACTIVE; lru += LRU_ACTIVE;
if (pc->flags & PAGE_CGROUP_FLAG_FILE) if (PageCgroupFile(pc))
lru += LRU_FILE; lru += LRU_FILE;
} }
MEM_CGROUP_ZSTAT(mz, lru) -= 1; MEM_CGROUP_ZSTAT(mz, lru) -= 1;
mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false); mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
list_del(&pc->lru); list_del(&pc->lru);
} }
@ -315,27 +359,27 @@ static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
{ {
int lru = LRU_BASE; int lru = LRU_BASE;
if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE) if (PageCgroupUnevictable(pc))
lru = LRU_UNEVICTABLE; lru = LRU_UNEVICTABLE;
else { else {
if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE) if (PageCgroupActive(pc))
lru += LRU_ACTIVE; lru += LRU_ACTIVE;
if (pc->flags & PAGE_CGROUP_FLAG_FILE) if (PageCgroupFile(pc))
lru += LRU_FILE; lru += LRU_FILE;
} }
MEM_CGROUP_ZSTAT(mz, lru) += 1; MEM_CGROUP_ZSTAT(mz, lru) += 1;
list_add(&pc->lru, &mz->lists[lru]); list_add(&pc->lru, &mz->lists[lru]);
mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true); mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
} }
static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru) static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
{ {
struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc); struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
int active = pc->flags & PAGE_CGROUP_FLAG_ACTIVE; int active = PageCgroupActive(pc);
int file = pc->flags & PAGE_CGROUP_FLAG_FILE; int file = PageCgroupFile(pc);
int unevictable = pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE; int unevictable = PageCgroupUnevictable(pc);
enum lru_list from = unevictable ? LRU_UNEVICTABLE : enum lru_list from = unevictable ? LRU_UNEVICTABLE :
(LRU_FILE * !!file + !!active); (LRU_FILE * !!file + !!active);
@ -343,16 +387,20 @@ static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
return; return;
MEM_CGROUP_ZSTAT(mz, from) -= 1; MEM_CGROUP_ZSTAT(mz, from) -= 1;
/*
* However this is done under mz->lru_lock, another flags, which
* are not related to LRU, will be modified from out-of-lock.
* We have to use atomic set/clear flags.
*/
if (is_unevictable_lru(lru)) { if (is_unevictable_lru(lru)) {
pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE; ClearPageCgroupActive(pc);
pc->flags |= PAGE_CGROUP_FLAG_UNEVICTABLE; SetPageCgroupUnevictable(pc);
} else { } else {
if (is_active_lru(lru)) if (is_active_lru(lru))
pc->flags |= PAGE_CGROUP_FLAG_ACTIVE; SetPageCgroupActive(pc);
else else
pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE; ClearPageCgroupActive(pc);
pc->flags &= ~PAGE_CGROUP_FLAG_UNEVICTABLE; ClearPageCgroupUnevictable(pc);
} }
MEM_CGROUP_ZSTAT(mz, lru) += 1; MEM_CGROUP_ZSTAT(mz, lru) += 1;
@ -589,16 +637,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
* If a page is accounted as a page cache, insert to inactive list. * If a page is accounted as a page cache, insert to inactive list.
* If anon, insert to active list. * If anon, insert to active list.
*/ */
if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) { pc->flags = pcg_default_flags[ctype];
pc->flags = PAGE_CGROUP_FLAG_CACHE;
if (page_is_file_cache(page))
pc->flags |= PAGE_CGROUP_FLAG_FILE;
else
pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
} else if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
else /* MEM_CGROUP_CHARGE_TYPE_SHMEM */
pc->flags = PAGE_CGROUP_FLAG_CACHE | PAGE_CGROUP_FLAG_ACTIVE;
lock_page_cgroup(page); lock_page_cgroup(page);
if (unlikely(page_get_page_cgroup(page))) { if (unlikely(page_get_page_cgroup(page))) {
@ -677,8 +716,12 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
if (unlikely(!mm)) if (unlikely(!mm))
mm = &init_mm; mm = &init_mm;
return mem_cgroup_charge_common(page, mm, gfp_mask, if (page_is_file_cache(page))
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_CACHE, NULL); MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
else
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
} }
/* /*
@ -706,8 +749,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
VM_BUG_ON(pc->page != page); VM_BUG_ON(pc->page != page);
if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED) if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
&& ((pc->flags & PAGE_CGROUP_FLAG_CACHE) && ((PageCgroupCache(pc) || page_mapped(page))))
|| page_mapped(page)))
goto unlock; goto unlock;
mz = page_cgroup_zoneinfo(pc); mz = page_cgroup_zoneinfo(pc);
@ -758,7 +800,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
if (pc) { if (pc) {
mem = pc->mem_cgroup; mem = pc->mem_cgroup;
css_get(&mem->css); css_get(&mem->css);
if (pc->flags & PAGE_CGROUP_FLAG_CACHE) { if (PageCgroupCache(pc)) {
if (page_is_file_cache(page)) if (page_is_file_cache(page))
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else else