memcg: coalesce charging via percpu storage
This patch coalesces access to res_counter at charge time via percpu caching. At charge, memcg charges 32 pages at a time (CHARGE_SIZE in the patch below) and remembers the surplus in a percpu cache; because it is a cache, it is drained/flushed when necessary.

This version uses the public percpu area, which has two benefits:

 1. The sum of stocked charges in the system is bounded by the number of cpus, not by the number of memcgs. This gives better synchronization.
 2. The drain code for flush/cpu-hotplug is very easy (and quick).

The most important point of this patch is that we never touch res_counter in the fast path. The res_counter is a system-wide shared counter which is modified very frequently, so we should avoid touching it as far as we can to prevent false sharing.

On an x86-64 8-cpu server, I tested the overhead of memcg at page fault by running a program which does map/fault/unmap in a loop, one task per cpu via taskset, and summing the number of page faults over 60 seconds.

[without memcg config]
  40156968  page-faults  # 0.085 M/sec  ( +- 0.046% )
  27.67 cache-misses/fault
[root cgroup]
  36659599  page-faults  # 0.077 M/sec  ( +- 0.247% )
  31.58 cache-misses/fault
[in a child cgroup]
  18444157  page-faults  # 0.039 M/sec  ( +- 0.133% )
  69.96 cache-misses/fault
[ + coalescing uncharge patch]
  27133719  page-faults  # 0.057 M/sec  ( +- 0.155% )
  47.16 cache-misses/fault
[ + coalescing uncharge patch + this patch ]
  34224709  page-faults  # 0.072 M/sec  ( +- 0.173% )
  34.69 cache-misses/fault

Changelog (since Oct/2):
 - updated comments
 - replaced get_cpu_var() with __get_cpu_var() where possible.
 - removed the mutex for system-wide drain; added a counter instead.
 - removed CONFIG_HOTPLUG_CPU

Changelog (old):
 - rebased onto the latest mmotm
 - moved the charge size check before the __GFP_WAIT check for avoiding unnecessary
 - added an asynchronous flush routine.
 - fixed bugs pointed out by Nishimura-san.

[akpm@linux-foundation.org: tweak comments]
[nishimura@mxp.nes.nec.co.jp: don't do INIT_WORK() repeatedly against the same work_struct]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
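The test program itself is not part of the commit. A minimal sketch of such a map/fault/unmap loop might look like the following; the 8MB mapping size and the perf invocation in the comment are illustrative assumptions, not taken from the original test:

/* Hypothetical reconstruction of the page-fault microbenchmark described
 * above: map anonymous memory, touch each page to force a fault, unmap,
 * repeat. Run one instance per cpu, e.g. "taskset -c N ./fault_loop", and
 * measure with something like:
 *   perf stat -e page-faults,cache-misses -a sleep 60
 */
#include <sys/mman.h>
#include <unistd.h>

#define MAP_LEN (8 * 1024 * 1024)	/* assumed size; original is unknown */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	for (;;) {
		char *p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		for (long off = 0; off < MAP_LEN; off += page)
			p[off] = 1;	/* fault in each page */
		munmap(p, MAP_LEN);
	}
}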
This commit is contained in:
parent 569b846df5
commit cdec2e4265
1 changed file with 156 additions and 6 deletions
162  mm/memcontrol.c
@@ -38,6 +38,7 @@
 #include <linux/vmalloc.h>
 #include <linux/mm_inline.h>
 #include <linux/page_cgroup.h>
+#include <linux/cpu.h>
 #include "internal.h"
 
 #include <asm/uaccess.h>
@@ -275,6 +276,7 @@ enum charge_type {
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
+static void drain_all_stock_async(void);
 
 static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -1137,6 +1139,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem) {
                        loop++;
+                       if (loop >= 1)
+                               drain_all_stock_async();
                        if (loop >= 2) {
                                /*
                                 * If we have not been able to reclaim
@@ -1258,6 +1262,133 @@ done:
        unlock_page_cgroup(pc);
 }
 
+/*
+ * size of first charge trial. "32" comes from vmscan.c's magic value.
+ * TODO: maybe necessary to use big numbers in big irons.
+ */
+#define CHARGE_SIZE    (32 * PAGE_SIZE)
+struct memcg_stock_pcp {
+       struct mem_cgroup *cached; /* this never be root cgroup */
+       int charge;
+       struct work_struct work;
+};
+static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static atomic_t memcg_drain_count;
+
+/*
+ * Try to consume stocked charge on this cpu. If success, PAGE_SIZE is consumed
+ * from local stock and true is returned. If the stock is 0 or charges from a
+ * cgroup which is not current target, returns false. This stock will be
+ * refilled.
+ */
+static bool consume_stock(struct mem_cgroup *mem)
+{
+       struct memcg_stock_pcp *stock;
+       bool ret = true;
+
+       stock = &get_cpu_var(memcg_stock);
+       if (mem == stock->cached && stock->charge)
+               stock->charge -= PAGE_SIZE;
+       else /* need to call res_counter_charge */
+               ret = false;
+       put_cpu_var(memcg_stock);
+       return ret;
+}
+
+/*
+ * Returns stocks cached in percpu to res_counter and resets cached information.
+ */
+static void drain_stock(struct memcg_stock_pcp *stock)
+{
+       struct mem_cgroup *old = stock->cached;
+
+       if (stock->charge) {
+               res_counter_uncharge(&old->res, stock->charge);
+               if (do_swap_account)
+                       res_counter_uncharge(&old->memsw, stock->charge);
+       }
+       stock->cached = NULL;
+       stock->charge = 0;
+}
+
+/*
+ * This must be called under preempt disabled or must be called by
+ * a thread which is pinned to local cpu.
+ */
+static void drain_local_stock(struct work_struct *dummy)
+{
+       struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+       drain_stock(stock);
+}
+
+/*
+ * Cache charges(val) which is from res_counter, to local per_cpu area.
+ * This will be consumed by consume_stock() function, later.
+ */
+static void refill_stock(struct mem_cgroup *mem, int val)
+{
+       struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+
+       if (stock->cached != mem) { /* reset if necessary */
+               drain_stock(stock);
+               stock->cached = mem;
+       }
+       stock->charge += val;
+       put_cpu_var(memcg_stock);
+}
+
+/*
+ * Tries to drain stocked charges in other cpus. This function is asynchronous
+ * and just puts a work per cpu for draining locally on each cpu. Callers can
+ * expect some charges will be back to res_counter later but cannot wait for
+ * it.
+ */
+static void drain_all_stock_async(void)
+{
+       int cpu;
+       /* This function is for scheduling "drain" in asynchronous way.
+        * The result of "drain" is not directly handled by callers. Then,
+        * if someone is calling drain, we don't have to call drain more.
+        * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
+        * there is a race. We just do loose check here.
+        */
+       if (atomic_read(&memcg_drain_count))
+               return;
+       /* Notify other cpus that system-wide "drain" is running */
+       atomic_inc(&memcg_drain_count);
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+               schedule_work_on(cpu, &stock->work);
+       }
+       put_online_cpus();
+       atomic_dec(&memcg_drain_count);
+       /* We don't wait for flush_work */
+}
+
+/* This is a synchronous drain interface. */
+static void drain_all_stock_sync(void)
+{
+       /* called when force_empty is called */
+       atomic_inc(&memcg_drain_count);
+       schedule_on_each_cpu(drain_local_stock);
+       atomic_dec(&memcg_drain_count);
+}
+
+static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
+                                       unsigned long action,
+                                       void *hcpu)
+{
+       int cpu = (unsigned long)hcpu;
+       struct memcg_stock_pcp *stock;
+
+       if (action != CPU_DEAD)
+               return NOTIFY_OK;
+       stock = &per_cpu(memcg_stock, cpu);
+       drain_stock(stock);
+       return NOTIFY_OK;
+}
+
 /*
  * Unlike exported interface, "oom" parameter is added. if oom==true,
  * oom-killer can be invoked.
@@ -1269,6 +1400,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
        struct mem_cgroup *mem, *mem_over_limit;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct res_counter *fail_res;
+       int csize = CHARGE_SIZE;
 
        if (unlikely(test_thread_flag(TIF_MEMDIE))) {
                /* Don't account this! */
@@ -1293,23 +1425,25 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                return 0;
 
        VM_BUG_ON(css_is_removed(&mem->css));
+       if (mem_cgroup_is_root(mem))
+               goto done;
 
        while (1) {
                int ret = 0;
                unsigned long flags = 0;
 
-               if (mem_cgroup_is_root(mem))
-                       goto done;
-               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+               if (consume_stock(mem))
+                       goto charged;
+
+               ret = res_counter_charge(&mem->res, csize, &fail_res);
                if (likely(!ret)) {
                        if (!do_swap_account)
                                break;
-                       ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
-                                                       &fail_res);
+                       ret = res_counter_charge(&mem->memsw, csize, &fail_res);
                        if (likely(!ret))
                                break;
                        /* mem+swap counter fails */
-                       res_counter_uncharge(&mem->res, PAGE_SIZE);
+                       res_counter_uncharge(&mem->res, csize);
                        flags |= MEM_CGROUP_RECLAIM_NOSWAP;
                        mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                        memsw);
@@ -1318,6 +1452,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                        res);
 
+               /* reduce request size and retry */
+               if (csize > PAGE_SIZE) {
+                       csize = PAGE_SIZE;
+                       continue;
+               }
                if (!(gfp_mask & __GFP_WAIT))
                        goto nomem;
 
@@ -1347,6 +1486,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        goto nomem;
                }
        }
+       if (csize > PAGE_SIZE)
+               refill_stock(mem, csize - PAGE_SIZE);
+charged:
        /*
         * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
         * if they exceeds softlimit.
@@ -2469,6 +2611,7 @@ move_account:
                goto out;
        /* This is for making all *used* pages to be on LRU. */
        lru_add_drain_all();
+       drain_all_stock_sync();
        ret = 0;
        for_each_node_state(node, N_HIGH_MEMORY) {
                for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
@@ -3183,11 +3326,18 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 
        /* root ? */
        if (cont->parent == NULL) {
+               int cpu;
                enable_swap_cgroup();
                parent = NULL;
                root_mem_cgroup = mem;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
+               for_each_possible_cpu(cpu) {
+                       struct memcg_stock_pcp *stock =
+                                               &per_cpu(memcg_stock, cpu);
+                       INIT_WORK(&stock->work, drain_local_stock);
+               }
+               hotcpu_notifier(memcg_stock_cpu_callback, 0);
+
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
 
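The pattern this patch implements is easiest to see stripped of kernel details. Below is an illustrative user-space analogue (not part of the patch; charge_one(), drain(), BATCH, and shared_charge are invented names for the sketch): each thread refills a thread-local stock from a shared atomic counter in batches, so the contended cacheline is written once per BATCH charges instead of once per charge, just as __mem_cgroup_try_charge() charges csize = CHARGE_SIZE to the res_counter and parks the surplus in memcg_stock.

#include <stdatomic.h>

#define BATCH 32			/* mirrors CHARGE_SIZE: 32 pages per refill */

static atomic_long shared_charge;	/* stands in for the shared res_counter */
static _Thread_local long stock;	/* stands in for the percpu memcg_stock */

/* Fast path: consume from the thread-local stock; touch the shared
 * (contended) counter only when the stock is empty. */
static void charge_one(void)
{
	if (stock == 0) {
		atomic_fetch_add(&shared_charge, BATCH);	/* slow path */
		stock = BATCH;
	}
	stock--;
}

/* Give unused stock back, as drain_stock() does at cpu-down/force_empty. */
static void drain(void)
{
	atomic_fetch_sub(&shared_charge, stock);
	stock = 0;
}

The kernel version additionally needs get_cpu_var()/put_cpu_var() because a "thread-local" percpu slot is only stable while preemption is disabled, and it keys the stock by memcg (stock->cached) since charges from different cgroups must not be mixed.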