vmscan,memcg: reintroduce sc->may_swap
Commit a6dc60f897 ("vmscan: rename sc.may_swap to may_unmap") removed the
may_swap flag, but memcg had used it as a flag for "do we need to use swap?",
as the name indicates.

As a result, in the current implementation memcg cannot reclaim mapped file
caches when mem+swap hits the limit.

Re-introduce the may_swap flag and handle it in get_scan_ratio(). This patch
doesn't influence any scan_control users other than memcg.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 55e5750b3e
commit 2e2e425989
1 changed file with 8 additions and 4 deletions
mm/vmscan.c | 12 ++++++++----

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,6 +63,9 @@ struct scan_control {
 	/* Can mapped pages be reclaimed? */
 	int may_unmap;
 
+	/* Can pages be swapped as part of reclaim? */
+	int may_swap;
+
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
 	 * In this context, it doesn't matter that we scan the
@@ -1380,7 +1383,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	/* If we have no swap space, do not bother scanning anon pages. */
-	if (nr_swap_pages <= 0) {
+	if (!sc->may_swap || (nr_swap_pages <= 0)) {
 		percent[0] = 0;
 		percent[1] = 100;
 		return;
@@ -1697,6 +1700,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_writepage = !laptop_mode,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.may_unmap = 1,
+		.may_swap = 1,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
@@ -1717,6 +1721,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
+		.may_swap = !noswap,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = swappiness,
 		.order = 0,
@@ -1726,9 +1731,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	};
 	struct zonelist *zonelist;
 
-	if (noswap)
-		sc.may_unmap = 0;
-
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1767,6 +1769,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_unmap = 1,
+		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = vm_swappiness,
 		.order = order,
@@ -2298,6 +2301,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+		.may_swap = 1,
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
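
For illustration only, below is a minimal standalone C sketch (not the kernel source; the percent[] split and the names scan_control, may_swap and nr_swap_pages mirror the patch, everything else is simplified or assumed) of how get_scan_ratio() behaves once sc->may_swap is honoured: a memcg that hits its mem+swap limit reclaims with noswap set, the caller initialises .may_swap = !noswap, and the scan is directed entirely at the file LRU instead of touching anon pages.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct scan_control. */
struct scan_control {
	int may_unmap;	/* can mapped pages be reclaimed? */
	int may_swap;	/* can pages be swapped as part of reclaim? */
};

/* Pretend swap state; in the kernel this is a global counter. */
static long nr_swap_pages = 100;

static void get_scan_ratio(const struct scan_control *sc, unsigned int percent[2])
{
	/* If swapping is forbidden or there is no swap space,
	 * do not bother scanning anon pages at all. */
	if (!sc->may_swap || nr_swap_pages <= 0) {
		percent[0] = 0;		/* anon */
		percent[1] = 100;	/* file */
		return;
	}
	/* The real function balances anon vs. file here; a fixed
	 * split stands in for that heuristic. */
	percent[0] = 50;
	percent[1] = 50;
}

int main(void)
{
	/* memcg hitting its mem+swap limit: noswap == 1, so may_swap == 0. */
	struct scan_control sc = { .may_unmap = 1, .may_swap = 0 };
	unsigned int percent[2];

	get_scan_ratio(&sc, percent);
	printf("anon %u%%, file %u%%\n", percent[0], percent[1]);
	return 0;
}

With .may_swap = 0 the sketch prints "anon 0%, file 100%", which is the point of the patch: mapped file cache stays reclaimable for memcg even when swap must not be used, whereas before the change the same condition was (mis)handled by clearing may_unmap.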