mm: oom analysis: add shmem vmstat
Recently we encountered OOM problems due to the memory use of the GEM cache. In general, a large number of Shmem/Tmpfs pages tends to create memory-shortage problems.

We often use the following calculation to determine the number of shmem pages:

	shmem = NR_ACTIVE_ANON + NR_INACTIVE_ANON - NR_ANON_PAGES

However, this expression does not account for isolated and mlocked pages.

This patch adds explicit accounting for pages used by shmem and tmpfs.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 4b02108ac1
parent c6a7f5728a
7 changed files with 18 additions and 3 deletions
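For reference, the old estimate can be reproduced from userspace and compared against the new counter once the patch is applied. The sketch below is illustrative only and not part of the commit; it assumes a kernel that already exposes the Shmem field in /proc/meminfo, and it ignores the (unlikely) case where the subtraction underflows.

	#include <stdio.h>
	#include <string.h>

	/* Return a field from /proc/meminfo in kB; 0 if absent or unreadable. */
	static unsigned long meminfo_kb(const char *name)
	{
		char line[128];
		unsigned long kb = 0;
		FILE *f = fopen("/proc/meminfo", "r");

		if (!f)
			return 0;
		while (fgets(line, sizeof(line), f)) {
			if (!strncmp(line, name, strlen(name))) {
				sscanf(line + strlen(name), "%lu", &kb);
				break;
			}
		}
		fclose(f);
		return kb;
	}

	int main(void)
	{
		/* The old heuristic from the commit message:
		 * shmem ~= NR_ACTIVE_ANON + NR_INACTIVE_ANON - NR_ANON_PAGES */
		unsigned long est = meminfo_kb("Active(anon):") +
				    meminfo_kb("Inactive(anon):") -
				    meminfo_kb("AnonPages:");

		printf("estimated shmem: %lu kB\n", est);
		printf("reported Shmem:  %lu kB\n", meminfo_kb("Shmem:"));
		return 0;
	}

On a machine with many isolated or mlocked pages the two values diverge, which is exactly the inaccuracy the explicit counter removes.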
drivers/base/node.c
@@ -85,6 +85,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 		       "Node %d FilePages:    %8lu kB\n"
 		       "Node %d Mapped:       %8lu kB\n"
 		       "Node %d AnonPages:    %8lu kB\n"
+		       "Node %d Shmem:        %8lu kB\n"
 		       "Node %d KernelStack:  %8lu kB\n"
 		       "Node %d PageTables:   %8lu kB\n"
 		       "Node %d NFS_Unstable: %8lu kB\n"
@@ -117,6 +118,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
+		       nid, K(node_page_state(nid, NR_SHMEM)),
 		       nid, node_page_state(nid, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
 		       nid, K(node_page_state(nid, NR_PAGETABLE)),
fs/proc/meminfo.c
@@ -81,6 +81,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		"Writeback:    %8lu kB\n"
 		"AnonPages:    %8lu kB\n"
 		"Mapped:       %8lu kB\n"
+		"Shmem:        %8lu kB\n"
 		"Slab:         %8lu kB\n"
 		"SReclaimable: %8lu kB\n"
 		"SUnreclaim:   %8lu kB\n"
@@ -125,6 +126,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		K(global_page_state(NR_WRITEBACK)),
 		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
+		K(global_page_state(NR_SHMEM)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE) +
 				global_page_state(NR_SLAB_UNRECLAIMABLE)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE)),
include/linux/mmzone.h
@@ -100,6 +100,7 @@ enum zone_stat_item {
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
 	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
+	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
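NR_SHMEM is a per-zone counter, so every site where a page enters, leaves, or migrates within the page cache must adjust it next to NR_FILE_PAGES, keyed on PageSwapBacked() (shmem/tmpfs pages are the swap-backed pages in the page cache). The patch open-codes that test at each site in mm/filemap.c and mm/migrate.c below; a hypothetical helper capturing the pattern, not part of the patch, might look like:

	/*
	 * Hypothetical helper, not in the patch: the accounting pattern
	 * repeated by the mm/filemap.c and mm/migrate.c hunks below.
	 */
	static inline void mod_cache_page_state(struct page *page, int delta)
	{
		/* every page-cache page is counted in NR_FILE_PAGES ... */
		__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, delta);
		/* ... and swap-backed (shmem/tmpfs) pages also in NR_SHMEM */
		if (PageSwapBacked(page))
			__mod_zone_page_state(page_zone(page), NR_SHMEM, delta);
	}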
mm/filemap.c
@@ -119,6 +119,8 @@ void __remove_from_page_cache(struct page *page)
 	page->mapping = NULL;
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
+	if (PageSwapBacked(page))
+		__dec_zone_page_state(page, NR_SHMEM);
 	BUG_ON(page_mapped(page));
 
 	/*
@@ -431,6 +433,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	if (likely(!error)) {
 		mapping->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
+		if (PageSwapBacked(page))
+			__inc_zone_page_state(page, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
 	} else {
 		page->mapping = NULL;
mm/migrate.c
@@ -312,7 +312,10 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	__inc_zone_page_state(newpage, NR_FILE_PAGES);
-
+	if (PageSwapBacked(page)) {
+		__dec_zone_page_state(page, NR_SHMEM);
+		__inc_zone_page_state(newpage, NR_SHMEM);
+	}
 	spin_unlock_irq(&mapping->tree_lock);
 
 	return 0;
mm/page_alloc.c
@@ -2139,7 +2139,7 @@ void show_free_areas(void)
 		" unevictable:%lu"
 		" dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
-		" mapped:%lu pagetables:%lu bounce:%lu\n",
+		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_ACTIVE_FILE),
 		global_page_state(NR_INACTIVE_ANON),
@@ -2153,6 +2153,7 @@ void show_free_areas(void)
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_page_state(NR_FILE_MAPPED),
+		global_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE));
 
@@ -2175,6 +2176,7 @@ void show_free_areas(void)
 			" dirty:%lukB"
 			" writeback:%lukB"
 			" mapped:%lukB"
+			" shmem:%lukB"
 			" slab_reclaimable:%lukB"
 			" slab_unreclaimable:%lukB"
 			" kernel_stack:%lukB"
@@ -2200,6 +2202,7 @@ void show_free_areas(void)
 			K(zone_page_state(zone, NR_FILE_DIRTY)),
 			K(zone_page_state(zone, NR_WRITEBACK)),
 			K(zone_page_state(zone, NR_FILE_MAPPED)),
+			K(zone_page_state(zone, NR_SHMEM)),
 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
 			zone_page_state(zone, NR_KERNEL_STACK) *
mm/vmstat.c
@@ -644,7 +644,7 @@ static const char * const vmstat_text[] = {
 	"nr_bounce",
 	"nr_vmscan_write",
 	"nr_writeback_temp",
-
+	"nr_shmem",
 #ifdef CONFIG_NUMA
 	"numa_hit",
 	"numa_miss",