Mirror of https://github.com/adulau/aha.git, synced 2024-12-30 12:46:17 +00:00
4b8561521d
Quicklists can consume several GB of memory. We should provide a means of
monitoring this. After this patch is applied, /proc/meminfo will output the
following:

% cat /proc/meminfo
MemTotal:        7715392 kB
MemFree:         5401600 kB
Buffers:           80384 kB
Cached:           300800 kB
SwapCached:            0 kB
Active:           235584 kB
Inactive:         262656 kB
SwapTotal:       2031488 kB
SwapFree:        2031488 kB
Dirty:              3520 kB
Writeback:             0 kB
AnonPages:        117696 kB
Mapped:            38528 kB
Slab:            1589952 kB
SReclaimable:      23104 kB
SUnreclaim:      1566848 kB
PageTables:        14656 kB
NFS_Unstable:          0 kB
Bounce:                0 kB
WritebackTmp:          0 kB
CommitLimit:     5889152 kB
Committed_AS:     393152 kB
VmallocTotal:   17592177655808 kB
VmallocUsed:       29056 kB
VmallocChunk:   17592177626432 kB
Quicklists:       130944 kB
HugePages_Total:       0
HugePages_Free:        0
HugePages_Rsvd:        0
HugePages_Surp:        0
Hugepagesize:     262144 kB

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Keiichiro Tokunaga <tokunaga.keiich@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
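For reference, the new field can be consumed like any other /proc/meminfo line. The sketch below is a minimal, hypothetical userspace reader (not part of this patch); only the "Quicklists:" field name and the "kB" unit come from the sample output above, everything else is illustrative.

/*
 * Hypothetical userspace sketch: scan /proc/meminfo for the new
 * "Quicklists:" line added by this patch and print its value.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[128];
	unsigned long kb;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Matches e.g. "Quicklists:       130944 kB" */
		if (sscanf(line, "Quicklists: %lu kB", &kb) == 1) {
			printf("quicklist memory: %lu kB\n", kb);
			break;
		}
	}
	fclose(f);
	return 0;
}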
93 lines
2.1 KiB
C
#ifndef LINUX_QUICKLIST_H
#define LINUX_QUICKLIST_H
/*
 * Fast allocations and disposal of pages. Pages must be in the condition
 * as needed after allocation when they are freed. Per cpu lists of pages
 * are kept that only contain node local pages.
 *
 * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com>
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

#ifdef CONFIG_QUICKLIST

struct quicklist {
	void *page;
	int nr_pages;
};

DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];

/*
 * The two key functions quicklist_alloc and quicklist_free are inline so
 * that they may be custom compiled for the platform.
 * Specifying a NULL ctor can remove constructor support. Specifying
 * a constant quicklist allows the determination of the exact address
 * in the per cpu area.
 *
 * The fast path in quicklist_alloc touches only a per cpu cacheline and
 * the first cacheline of the page itself. There is minimal overhead involved.
 */
static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
{
	struct quicklist *q;
	void **p = NULL;

	q = &get_cpu_var(quicklist)[nr];
	p = q->page;
	if (likely(p)) {
		/* Fast path: pop the head page; its first word links to the next page. */
		q->page = p[0];
		p[0] = NULL;
		q->nr_pages--;
	}
	put_cpu_var(quicklist);
	if (likely(p))
		return p;

	/* Slow path: the list was empty, fall back to the page allocator. */
	p = (void *)__get_free_page(flags | __GFP_ZERO);
	if (ctor && p)
		ctor(p);
	return p;
}

static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
	struct page *page)
{
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	/* Chain the page onto the per-cpu list via its first word. */
	*(void **)p = q->page;
	q->page = p;
	q->nr_pages++;
	put_cpu_var(quicklist);
}

static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
{
	__quicklist_free(nr, dtor, pp, virt_to_page(pp));
}

static inline void quicklist_free_page(int nr, void (*dtor)(void *),
			struct page *page)
{
	__quicklist_free(nr, dtor, page_address(page), page);
}

void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free);

unsigned long quicklist_total_size(void);

#else

static inline unsigned long quicklist_total_size(void)
{
	return 0;
}

#endif

#endif /* LINUX_QUICKLIST_H */
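For context, the sketch below shows roughly how an architecture's page-table allocation hooks might sit on top of this API. It is a hypothetical illustration, not part of this header or this commit: the QUICK_PT index, the hook names, and the trim parameters are assumptions for the example; real callers live in an architecture's pgalloc.h.

/*
 * Hypothetical arch-side usage sketch of the quicklist API.
 * Assumes quicklist index 0 is dedicated to page-table pages.
 */
#include <linux/quicklist.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define QUICK_PT 0	/* quicklist index reserved for page-table pages */

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	/*
	 * quicklist_alloc() zeroes freshly allocated pages; pages recycled
	 * from the list are expected to already be in that state (see the
	 * header comment above). No constructor is needed here.
	 */
	return (pte_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	/* Return the page to the per-cpu quicklist instead of the allocator. */
	quicklist_free(QUICK_PT, NULL, pte);
}

static inline void check_pgt_cache(void)
{
	/* Keep some pages cached, freeing at most a handful per call. */
	quicklist_trim(QUICK_PT, NULL, 25, 16);
}

The memory held by such per-cpu caches is exactly what the Quicklists line in /proc/meminfo reports via quicklist_total_size().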