cgroups: use vmalloc for large cgroups pidlist allocations
Moves all pidlist allocation into a helper that decides, based on the requested size, whether the array must be vmalloc()ed or can be obtained via kmalloc(), with a matching helper choosing between kfree() and vfree() on release.

Signed-off-by: Ben Blum <bblum@google.com>
Signed-off-by: Paul Menage <menage@google.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 72a8cb30d1 · commit d1d9fd3308
1 changed file with 42 additions and 5 deletions
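An illustrative aside, not part of the commit page: with a typical 4 KiB PAGE_SIZE and a 4-byte pid_t, the two-page cutoff introduced below sends any list of more than 2048 pids down the vmalloc() path. A minimal standalone sketch of that size check, with the page size and pid type assumed purely for demonstration:

#include <stdio.h>

/* Assumed values for illustration; a kernel build supplies its own
 * PAGE_SIZE and pid_t definitions. */
#define DEMO_PAGE_SIZE 4096
typedef int demo_pid_t;	/* 4 bytes, as on common configurations */

/* Mirrors PIDLIST_TOO_LARGE(c) from the patch below. */
static int pidlist_too_large(int count)
{
	return count * sizeof(demo_pid_t) > (DEMO_PAGE_SIZE * 2);
}

int main(void)
{
	/* 2048 * 4 bytes fills exactly two pages: still kmalloc territory. */
	printf("2048 pids -> vmalloc? %d\n", pidlist_too_large(2048));
	/* One more pid crosses the threshold and takes the vmalloc path. */
	printf("2049 pids -> vmalloc? %d\n", pidlist_too_large(2049));
	return 0;
}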
@@ -50,6 +50,7 @@
 #include <linux/smp_lock.h>
 #include <linux/pid_namespace.h>
 #include <linux/idr.h>
+#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 
 #include <asm/atomic.h>
 
@@ -2350,6 +2351,42 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
  *
  */
 
+/*
+ * The following two functions "fix" the issue where there are more pids
+ * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
+ * TODO: replace with a kernel-wide solution to this problem
+ */
+#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
+static void *pidlist_allocate(int count)
+{
+	if (PIDLIST_TOO_LARGE(count))
+		return vmalloc(count * sizeof(pid_t));
+	else
+		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
+}
+static void pidlist_free(void *p)
+{
+	if (is_vmalloc_addr(p))
+		vfree(p);
+	else
+		kfree(p);
+}
+static void *pidlist_resize(void *p, int newcount)
+{
+	void *newlist;
+	/* note: if new alloc fails, old p will still be valid either way */
+	if (is_vmalloc_addr(p)) {
+		newlist = vmalloc(newcount * sizeof(pid_t));
+		if (!newlist)
+			return NULL;
+		memcpy(newlist, p, newcount * sizeof(pid_t));
+		vfree(p);
+	} else {
+		newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
+	}
+	return newlist;
+}
+
 /*
  * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
  * If the new stripped list is sufficiently smaller and there's enough memory
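A usage note, with a hypothetical caller sketched for illustration (pidlist_demo is not part of the commit): because pidlist_free() dispatches on is_vmalloc_addr(), callers never need to remember which allocator produced an array, and pidlist_resize() leaves the old buffer valid when the new allocation fails, so a caller can keep the unshrunk list, exactly as the pidlist_uniq() caller does in the next hunk.

/* Hypothetical caller, for illustration only. */
static int pidlist_demo(int count)
{
	pid_t *pids, *shrunk;

	pids = pidlist_allocate(count);	/* kmalloc or vmalloc, chosen by size */
	if (!pids)
		return -ENOMEM;
	/* ... fill and deduplicate the array ... */
	shrunk = pidlist_resize(pids, count / 2);
	if (shrunk)
		pids = shrunk;	/* on failure, the original list is still valid */
	pidlist_free(pids);	/* vfree or kfree, detected via is_vmalloc_addr() */
	return 0;
}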
@@ -2389,7 +2426,7 @@ after:
 	 * we'll just stay with what we've got.
 	 */
 	if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
-		newlist = krealloc(list, dest * sizeof(pid_t), GFP_KERNEL);
+		newlist = pidlist_resize(list, dest);
 		if (newlist)
 			*p = newlist;
 	}
@@ -2470,7 +2507,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 	 * show up until sometime later on.
 	 */
 	length = cgroup_task_count(cgrp);
-	array = kmalloc(length * sizeof(pid_t), GFP_KERNEL);
+	array = pidlist_allocate(length);
 	if (!array)
 		return -ENOMEM;
 	/* now, populate the array */
@@ -2494,11 +2531,11 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 	length = pidlist_uniq(&array, length);
 	l = cgroup_pidlist_find(cgrp, type);
 	if (!l) {
-		kfree(array);
+		pidlist_free(array);
 		return -ENOMEM;
 	}
 	/* store array, freeing old if necessary - lock already held */
-	kfree(l->list);
+	pidlist_free(l->list);
 	l->list = array;
 	l->length = length;
 	l->use_count++;
@@ -2659,7 +2696,7 @@ static void cgroup_release_pid_array(struct cgroup_pidlist *l)
 	/* we're the last user if refcount is 0; remove and free */
 	list_del(&l->links);
 	mutex_unlock(&l->owner->pidlist_mutex);
-	kfree(l->list);
+	pidlist_free(l->list);
 	put_pid_ns(l->key.ns);
 	up_write(&l->mutex);
 	kfree(l);
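Design note on this last hunk: only the pid arrays can outgrow what kmalloc comfortably serves, so l->list is released through pidlist_free(), while struct cgroup_pidlist itself stays small and is still freed with a plain kfree(l).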