Mirror of https://github.com/adulau/aha.git
Cpuset hardwall flag: switch cpusets to use the bulk cgroup_add_files() API
Currently the cpusets mem_exclusive flag is overloaded to mean both
"no-overlapping" and "no GFP_KERNEL allocations outside this cpuset".

These patches add a new mem_hardwall flag with just the allocation
restriction part of the mem_exclusive semantics, without breaking
backwards-compatibility for those who continue to use just
mem_exclusive. Additionally, the cgroup control file registration for
cpusets is cleaned up to reduce boilerplate.

This patch:

This change tidies up the cpusets control file definitions, and reduces
the amount of boilerplate required to add/change control files in the
future.

Signed-off-by: Paul Menage <menage@google.com>
Reviewed-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
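In short: instead of declaring one named struct cftype per control file and registering each one with its own cgroup_add_file() call, the common cpuset files are gathered into a single cftype array and registered in one pass with cgroup_add_files(). A condensed sketch of the resulting pattern, taken from the diff below (not a complete listing; only the root-only memory_pressure_enabled file is still added individually):

static struct cftype files[] = {
	{
		.name = "cpus",
		.read = cpuset_common_file_read,
		.write = cpuset_common_file_write,
		.private = FILE_CPULIST,
	},
	/* ... one entry per common cpuset control file ... */
};

static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	int err;

	/* Register every entry of the array with a single call. */
	err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
	if (err)
		return err;
	/* memory_pressure_enabled exists only in the root cpuset,
	 * so it is still registered individually. */
	if (!cont->parent)
		err = cgroup_add_file(cont, ss,
				      &cft_memory_pressure_enabled);
	return err;
}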
This commit is contained in:
parent 9e0c914cab
commit addf2c739d

1 changed file with 69 additions and 85 deletions
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1445,53 +1445,76 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
  * for the common functions, 'private' gives the type of file
  */
 
-static struct cftype cft_cpus = {
+static struct cftype files[] = {
+	{
 	.name = "cpus",
 	.read = cpuset_common_file_read,
 	.write = cpuset_common_file_write,
 	.private = FILE_CPULIST,
-};
+	},
 
-static struct cftype cft_mems = {
+	{
 	.name = "mems",
 	.read = cpuset_common_file_read,
 	.write = cpuset_common_file_write,
 	.private = FILE_MEMLIST,
-};
+	},
 
-static struct cftype cft_cpu_exclusive = {
+	{
 	.name = "cpu_exclusive",
 	.read_u64 = cpuset_read_u64,
 	.write_u64 = cpuset_write_u64,
 	.private = FILE_CPU_EXCLUSIVE,
-};
+	},
 
-static struct cftype cft_mem_exclusive = {
+	{
 	.name = "mem_exclusive",
 	.read_u64 = cpuset_read_u64,
 	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEM_EXCLUSIVE,
-};
+	},
 
-static struct cftype cft_sched_load_balance = {
+	{
 	.name = "sched_load_balance",
 	.read_u64 = cpuset_read_u64,
 	.write_u64 = cpuset_write_u64,
 	.private = FILE_SCHED_LOAD_BALANCE,
-};
+	},
 
-static struct cftype cft_sched_relax_domain_level = {
+	{
 	.name = "sched_relax_domain_level",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
-};
+	},
 
-static struct cftype cft_memory_migrate = {
+	{
 	.name = "memory_migrate",
 	.read_u64 = cpuset_read_u64,
 	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEMORY_MIGRATE,
+	},
+
+	{
+	.name = "memory_pressure",
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
+	.private = FILE_MEMORY_PRESSURE,
+	},
+
+	{
+	.name = "memory_spread_page",
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
+	.private = FILE_SPREAD_PAGE,
+	},
+
+	{
+	.name = "memory_spread_slab",
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
+	.private = FILE_SPREAD_SLAB,
+	},
 };
 
 static struct cftype cft_memory_pressure_enabled = {
@@ -1501,57 +1524,18 @@ static struct cftype cft_memory_pressure_enabled = {
 	.private = FILE_MEMORY_PRESSURE_ENABLED,
 };
 
-static struct cftype cft_memory_pressure = {
-	.name = "memory_pressure",
-	.read_u64 = cpuset_read_u64,
-	.write_u64 = cpuset_write_u64,
-	.private = FILE_MEMORY_PRESSURE,
-};
-
-static struct cftype cft_spread_page = {
-	.name = "memory_spread_page",
-	.read_u64 = cpuset_read_u64,
-	.write_u64 = cpuset_write_u64,
-	.private = FILE_SPREAD_PAGE,
-};
-
-static struct cftype cft_spread_slab = {
-	.name = "memory_spread_slab",
-	.read_u64 = cpuset_read_u64,
-	.write_u64 = cpuset_write_u64,
-	.private = FILE_SPREAD_SLAB,
-};
-
 static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 {
 	int err;
 
-	if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss,
-			&cft_sched_relax_domain_level)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
+	err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+	if (err)
 		return err;
 	/* memory_pressure_enabled is in root cpuset only */
-	if (err == 0 && !cont->parent)
+	if (!cont->parent)
 		err = cgroup_add_file(cont, ss,
 				&cft_memory_pressure_enabled);
-	return 0;
+	return err;
 }
 
 /*