Merge branch 'for-linus' into for-next

Conflicts:
	mm/percpu.c
Author: Tejun Heo
Date:   2009-12-08 10:02:12 +09:00
Commit: 50de1a8ef1

4 changed files with 132 additions and 60 deletions

drivers/base/cpu.c

@@ -97,7 +97,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
 	 * boot up and this data does not change there after. Hence this
 	 * operation should be safe. No locking required.
 	 */
-	addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
 	rc = sprintf(buf, "%Lx\n", addr);
 	return rc;
 }
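
The hunk above is needed because crash_notes is dynamically allocated percpu memory, and dynamic percpu chunks may sit in the vmalloc area where __pa() gives a bogus result. A rough sketch of the same pattern for any driver-owned percpu allocation (the names example_buf and example_init are hypothetical, not part of this commit):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

static u32 *example_buf;	/* hypothetical dynamic percpu allocation */

static int __init example_init(void)
{
	phys_addr_t phys;

	example_buf = alloc_percpu(u32);
	if (!example_buf)
		return -ENOMEM;

	/*
	 * A dynamically allocated percpu chunk may be vmapped, so
	 * __pa() on the translated pointer can be wrong; the new
	 * helper handles both the linear-mapped and vmalloc cases.
	 */
	phys = per_cpu_ptr_to_phys(per_cpu_ptr(example_buf, 0));
	printk(KERN_INFO "cpu0 copy at 0x%llx\n", (unsigned long long)phys);
	return 0;
}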

include/linux/percpu.h

@@ -130,6 +130,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 extern void *__alloc_reserved_percpu(size_t size, size_t align);
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -155,6 +156,11 @@ static inline void free_percpu(void *p)
 	kfree(p);
 }
 
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	return __pa(addr);
+}
+
 static inline void __init setup_per_cpu_areas(void) { }
 
 static inline void *pcpu_lpage_remapped(void *kaddr)
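
The header changes above follow the usual SMP/UP split in this file: the real translation lives in mm/percpu.c for SMP builds, while on UP percpu memory is ordinary kmalloc() memory in the linear mapping, so a trivial __pa() fallback suffices. A minimal illustration of that convention with a made-up helper name (foo_ptr_to_phys is not from this commit):

#include <asm/page.h>		/* __pa() */
#include <linux/types.h>	/* phys_addr_t */

#ifdef CONFIG_SMP
/* real implementation provided by the percpu allocator */
extern phys_addr_t foo_ptr_to_phys(void *addr);
#else
/* UP fallback: percpu data is plain linear-mapped memory */
static inline phys_addr_t foo_ptr_to_phys(void *addr)
{
	return __pa(addr);
}
#endif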

kernel/sched.c

@@ -1563,11 +1563,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
@@ -1577,12 +1573,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1617,7 +1613,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1626,11 +1622,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
 		/*
 		 * If there are currently no tasks on the cpu pretend there
@@ -1651,7 +1647,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
@@ -9406,6 +9402,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
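
The sched.c portion swaps a DEFINE_PER_CPU struct containing an NR_CPUS-sized array for a percpu pointer allocated at runtime with __alloc_percpu(), sized by nr_cpu_ids; this is also why sched_init() now calls into the percpu allocator so early. A hedged sketch of the same allocate-then-index pattern outside the scheduler (example_weights, example_weights_init and example_record are invented names):

#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* hypothetical: each CPU gets its own nr_cpu_ids-sized scratch array */
static __read_mostly unsigned long *example_weights;

static int __init example_weights_init(void)
{
	example_weights = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
					 __alignof__(unsigned long));
	return example_weights ? 0 : -ENOMEM;
}

/* caller must keep preemption disabled, as the scheduler path does */
static void example_record(int cpu, unsigned long w)
{
	unsigned long *weights = per_cpu_ptr(example_weights,
					     smp_processor_id());

	weights[cpu] = w;
}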

mm/percpu.c

@@ -72,6 +72,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
@@ -151,7 +152,10 @@ static int pcpu_reserved_chunk_limit;
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary. All actual memory
- * allocations are done using GFP_KERNEL with pcpu_lock released.
+ * allocations are done using GFP_KERNEL with pcpu_lock released. In
+ * general, percpu memory can't be allocated with irq off but
+ * irqsave/restore are still used in alloc path so that it can be used
+ * from early init path - sched_init() specifically.
  *
  * Free path accesses and alters only the index data structures, so it
  * can be safely called from atomic context. When memory needs to be
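
The expanded comment above records the locking rule the rest of this patch implements: pcpu_lock is now taken with spin_lock_irqsave() rather than spin_lock_irq(), so the allocator does not clobber the IRQ state of an early caller such as sched_init(). A tiny, hypothetical sketch of that convention (example_lock and example_touch_index are illustrative only):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stands in for pcpu_lock */

static void example_touch_index(void)
{
	unsigned long flags;

	/*
	 * irqsave/irqrestore leave the caller's IRQ state untouched,
	 * so this path stays usable from contexts that may already
	 * run with IRQs disabled, such as early init code.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... look at / modify the index data structures ... */
	spin_unlock_irqrestore(&example_lock, flags);
}
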
@@ -350,63 +354,86 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 }
 
 /**
- * pcpu_extend_area_map - extend area map for allocation
- * @chunk: target chunk
+ * pcpu_need_to_extend - determine whether chunk area map needs to be extended
+ * @chunk: chunk of interest
  *
- * Extend area map of @chunk so that it can accomodate an allocation.
- * A single allocation can split an area into three areas, so this
- * function makes sure that @chunk->map has at least two extra slots.
+ * Determine whether area map of @chunk needs to be extended to
+ * accomodate a new allocation.
  *
  * CONTEXT:
- * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
- * if area map is extended.
+ * pcpu_lock.
  *
  * RETURNS:
- * 0 if noop, 1 if successfully extended, -errno on failure.
+ * New target map allocation length if extension is necessary, 0
+ * otherwise.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
-	__releases(lock) __acquires(lock)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 {
 	int new_alloc;
-	int *new;
-	size_t size;
 
-	/* has enough? */
 	if (chunk->map_alloc >= chunk->map_used + 2)
 		return 0;
 
-	spin_unlock_irq(&pcpu_lock);
-
 	new_alloc = PCPU_DFL_MAP_ALLOC;
 	while (new_alloc < chunk->map_used + 2)
 		new_alloc *= 2;
 
-	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
-	if (!new) {
-		spin_lock_irq(&pcpu_lock);
+	return new_alloc;
+}
+
+/**
+ * pcpu_extend_area_map - extend area map of a chunk
+ * @chunk: chunk of interest
+ * @new_alloc: new target allocation length of the area map
+ *
+ * Extend area map of @chunk to have @new_alloc entries.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+{
+	int *old = NULL, *new = NULL;
+	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+	unsigned long flags;
+
+	new = pcpu_mem_alloc(new_size);
+	if (!new)
 		return -ENOMEM;
-	}
 
-	/*
-	 * Acquire pcpu_lock and switch to new area map. Only free
-	 * could have happened inbetween, so map_used couldn't have
-	 * grown.
-	 */
-	spin_lock_irq(&pcpu_lock);
-	BUG_ON(new_alloc < chunk->map_used + 2);
+	/* acquire pcpu_lock and switch to new area map */
+	spin_lock_irqsave(&pcpu_lock, flags);
+
+	if (new_alloc <= chunk->map_alloc)
+		goto out_unlock;
 
-	size = chunk->map_alloc * sizeof(chunk->map[0]);
-	memcpy(new, chunk->map, size);
+	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
+	memcpy(new, chunk->map, old_size);
 
 	/*
 	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
 	 * one of the first chunks and still using static map.
 	 */
 	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-		pcpu_mem_free(chunk->map, size);
+		old = chunk->map;
 
 	chunk->map_alloc = new_alloc;
 	chunk->map = new;
+	new = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&pcpu_lock, flags);
+
+	/*
+	 * pcpu_mem_free() might end up calling vfree() which uses
+	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
+	 */
+	pcpu_mem_free(old, old_size);
+	pcpu_mem_free(new, new_size);
+
 	return 0;
 }
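
The restructuring above splits the old pcpu_extend_area_map() into a cheap check done under pcpu_lock (pcpu_need_to_extend()) and the actual GFP_KERNEL resize done with the lock dropped, and it defers pcpu_mem_free() until after unlocking because vfree() takes an IRQ-unsafe lock. A hedged, generic sketch of that allocate-outside/swap-inside/free-outside shape (tbl, tbl_lock and tbl_extend are invented names, and kcalloc()/kfree() stand in for pcpu_mem_alloc()/pcpu_mem_free()):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static DEFINE_SPINLOCK(tbl_lock);	/* stands in for pcpu_lock */
static int *tbl;			/* hypothetical table guarded by tbl_lock */
static int tbl_alloc;			/* current capacity of tbl */

static int tbl_extend(int new_alloc)
{
	int *old = NULL, *new = kcalloc(new_alloc, sizeof(*new), GFP_KERNEL);
	unsigned long flags;

	if (!new)
		return -ENOMEM;

	spin_lock_irqsave(&tbl_lock, flags);
	if (new_alloc <= tbl_alloc) {
		/* lost a race with another extender; keep the larger table */
		old = new;
	} else {
		if (tbl)
			memcpy(new, tbl, tbl_alloc * sizeof(*new));
		old = tbl;
		tbl = new;
		tbl_alloc = new_alloc;
	}
	spin_unlock_irqrestore(&tbl_lock, flags);

	/*
	 * In the real code the free may reach vfree(), which must not
	 * run under the IRQ-disabled lock, hence it happens unlocked.
	 */
	kfree(old);
	return 0;
}
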
@@ -1045,7 +1072,8 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
-	int slot, off;
+	int slot, off, new_alloc;
+	unsigned long flags;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -1054,19 +1082,30 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	}
 
 	mutex_lock(&pcpu_alloc_mutex);
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);
 
 	/* serve reserved allocations from the reserved chunk if available */
 	if (reserved && pcpu_reserved_chunk) {
 		chunk = pcpu_reserved_chunk;
-		if (size > chunk->contig_hint ||
-		    pcpu_extend_area_map(chunk) < 0) {
-			err = "failed to extend area map of reserved chunk";
+
+		if (size > chunk->contig_hint) {
+			err = "alloc from reserved chunk failed";
 			goto fail_unlock;
 		}
+
+		while ((new_alloc = pcpu_need_to_extend(chunk))) {
+			spin_unlock_irqrestore(&pcpu_lock, flags);
+			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+				err = "failed to extend area map of reserved chunk";
+				goto fail_unlock_mutex;
+			}
+			spin_lock_irqsave(&pcpu_lock, flags);
+		}
+
 		off = pcpu_alloc_area(chunk, size, align);
 		if (off >= 0)
 			goto area_found;
+
 		err = "alloc from reserved chunk failed";
 		goto fail_unlock;
 	}
@@ -1078,14 +1117,20 @@ restart:
 			if (size > chunk->contig_hint)
 				continue;
 
-			switch (pcpu_extend_area_map(chunk)) {
-			case 0:
-				break;
-			case 1:
-				goto restart;	/* pcpu_lock dropped, restart */
-			default:
-				err = "failed to extend area map";
-				goto fail_unlock;
+			new_alloc = pcpu_need_to_extend(chunk);
+			if (new_alloc) {
+				spin_unlock_irqrestore(&pcpu_lock, flags);
+				if (pcpu_extend_area_map(chunk,
+							 new_alloc) < 0) {
+					err = "failed to extend area map";
+					goto fail_unlock_mutex;
+				}
+				spin_lock_irqsave(&pcpu_lock, flags);
+				/*
+				 * pcpu_lock has been dropped, need to
+				 * restart cpu_slot list walking.
+				 */
+				goto restart;
 			}
 
 			off = pcpu_alloc_area(chunk, size, align);
@@ -1095,7 +1140,7 @@ restart:
 	}
 
 	/* hmmm... no space left, create a new chunk */
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	chunk = alloc_pcpu_chunk();
 	if (!chunk) {
@@ -1103,16 +1148,16 @@ restart:
 		goto fail_unlock_mutex;
 	}
 
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);
 	pcpu_chunk_relocate(chunk, -1);
 	goto restart;
 
 area_found:
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	/* populate, map and clear the area */
 	if (pcpu_populate_chunk(chunk, off, size)) {
-		spin_lock_irq(&pcpu_lock);
+		spin_lock_irqsave(&pcpu_lock, flags);
 		pcpu_free_area(chunk, off);
 		err = "failed to populate";
 		goto fail_unlock;
@@ -1124,7 +1169,7 @@ area_found:
 	return __addr_to_pcpu_ptr(chunk->base_addr + off);
 
 fail_unlock:
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 fail_unlock_mutex:
 	mutex_unlock(&pcpu_alloc_mutex);
 	if (warn_limit) {
@@ -1256,6 +1301,27 @@ void free_percpu(void *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address. The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	if ((unsigned long)addr < VMALLOC_START ||
+	    (unsigned long)addr >= VMALLOC_END)
+		return __pa(addr);
+	else
+		return page_to_phys(vmalloc_to_page(addr));
+}
+
 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
 					size_t reserved_size,
 					ssize_t *dyn_sizep)