percpu_counter: new function percpu_counter_sum_and_set
Delayed allocation needs to check the number of free blocks on every write. percpu_counter_read_positive() is not quite accurate enough for this; delayed allocation needs more accurate accounting, but calling percpu_counter_sum_positive() frequently is quite expensive. This patch adds a new function that updates the central counter while summing the per-cpu counters, which improves the accuracy of subsequent percpu_counter_read() calls and reduces the need for the expensive percpu_counter_sum().

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent 64769240bd
commit e8ced39d5e

3 changed files with 16 additions and 5 deletions
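The intended caller-side pattern, as applied to ext4 in the first hunk below, is roughly the following. This is an illustrative sketch only, not code from this patch; the counter name, the "needed" amount, the "slack" value (standing in for FBC_BATCH) and the enough_free() helper are all made up:

	/* Sketch only -- illustrates the usage pattern, not code from this patch.
	 * "counter", "needed" and "slack" are hypothetical names. */
	static int enough_free(struct percpu_counter *counter, s64 needed, s64 slack)
	{
		s64 free = percpu_counter_read_positive(counter);	/* cheap, approximate */

		if (free - needed < slack) {
			/*
			 * Too close to call: take the exact (expensive) sum.
			 * Unlike percpu_counter_sum_positive(), the _and_set
			 * variant also folds the per-cpu deltas back into the
			 * central count, so later percpu_counter_read() calls
			 * start out accurate again.
			 */
			free = percpu_counter_sum_and_set(counter);
		}

		return free >= needed;
	}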
fs/ext4/balloc.c

@@ -1621,7 +1621,7 @@ ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
 #ifdef CONFIG_SMP
 	if (free_blocks - root_blocks < FBC_BATCH)
 		free_blocks =
-			percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
+			percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
 #endif
 	if (free_blocks - root_blocks < nblocks)
 		return free_blocks - root_blocks;
include/linux/percpu_counter.h

@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-	s64 ret = __percpu_counter_sum(fbc);
+	s64 ret = __percpu_counter_sum(fbc, 0);
 	return ret < 0 ? 0 : ret;
 }
 
+static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum(fbc, 1);
+}
+
+
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum(fbc);
+	return __percpu_counter_sum(fbc, 0);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
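Taken together, the header now exposes three summation entry points over the same worker. A shorthand comparison (paraphrase, not verbatim header text):

	/* All three take fbc->lock and walk every online CPU: */
	percpu_counter_sum_positive(fbc);  /* exact sum, negative results clamped to 0,
	                                      fbc->count and per-cpu deltas untouched */
	percpu_counter_sum(fbc);           /* exact sum, may be negative, state untouched */
	percpu_counter_sum_and_set(fbc);   /* exact sum; also zeroes the per-cpu deltas
	                                      and stores the total in fbc->count */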
@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
|
|||
* Add up all the per-cpu counts, return the result. This is a more accurate
|
||||
* but much slower version of percpu_counter_read_positive()
|
||||
*/
|
||||
s64 __percpu_counter_sum(struct percpu_counter *fbc)
|
||||
s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
|
||||
{
|
||||
s64 ret;
|
||||
int cpu;
|
||||
|
@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
|
|||
for_each_online_cpu(cpu) {
|
||||
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
|
||||
ret += *pcount;
|
||||
if (set)
|
||||
*pcount = 0;
|
||||
}
|
||||
if (set)
|
||||
fbc->count = ret;
|
||||
|
||||
spin_unlock(&fbc->lock);
|
||||
return ret;
|
||||
}
|
||||
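The heart of the change is the set path in the worker above: each per-cpu delta is folded into the running total and zeroed, and the total is published as the new central count. A standalone userspace model of that logic (plain C; the struct, array and function names are invented for illustration, and locking is omitted):

	#include <stdio.h>

	#define NCPUS 4

	struct fake_percpu_counter {
		long long count;        /* central, approximate counter */
		int counters[NCPUS];    /* stand-in for the per-cpu deltas */
	};

	/* Model of __percpu_counter_sum(fbc, set); locking omitted. */
	static long long fake_sum(struct fake_percpu_counter *fbc, int set)
	{
		long long ret = fbc->count;

		for (int cpu = 0; cpu < NCPUS; cpu++) {
			ret += fbc->counters[cpu];
			if (set)
				fbc->counters[cpu] = 0;  /* delta now folded into ret */
		}
		if (set)
			fbc->count = ret;                /* publish the exact total */
		return ret;
	}

	int main(void)
	{
		struct fake_percpu_counter c = { .count = 100,
						 .counters = { 3, -1, 7, 0 } };

		printf("approximate read:  %lld\n", c.count);          /* 100 */
		printf("exact sum_and_set: %lld\n", fake_sum(&c, 1));  /* 109 */
		printf("read afterwards:   %lld\n", c.count);          /* 109, accurate again */
		return 0;
	}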