From e7247c2d3cb16586f0953735fda3303cd4b6f9ca Mon Sep 17 00:00:00 2001
From: Mingming Cao
Date: Fri, 11 Jul 2008 19:27:31 -0400
Subject: [PATCH]

--- yaml ---
r: 101114
b: refs/heads/master
c: e8ced39d5e8911c662d4d69a342b9d053eaaac4e
h: refs/heads/master
v: v3
---
 [refs]                               |  2 +-
 trunk/fs/ext4/balloc.c               |  2 +-
 trunk/include/linux/percpu_counter.h | 12 +++++++++---
 trunk/lib/percpu_counter.c           |  7 ++++++-
 4 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/[refs] b/[refs]
index 6dfa9a8c9a33..8f829f4c34b7 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 64769240bd07f446f83660bb143bb609d8ab4910
+refs/heads/master: e8ced39d5e8911c662d4d69a342b9d053eaaac4e

diff --git a/trunk/fs/ext4/balloc.c b/trunk/fs/ext4/balloc.c
index 25f63d8c1b3d..6369bacf0dcb 100644
--- a/trunk/fs/ext4/balloc.c
+++ b/trunk/fs/ext4/balloc.c
@@ -1621,7 +1621,7 @@ ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
 #ifdef CONFIG_SMP
 	if (free_blocks - root_blocks < FBC_BATCH)
 		free_blocks =
-			percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
+			percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
 #endif
 	if (free_blocks - root_blocks < nblocks)
 		return free_blocks - root_blocks;

diff --git a/trunk/include/linux/percpu_counter.h b/trunk/include/linux/percpu_counter.h
index 9007ccdfc112..208388835357 100644
--- a/trunk/include/linux/percpu_counter.h
+++ b/trunk/include/linux/percpu_counter.h
@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-	s64 ret = __percpu_counter_sum(fbc);
+	s64 ret = __percpu_counter_sum(fbc, 0);
 	return ret < 0 ? 0 : ret;
 }
 
+static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum(fbc, 1);
+}
+
+
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum(fbc);
+	return __percpu_counter_sum(fbc, 0);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)

diff --git a/trunk/lib/percpu_counter.c b/trunk/lib/percpu_counter.c
index 119174494cb5..4a8ba4bf5f6f 100644
--- a/trunk/lib/percpu_counter.c
+++ b/trunk/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 {
 	s64 ret;
 	int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
+		if (set)
+			*pcount = 0;
 	}
+	if (set)
+		fbc->count = ret;
+
 	spin_unlock(&fbc->lock);
 	return ret;
 }
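
For reference, below is a minimal userspace sketch of the sum-and-set
semantics this patch introduces. It is an illustration under stated
assumptions, not kernel code: a pthread mutex stands in for fbc->lock,
a fixed array stands in for the per-CPU counters, and every name in it
is hypothetical rather than part of the percpu_counter API.

/* sketch.c: illustrative userspace analogue of __percpu_counter_sum(fbc, set) */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS_SKETCH 4

struct counter_sketch {
	pthread_mutex_t lock;              /* stands in for fbc->lock */
	int64_t count;                     /* approximate global count */
	int32_t counters[NR_CPUS_SKETCH];  /* stands in for per-CPU deltas */
};

/* Sum the global count plus all per-CPU deltas for an exact total.
 * With set != 0, additionally fold the total back into count and zero
 * the deltas, so the cheap approximate reads that follow start from an
 * accurate baseline -- the behaviour ext4_has_free_blocks() relies on
 * when free space runs close to the FBC_BATCH slop. */
static int64_t sketch_sum(struct counter_sketch *fbc, int set)
{
	int64_t ret;
	int cpu;

	pthread_mutex_lock(&fbc->lock);
	ret = fbc->count;
	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++) {
		ret += fbc->counters[cpu];
		if (set)
			fbc->counters[cpu] = 0;
	}
	if (set)
		fbc->count = ret;
	pthread_mutex_unlock(&fbc->lock);
	return ret;
}

int main(void)
{
	struct counter_sketch fbc = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.count = 100,
		.counters = { 3, -1, 7, 0 },
	};

	/* Exact total is 109; with set=1 the deltas are folded in. */
	printf("sum_and_set = %lld\n", (long long)sketch_sum(&fbc, 1));
	printf("count now   = %lld\n", (long long)fbc.count);
	return 0;
}

Compiled with e.g. "cc -pthread sketch.c", this prints 109 twice: once as
the exact sum, and once as the global count after the per-CPU deltas have
been zeroed and folded in.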