From b3607ef976a212e1db8bc7fec67ab04ff3b03943 Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Fri, 23 Mar 2012 15:02:03 -0700
Subject: [PATCH]

--- yaml ---
r: 295173
b: refs/heads/master
c: 307b1cd7ecd7f3dc5ce3d3860957f034f0abe4df
h: refs/heads/master
i: 295171: 6ccc90b9070e5a901c6d8b15ea13ff745adb5f0c
v: v3
---
 [refs]                                       | 2 +-
 trunk/arch/x86/kernel/cpu/perf_event.c       | 4 ++--
 trunk/include/linux/bitops.h                 | 5 ++++-
 trunk/tools/perf/util/include/linux/bitops.h | 2 +-
 4 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/[refs] b/[refs]
index ead9d162e892..8ca7f7cf1bea 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2df5e12900d847af9113980d015939dde0d1b1d4
+refs/heads/master: 307b1cd7ecd7f3dc5ce3d3860957f034f0abe4df
diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c
index 0a18d16cb58d..fa2900c0e398 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event.c
@@ -643,14 +643,14 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	/* Prefer fixed purpose counters */
 	if (x86_pmu.num_counters_fixed) {
 		idx = X86_PMC_IDX_FIXED;
-		for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_MAX) {
+		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 			if (!__test_and_set_bit(idx, sched->state.used))
 				goto done;
 		}
 	}
 	/* Grab the first unused counter starting with idx */
 	idx = sched->state.counter;
-	for_each_set_bit_cont(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+	for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
 		if (!__test_and_set_bit(idx, sched->state.used))
 			goto done;
 	}
diff --git a/trunk/include/linux/bitops.h b/trunk/include/linux/bitops.h
index 94300fe46cce..a78e358f0c17 100644
--- a/trunk/include/linux/bitops.h
+++ b/trunk/include/linux/bitops.h
@@ -27,11 +27,14 @@ extern unsigned long __sw_hweight64(__u64 w);
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
 /* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_cont(bit, addr, size) \
+#define for_each_set_bit_from(bit, addr, size) \
 	for ((bit) = find_next_bit((addr), (size), (bit));	\
 	     (bit) < (size);					\
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
+#define for_each_set_bit_cont(bit, addr, size) \
+	for_each_set_bit_from(bit, addr, size)
+
 static __inline__ int get_bitmask_order(unsigned int count)
 {
 	int order;
diff --git a/trunk/tools/perf/util/include/linux/bitops.h b/trunk/tools/perf/util/include/linux/bitops.h
index 62cdee78db7b..f1584833bd22 100644
--- a/trunk/tools/perf/util/include/linux/bitops.h
+++ b/trunk/tools/perf/util/include/linux/bitops.h
@@ -15,7 +15,7 @@
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
 
 /* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_cont(bit, addr, size) \
+#define for_each_set_bit_from(bit, addr, size) \
 	for ((bit) = find_next_bit((addr), (size), (bit));	\
 	     (bit) < (size);					\
 	     (bit) = find_next_bit((addr), (size), (bit) + 1))
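
Note (not part of the patch): the rename above keeps the macro's behavior identical; for_each_set_bit_from() starts scanning at whatever value 'bit' already holds, unlike for_each_set_bit(), which starts at bit 0. Below is a minimal user-space sketch of that behavior. The macro body is copied from the hunk in include/linux/bitops.h; the single-word find_next_bit() stand-in and the example values are hypothetical and exist only so the snippet compiles and runs outside the kernel.

/* Illustrative sketch only; assumes a toy single-word find_next_bit(). */
#include <stdio.h>

/* hypothetical stand-in for the kernel's find_next_bit(), one word only */
static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset)
{
	while (offset < size && !(*addr & (1UL << offset)))
		offset++;
	return offset;
}

/* the macro as renamed by the patch: scanning begins at the current 'bit' */
#define for_each_set_bit_from(bit, addr, size)			\
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

int main(void)
{
	unsigned long mask = 0xb4;	/* bits 2, 4, 5, 7 set */
	unsigned long bit = 3;		/* start the walk at bit 3, not 0 */

	for_each_set_bit_from(bit, &mask, 8 * sizeof(mask))
		printf("bit %lu is set\n", bit);	/* prints 4, 5, 7 */

	return 0;
}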