percpu-refcount: add helpers for ->percpu_count accesses
* All four percpu_ref_*() operations implemented in the header file
  perform the same operation to determine whether the percpu_ref is
  alive and extract the percpu pointer.  Factor out the common logic
  into __pcpu_ref_alive().  This doesn't change the generated code.

* There are a couple of places in percpu-refcount.c which mask out
  PCPU_REF_DEAD to obtain the percpu pointer.  Factor the masking out
  into pcpu_count_ptr().

* The above changes make the WARN_ONCE() conditional at the top of
  percpu_ref_kill_and_confirm() the only user of REF_STATUS().  Test
  PCPU_REF_DEAD directly and remove REF_STATUS().

This patch doesn't introduce any functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Tejun Heo committed Jun 28, 2014
1 parent d630dc4 commit eae7975
Showing 2 changed files with 30 additions and 22 deletions.
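For context before the diff: both files rely on one trick.  ref->pcpu_count
points at percpu memory, which is always at least word-aligned, so the
pointer's lowest bit is guaranteed zero and can be borrowed as the
PCPU_REF_DEAD flag.  Below is a minimal userspace sketch of that tagging
scheme; mark_dead() and strip_dead() are illustrative names, not kernel API
(strip_dead() plays the role of the new pcpu_count_ptr()).

#include <assert.h>

#define PCPU_REF_DEAD	1UL	/* the low bit of an aligned pointer is free */

/* Tag the pointer dead without losing the address (what a kill would do). */
static unsigned long mark_dead(unsigned long pcpu_ptr)
{
	return pcpu_ptr | PCPU_REF_DEAD;
}

/* Mask the flag back out to recover the address (the pcpu_count_ptr() role). */
static unsigned long strip_dead(unsigned long pcpu_ptr)
{
	return pcpu_ptr & ~PCPU_REF_DEAD;
}

int main(void)
{
	unsigned long ptr = 0x1000;		/* stand-in for an aligned percpu address */
	unsigned long dead = mark_dead(ptr);

	assert(dead & PCPU_REF_DEAD);		/* the flag is observable... */
	assert(strip_dead(dead) == ptr);	/* ...and the address survives */
	return 0;
}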
35 changes: 21 additions & 14 deletions include/linux/percpu-refcount.h
@@ -88,10 +88,25 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 	return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_REF_PTR		0
 #define PCPU_REF_DEAD		1
 
-#define REF_STATUS(count)	(((unsigned long) count) & PCPU_REF_DEAD)
+/*
+ * Internal helper.  Don't use outside percpu-refcount proper.  The
+ * function doesn't return the pointer and let the caller test it for NULL
+ * because doing so forces the compiler to generate two conditional
+ * branches as it can't assume that @ref->pcpu_count is not NULL.
+ */
+static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
+				    unsigned __percpu **pcpu_countp)
+{
+	unsigned long pcpu_ptr = (unsigned long)ACCESS_ONCE(ref->pcpu_count);
+
+	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+		return false;
+
+	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+	return true;
+}
 
 /**
  * percpu_ref_get - increment a percpu refcount
@@ -105,9 +120,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_inc(*pcpu_count);
 	else
 		atomic_inc(&ref->count);
@@ -131,9 +144,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	} else {
@@ -166,9 +177,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+	if (__pcpu_ref_alive(ref, &pcpu_count)) {
 		this_cpu_inc(*pcpu_count);
 		ret = true;
 	}
@@ -191,9 +200,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 
 	rcu_read_lock_sched();
 
-	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+	if (__pcpu_ref_alive(ref, &pcpu_count))
 		this_cpu_dec(*pcpu_count);
 	else if (unlikely(atomic_dec_and_test(&ref->count)))
 		ref->release(ref);
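The comment on __pcpu_ref_alive() is worth unpacking.  If the helper instead
returned the pointer (NULL when dead), the inlined fast path would carry two
conditionals: one on PCPU_REF_DEAD inside the helper and one on NULL in the
caller, and the compiler may not merge them because it cannot assume
ref->pcpu_count is non-NULL.  A userspace sketch of the two shapes follows;
all names (struct fake_ref, alive_ptr(), alive(), fake_get()) are illustrative
stand-ins, not kernel API.

#include <stdbool.h>
#include <stddef.h>

#define PCPU_REF_DEAD	1UL

struct fake_ref {
	unsigned *pcpu_count;	/* stand-in for the tagged __percpu pointer */
	unsigned count;		/* stand-in for the atomic fallback counter */
};

/* Shape the patch avoids: return the pointer, let the caller test it.
 * After inlining this is "if (p & DEAD) ..." plus "if (result) ...":
 * two branches, because p itself may legitimately be NULL. */
static unsigned *alive_ptr(struct fake_ref *ref)
{
	unsigned long p = (unsigned long)ref->pcpu_count;

	return (p & PCPU_REF_DEAD) ? NULL : (unsigned *)p;
}

/* Shape the patch uses: one bool drives one branch and the pointer
 * travels through an out-parameter (cf. __pcpu_ref_alive()). */
static bool alive(struct fake_ref *ref, unsigned **countp)
{
	unsigned long p = (unsigned long)ref->pcpu_count;

	if (p & PCPU_REF_DEAD)
		return false;
	*countp = (unsigned *)p;
	return true;
}

void fake_get_two_branches(struct fake_ref *ref)
{
	unsigned *pcpu_count = alive_ptr(ref);

	if (pcpu_count)			/* second conditional the caller pays for */
		(*pcpu_count)++;
	else
		ref->count++;
}

void fake_get(struct fake_ref *ref)
{
	unsigned *pcpu_count;

	if (alive(ref, &pcpu_count))	/* single test on the fast path */
		(*pcpu_count)++;	/* stands in for this_cpu_inc() */
	else
		ref->count++;		/* stands in for atomic_inc() */
}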
17 changes: 9 additions & 8 deletions lib/percpu-refcount.c
@@ -31,6 +31,11 @@
 
 #define PCPU_COUNT_BIAS		(1U << 31)
 
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+	return (unsigned __percpu *)((unsigned long)ref->pcpu_count & ~PCPU_REF_DEAD);
+}
+
 /**
  * percpu_ref_init - initialize a percpu refcount
  * @ref: percpu_ref to initialize
@@ -74,30 +79,26 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
  */
 void percpu_ref_cancel_init(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	int cpu;
 
 	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
 
 	if (pcpu_count) {
 		for_each_possible_cpu(cpu)
 			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
-		free_percpu(ref->pcpu_count);
+		free_percpu(pcpu_count);
 	}
 }
 EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	unsigned count = 0;
 	int cpu;
 
-	/* Mask out PCPU_REF_DEAD */
-	pcpu_count = (unsigned __percpu *)
-		(((unsigned long) pcpu_count) & ~PCPU_REF_DEAD);
-
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
@@ -152,7 +153,7 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+	WARN_ONCE((unsigned long)ref->pcpu_count & PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
 	ref->pcpu_count = (unsigned __percpu *)
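Taken together, the kill path now reads cleanly from the two files:
percpu_ref_kill_and_confirm() sets PCPU_REF_DEAD in ref->pcpu_count, an RCU
grace period lets every CPU observe the dead bit and fall back to the atomic
counter, and percpu_ref_kill_rcu() sums the per-CPU deltas through
pcpu_count_ptr() and folds them, less PCPU_COUNT_BIAS, into ref->count.  A
toy, single-threaded model of that fold follows; the array and loop are
stand-ins for the kernel's percpu machinery and the numbers are invented.

#include <stdio.h>

#define NR_CPUS		4
#define PCPU_COUNT_BIAS	(1U << 31)	/* pre-bias keeps the shared count
					 * nonzero while percpu deltas are
					 * still unaccounted for */

int main(void)
{
	/* Per-CPU deltas: gets and puts may land on different CPUs, so a
	 * single CPU's counter can go "negative"; unsigned wraparound
	 * makes the total come out right anyway. */
	unsigned pcpu_count[NR_CPUS] = { 3, 0, (unsigned)-1, 2 };
	unsigned count = PCPU_COUNT_BIAS + 1;	/* bias + the initial ref */
	unsigned sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* cf. for_each_possible_cpu() */
		sum += pcpu_count[cpu];

	/* cf. the kernel's atomic_add((int) count - PCPU_COUNT_BIAS, ...) */
	count += sum - PCPU_COUNT_BIAS;

	printf("refs after fold: %u\n", count);	/* 3 + 0 - 1 + 2 + 1 = 5 */
	return 0;
}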
