percpu_ref: add PCPU_REF_DEAD
percpu_ref will be restructured so that percpu/atomic mode switching
and reference killing are decoupled.  In preparation, add
PCPU_REF_DEAD and PCPU_REF_ATOMIC_DEAD, which is the OR of ATOMIC and
DEAD.  For now, ATOMIC and DEAD are changed together and all
PCPU_REF_ATOMIC uses are converted to PCPU_REF_ATOMIC_DEAD without
causing any behavior changes.
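
A minimal user-space sketch (not part of the patch; the names are
illustrative copies of the kernel enum) of why converting the existing
ATOMIC tests to the combined mask is behavior-neutral as long as the
two flags are only ever set together:

        #include <assert.h>

        /* illustrative copies of the flag values introduced by this patch */
        enum {
                REF_ATOMIC      = 1UL << 0,     /* atomic mode */
                REF_DEAD        = 1UL << 1,     /* (being) killed */
                REF_ATOMIC_DEAD = REF_ATOMIC | REF_DEAD,
        };

        int main(void)
        {
                /* a kill currently sets ATOMIC and DEAD at the same time */
                unsigned long bits = REF_ATOMIC_DEAD;

                /* so testing the combined mask agrees with testing ATOMIC alone */
                assert(!!(bits & REF_ATOMIC) == !!(bits & REF_ATOMIC_DEAD));
                return 0;
        }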

percpu_ref_init() now specifies an explicit alignment when allocating
the percpu counters so that the pointer has enough unused low bits to
accommodate the flags.  Note that one flag was fine as the minimum
alignment for percpu memory is 2 bytes, but two flags are already too
many for the natural alignment of unsigned longs on archs like cris
and m68k.
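
A self-contained user-space sketch (not the kernel code; names are
illustrative) of the pointer-tagging scheme this alignment serves: the
counter allocation must be aligned to at least 1 << 2 bytes so both
flag bits fit in the pointer's low bits and can be masked off before
the counter is used.

        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define REF_ATOMIC      (1UL << 0)
        #define REF_DEAD        (1UL << 1)
        #define REF_ATOMIC_DEAD (REF_ATOMIC | REF_DEAD)
        #define REF_FLAG_BITS   2

        int main(void)
        {
                /* alignment >= 1 << REF_FLAG_BITS keeps the low two bits zero */
                unsigned long *count = aligned_alloc(1UL << REF_FLAG_BITS,
                                                     sizeof(unsigned long));
                uintptr_t tagged;

                if (!count)
                        return 1;
                *count = 0;

                /* "killing" the ref tags the pointer with both flags */
                tagged = (uintptr_t)count | REF_ATOMIC_DEAD;

                /* masking the flags off recovers the real counter pointer */
                printf("pointer survives tagging: %d\n",
                       (unsigned long *)(tagged & ~(uintptr_t)REF_ATOMIC_DEAD) == count);

                free(count);
                return 0;
        }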

v2: The original patch had a BUILD_BUG_ON() which triggers if unsigned
    long's alignment isn't enough to accommodate the flags, and it
    triggered on cris and m68k.  percpu_ref_init() was updated to
    specify the required alignment explicitly.  Reported by Fengguang.
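
    The dropped v1 check itself is not reproduced in this commit; a
    hypothetical reconstruction of a check of that shape (a sketch, not
    the actual removed line) would be:

        /* fails the build when unsigned long's natural alignment (2 bytes
         * on cris and m68k) cannot hide the two flag bits */
        BUILD_BUG_ON(__alignof__(unsigned long) < (1 << __PERCPU_REF_FLAG_BITS));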

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: kbuild test robot <fengguang.wu@intel.com>
Tejun Heo committed Sep 24, 2014
1 parent 9e804d1 commit 27344a9
Showing 2 changed files with 16 additions and 9 deletions.
6 changes: 5 additions & 1 deletion include/linux/percpu-refcount.h
@@ -57,6 +57,10 @@ typedef void (percpu_ref_func_t)(struct percpu_ref *);
 /* flags set in the lower bits of percpu_ref->percpu_count_ptr */
 enum {
         __PERCPU_REF_ATOMIC     = 1LU << 0,     /* operating in atomic mode */
+        __PERCPU_REF_DEAD       = 1LU << 1,     /* (being) killed */
+        __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+        __PERCPU_REF_FLAG_BITS  = 2,
 };
 
 struct percpu_ref {
@@ -107,7 +111,7 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
         /* paired with smp_store_release() in percpu_ref_reinit() */
         smp_read_barrier_depends();
 
-        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
+        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                 return false;
 
         *percpu_countp = (unsigned long __percpu *)percpu_ptr;
19 changes: 11 additions & 8 deletions lib/percpu-refcount.c
@@ -34,7 +34,7 @@
 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 {
         return (unsigned long __percpu *)
-                (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+                (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
 }
 
 /**
@@ -52,10 +52,13 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
                     gfp_t gfp)
 {
+        size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
+                             __alignof__(unsigned long));
+
         atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
 
-        ref->percpu_count_ptr =
-                (unsigned long)alloc_percpu_gfp(unsigned long, gfp);
+        ref->percpu_count_ptr = (unsigned long)
+                __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
         if (!ref->percpu_count_ptr)
                 return -ENOMEM;
 
@@ -80,7 +83,7 @@ void percpu_ref_exit(struct percpu_ref *ref)
 
         if (percpu_count) {
                 free_percpu(percpu_count);
-                ref->percpu_count_ptr = __PERCPU_REF_ATOMIC;
+                ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
         }
 }
 EXPORT_SYMBOL_GPL(percpu_ref_exit);
@@ -145,10 +148,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                  percpu_ref_func_t *confirm_kill)
 {
-        WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC,
+        WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC_DEAD,
                   "%s called more than once on %pf!", __func__, ref->release);
 
-        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC_DEAD;
         ref->confirm_switch = confirm_kill;
 
         call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
@@ -180,12 +183,12 @@ void percpu_ref_reinit(struct percpu_ref *ref)
          * Restore per-cpu operation. smp_store_release() is paired with
          * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
          * that the zeroing is visible to all percpu accesses which can see
-         * the following __PERCPU_REF_ATOMIC clearing.
+         * the following __PERCPU_REF_ATOMIC_DEAD clearing.
          */
         for_each_possible_cpu(cpu)
                 *per_cpu_ptr(percpu_count, cpu) = 0;
 
         smp_store_release(&ref->percpu_count_ptr,
-                          ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+                          ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
