percpu_counter: make percpu_counters_lock irq-safe
percpu_counter is scheduled to grow @gfp support to allow atomic
initialization.  This patch makes percpu_counters_lock irq-safe so
that it can be safely used from atomic contexts.

Signed-off-by: Tejun Heo <tj@kernel.org>
Tejun Heo committed Sep 8, 2014
1 parent 1a4d760 commit ebd8fef
Showing 1 changed file with 10 additions and 6 deletions.
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -115,6 +115,8 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                           struct lock_class_key *key)
 {
+        unsigned long flags __maybe_unused;
+
         raw_spin_lock_init(&fbc->lock);
         lockdep_set_class(&fbc->lock, key);
         fbc->count = amount;
@@ -126,25 +128,27 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 
 #ifdef CONFIG_HOTPLUG_CPU
         INIT_LIST_HEAD(&fbc->list);
-        spin_lock(&percpu_counters_lock);
+        spin_lock_irqsave(&percpu_counters_lock, flags);
         list_add(&fbc->list, &percpu_counters);
-        spin_unlock(&percpu_counters_lock);
+        spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
         return 0;
 }
 EXPORT_SYMBOL(__percpu_counter_init);
 
 void percpu_counter_destroy(struct percpu_counter *fbc)
 {
+        unsigned long flags __maybe_unused;
+
         if (!fbc->counters)
                 return;
 
         debug_percpu_counter_deactivate(fbc);
 
 #ifdef CONFIG_HOTPLUG_CPU
-        spin_lock(&percpu_counters_lock);
+        spin_lock_irqsave(&percpu_counters_lock, flags);
         list_del(&fbc->list);
-        spin_unlock(&percpu_counters_lock);
+        spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
         free_percpu(fbc->counters);
         fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
                 return NOTIFY_OK;
 
         cpu = (unsigned long)hcpu;
-        spin_lock(&percpu_counters_lock);
+        spin_lock_irq(&percpu_counters_lock);
         list_for_each_entry(fbc, &percpu_counters, list) {
                 s32 *pcount;
                 unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
                 *pcount = 0;
                 raw_spin_unlock_irqrestore(&fbc->lock, flags);
         }
-        spin_unlock(&percpu_counters_lock);
+        spin_unlock_irq(&percpu_counters_lock);
 #endif
         return NOTIFY_OK;
 }
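
For context, a minimal sketch (not part of the commit) of the locking pattern the patch switches to. The names my_counter, my_counters, my_counters_lock, my_counter_register and my_counters_walk are hypothetical. The usual motivation for this kind of conversion: once a lock may be taken from atomic contexts, a holder using plain spin_lock() can be interrupted on the same CPU by a path that tries to take the same lock, deadlocking. spin_lock_irqsave() disables local interrupts and remembers the caller's previous state in flags, so it is safe whether or not interrupts were already off. The unconditional spin_lock_irq() variant remains sufficient for paths that are only ever entered with interrupts enabled, such as the CPU-hotplug callback above.

/* Hypothetical sketch of the irq-safe locking pattern (kernel-style C). */
#include <linux/spinlock.h>
#include <linux/list.h>

struct my_counter {
        struct list_head list;
};

static LIST_HEAD(my_counters);                  /* hypothetical global list */
static DEFINE_SPINLOCK(my_counters_lock);       /* hypothetical lock        */

/* May be called from atomic context, possibly with IRQs already disabled,
 * so save and restore the caller's interrupt state around the lock. */
static void my_counter_register(struct my_counter *c)
{
        unsigned long flags;

        spin_lock_irqsave(&my_counters_lock, flags);
        list_add(&c->list, &my_counters);
        spin_unlock_irqrestore(&my_counters_lock, flags);
}

/* Only ever called from process context with IRQs enabled, so the
 * unconditional _irq variants are enough. */
static void my_counters_walk(void)
{
        struct my_counter *c;

        spin_lock_irq(&my_counters_lock);
        list_for_each_entry(c, &my_counters, list) {
                /* per-entry work would go here */
        }
        spin_unlock_irq(&my_counters_lock);
}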
