dynticks: avoid flow_cache_flush() interrupting every core
Previously, if you did an "ifconfig down" or similar on one core, and
the kernel had CONFIG_XFRM enabled, every core would be interrupted to
check its percpu flow list for items that could be garbage collected.

With this change, we generate a mask of the cores that actually hold any
percpu flow entries and interrupt only those cores.  This matters when we
are trying to isolate a set of cpus from interrupts.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
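
For illustration, here is a minimal sketch of the masked cross-call pattern
the message describes, written outside the flow-cache code.  It is not part
of this commit; has_work() and do_work() are hypothetical placeholders, and
the calls shown (alloc_cpumask_var(), on_each_cpu_mask(), get_online_cpus())
are the generic kernel primitives the patch itself relies on.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Hypothetical handler; runs on every cpu left in the mask. */
static void do_work(void *data)
{
}

/* Interrupt only the cpus for which has_work(cpu) is true. */
static void run_on_busy_cpus(bool (*has_work)(int cpu), void *data)
{
	cpumask_var_t mask;
	int cpu, self;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	get_online_cpus();		/* keep the online cpu set stable */
	for_each_online_cpu(cpu)
		if (has_work(cpu))
			cpumask_set_cpu(cpu, mask);

	/*
	 * Handle the local cpu directly instead of sending it an IPI;
	 * the patch below does the same so the local flush runs in
	 * bh context rather than in the IPI handler.
	 */
	cpu = get_cpu();
	self = cpumask_test_and_clear_cpu(cpu, mask);
	on_each_cpu_mask(mask, do_work, data, 1);	/* wait for remote cpus */
	if (self)
		do_work(data);
	put_cpu();

	put_online_cpus();
	free_cpumask_var(mask);
}

Note that on_each_cpu_mask() would also run the handler locally if the
calling cpu were left in the mask; clearing it first, as the patch does,
lets the caller pick the local execution context.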
Chris Metcalf authored and David S. Miller committed Mar 20, 2013
1 parent 7fa6f34 commit 8fdc929
Showing 1 changed file with 39 additions and 3 deletions.
net/core/flow.c
@@ -323,6 +323,24 @@ static void flow_cache_flush_tasklet(unsigned long data)
 	complete(&info->completion);
 }
 
+/*
+ * Return whether a cpu needs flushing. Conservatively, we assume
+ * the presence of any entries means the core may require flushing,
+ * since the flow_cache_ops.check() function may assume it's running
+ * on the same core as the per-cpu cache component.
+ */
+static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
+{
+	struct flow_cache_percpu *fcp;
+	int i;
+
+	fcp = &per_cpu(*fc->percpu, cpu);
+	for (i = 0; i < flow_cache_hash_size(fc); i++)
+		if (!hlist_empty(&fcp->hash_table[i]))
+			return 0;
+	return 1;
+}
+
 static void flow_cache_flush_per_cpu(void *data)
 {
 	struct flow_flush_info *info = data;
@@ -337,22 +355,40 @@ void flow_cache_flush(void)
 {
 	struct flow_flush_info info;
 	static DEFINE_MUTEX(flow_flush_sem);
+	cpumask_var_t mask;
+	int i, self;
+
+	/* Track which cpus need flushing to avoid disturbing all cores. */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return;
+	cpumask_clear(mask);
 
 	/* Don't want cpus going down or up during this. */
 	get_online_cpus();
 	mutex_lock(&flow_flush_sem);
 	info.cache = &flow_cache_global;
-	atomic_set(&info.cpuleft, num_online_cpus());
+	for_each_online_cpu(i)
+		if (!flow_cache_percpu_empty(info.cache, i))
+			cpumask_set_cpu(i, mask);
+	atomic_set(&info.cpuleft, cpumask_weight(mask));
+	if (atomic_read(&info.cpuleft) == 0)
+		goto done;
+
 	init_completion(&info.completion);
 
 	local_bh_disable();
-	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
-	flow_cache_flush_tasklet((unsigned long)&info);
+	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
+	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
+	if (self)
+		flow_cache_flush_tasklet((unsigned long)&info);
 	local_bh_enable();
 
 	wait_for_completion(&info.completion);
 
+done:
 	mutex_unlock(&flow_flush_sem);
 	put_online_cpus();
+	free_cpumask_var(mask);
 }
 
 static void flow_cache_flush_task(struct work_struct *work)
