Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 368338
b: refs/heads/master
c: 8fdc929
h: refs/heads/master
v: v3
  • Loading branch information
Chris Metcalf authored and David S. Miller committed Mar 20, 2013
1 parent d621e76 commit f2a47e7
Show file tree
Hide file tree
Showing 2 changed files with 40 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 7fa6f34081f168975af72be51715bdc6601931f7
refs/heads/master: 8fdc929f5727d999d11ba3763b92f6eeacc096f9
42 changes: 39 additions & 3 deletions trunk/net/core/flow.c
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,24 @@ static void flow_cache_flush_tasklet(unsigned long data)
complete(&info->completion);
}

/*
 * Report whether the per-cpu flow cache for @cpu holds no entries.
 *
 * Deliberately conservative: any populated hash chain marks the cpu as
 * needing a flush, since the flow_cache_ops.check() function may assume
 * it is running on the same core that owns the per-cpu cache component.
 *
 * Returns 1 when every hash chain is empty, 0 otherwise.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = &per_cpu(*fc->percpu, cpu);
	int slot;

	for (slot = 0; slot < flow_cache_hash_size(fc); slot++) {
		if (!hlist_empty(&fcp->hash_table[slot]))
			return 0;
	}

	return 1;
}

static void flow_cache_flush_per_cpu(void *data)
{
struct flow_flush_info *info = data;
Expand All @@ -337,22 +355,40 @@ void flow_cache_flush(void)
{
struct flow_flush_info info;
static DEFINE_MUTEX(flow_flush_sem);
cpumask_var_t mask;
int i, self;

/* Track which cpus need flushing to avoid disturbing all cores. */
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return;
cpumask_clear(mask);

/* Don't want cpus going down or up during this. */
get_online_cpus();
mutex_lock(&flow_flush_sem);
info.cache = &flow_cache_global;
atomic_set(&info.cpuleft, num_online_cpus());
for_each_online_cpu(i)
if (!flow_cache_percpu_empty(info.cache, i))
cpumask_set_cpu(i, mask);
atomic_set(&info.cpuleft, cpumask_weight(mask));
if (atomic_read(&info.cpuleft) == 0)
goto done;

init_completion(&info.completion);

local_bh_disable();
smp_call_function(flow_cache_flush_per_cpu, &info, 0);
flow_cache_flush_tasklet((unsigned long)&info);
self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
if (self)
flow_cache_flush_tasklet((unsigned long)&info);
local_bh_enable();

wait_for_completion(&info.completion);

done:
mutex_unlock(&flow_flush_sem);
put_online_cpus();
free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
Expand Down

0 comments on commit f2a47e7

Please sign in to comment.