flow: virtualize flow cache entry methods
This allows the cached object to be validated before it is returned,
and to be destroyed properly when the last reference is held by the
flow cache. It is also a preparation for caching bundles in the flow
cache.

In return for virtualizing the methods, we save on:
- not having to regenerate the whole flow cache on policy removal:
  each flow matching a killed policy gets refreshed as the getter
  function notices that it is stale.
- not having to call flow_cache_flush from the policy garbage
  collector, since the flow cache now properly deletes the object
  when dropping the last reference.

Signed-off-by: Timo Teras <timo.teras@iki.fi>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Timo Teräs authored and David S. Miller committed Apr 7, 2010
1 parent 8020eb8 commit fe1a5f0
Showing 4 changed files with 163 additions and 102 deletions.
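The core of the change is visible in include/net/flow.h below: each cached object embeds a struct flow_cache_object, and the cache calls through its ops instead of manipulating a bare refcount. get() hands out a reference (or returns NULL to force re-resolution), check() validates a cache hit, and delete() drops the cache's reference, destroying the object if it was the last one. A minimal sketch of a provider implementing this contract; the dummy_* names are illustrative and not part of this commit (the real consumer in this series is struct xfrm_policy):

/* Illustrative provider of the new flow_cache_ops contract.
 * The dummy_* names are hypothetical. */
struct dummy_obj {
        struct flow_cache_object flo;   /* embedded in the cached object */
        atomic_t refcnt;
        int dead;
};

static struct flow_cache_object *dummy_get(struct flow_cache_object *flo)
{
        struct dummy_obj *obj = container_of(flo, struct dummy_obj, flo);

        if (obj->dead)
                return NULL;            /* stale: cache falls back to the resolver */
        atomic_inc(&obj->refcnt);       /* reference handed to the caller */
        return flo;
}

static int dummy_check(struct flow_cache_object *flo)
{
        /* nonzero means the cached entry may still be used */
        return !container_of(flo, struct dummy_obj, flo)->dead;
}

static void dummy_delete(struct flow_cache_object *flo)
{
        struct dummy_obj *obj = container_of(flo, struct dummy_obj, flo);

        /* the flow cache may be holding the last reference */
        if (atomic_dec_and_test(&obj->refcnt))
                kfree(obj);
}

static const struct flow_cache_ops dummy_ops = {
        .get    = dummy_get,
        .check  = dummy_check,
        .delete = dummy_delete,
};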
23 changes: 19 additions & 4 deletions include/net/flow.h
@@ -86,11 +86,26 @@ struct flowi {
 
 struct net;
 struct sock;
-typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
-                              u8 dir, void **objp, atomic_t **obj_refp);
+struct flow_cache_ops;
+
+struct flow_cache_object {
+        const struct flow_cache_ops *ops;
+};
+
+struct flow_cache_ops {
+        struct flow_cache_object *(*get)(struct flow_cache_object *);
+        int (*check)(struct flow_cache_object *);
+        void (*delete)(struct flow_cache_object *);
+};
+
+typedef struct flow_cache_object *(*flow_resolve_t)(
+                struct net *net, struct flowi *key, u16 family,
+                u8 dir, struct flow_cache_object *oldobj, void *ctx);
+
+extern struct flow_cache_object *flow_cache_lookup(
+                struct net *net, struct flowi *key, u16 family,
+                u8 dir, flow_resolve_t resolver, void *ctx);
 
-extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
-                               u8 dir, flow_resolve_t resolver);
 extern void flow_cache_flush(void);
 extern atomic_t flow_cache_genid;
 
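The resolver contract changes accordingly, as the new flow_resolve_t typedef above shows: instead of filling in an object pointer and a refcount pointer through output parameters, the resolver now receives the entry's previous object (ownership of the cache's stale reference is handed to it) and returns either a valid flow_cache_object or an ERR_PTR. Because the lookup below both stores the returned object in the cache entry and returns it to the caller, the resolver must account for two references. A hypothetical resolver for the dummy_obj type sketched earlier, again not part of this commit:

static struct flow_cache_object *dummy_resolve(struct net *net,
                                               struct flowi *key, u16 family,
                                               u8 dir,
                                               struct flow_cache_object *oldobj,
                                               void *ctx)
{
        struct dummy_obj *obj;

        /* the stale object's reference now belongs to the resolver */
        if (oldobj)
                oldobj->ops->delete(oldobj);

        obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
        if (!obj)
                return ERR_PTR(-ENOBUFS);

        obj->flo.ops = &dummy_ops;
        /* two references: one kept in the cache entry, one returned
         * to the caller of flow_cache_lookup() */
        atomic_set(&obj->refcnt, 2);
        return &obj->flo;
}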
2 changes: 2 additions & 0 deletions include/net/xfrm.h
@@ -19,6 +19,7 @@
 #include <net/route.h>
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
+#include <net/flow.h>
 
 #include <linux/interrupt.h>
 
@@ -481,6 +482,7 @@ struct xfrm_policy {
         atomic_t                refcnt;
         struct timer_list       timer;
 
+        struct flow_cache_object flo;
         u32                     priority;
         u32                     index;
         struct xfrm_mark        mark;
128 changes: 68 additions & 60 deletions net/core/flow.c
@@ -26,33 +26,32 @@
 #include <linux/security.h>
 
 struct flow_cache_entry {
-        struct flow_cache_entry *next;
-        u16                     family;
-        u8                      dir;
-        u32                     genid;
-        struct flowi            key;
-        void                    *object;
-        atomic_t                *object_ref;
+        struct flow_cache_entry         *next;
+        u16                             family;
+        u8                              dir;
+        u32                             genid;
+        struct flowi                    key;
+        struct flow_cache_object        *object;
 };
 
 struct flow_cache_percpu {
-        struct flow_cache_entry **      hash_table;
+        struct flow_cache_entry         **hash_table;
         int                             hash_count;
         u32                             hash_rnd;
         int                             hash_rnd_recalc;
         struct tasklet_struct           flush_tasklet;
 };
 
 struct flow_flush_info {
-        struct flow_cache *     cache;
+        struct flow_cache               *cache;
         atomic_t                cpuleft;
         struct completion       completion;
 };
 
 struct flow_cache {
         u32                     hash_shift;
         unsigned long           order;
-        struct flow_cache_percpu *      percpu;
+        struct flow_cache_percpu        *percpu;
         struct notifier_block   hotcpu_notifier;
         int                     low_watermark;
         int                     high_watermark;
@@ -78,12 +77,21 @@ static void flow_cache_new_hashrnd(unsigned long arg)
         add_timer(&fc->rnd_timer);
 }
 
+static int flow_entry_valid(struct flow_cache_entry *fle)
+{
+        if (atomic_read(&flow_cache_genid) != fle->genid)
+                return 0;
+        if (fle->object && !fle->object->ops->check(fle->object))
+                return 0;
+        return 1;
+}
+
 static void flow_entry_kill(struct flow_cache *fc,
                             struct flow_cache_percpu *fcp,
                             struct flow_cache_entry *fle)
 {
         if (fle->object)
-                atomic_dec(fle->object_ref);
+                fle->object->ops->delete(fle->object);
         kmem_cache_free(flow_cachep, fle);
         fcp->hash_count--;
 }
@@ -96,16 +104,18 @@ static void __flow_cache_shrink(struct flow_cache *fc,
         int i;
 
         for (i = 0; i < flow_cache_hash_size(fc); i++) {
-                int k = 0;
+                int saved = 0;
 
                 flp = &fcp->hash_table[i];
-                while ((fle = *flp) != NULL && k < shrink_to) {
-                        k++;
-                        flp = &fle->next;
-                }
                 while ((fle = *flp) != NULL) {
-                        *flp = fle->next;
-                        flow_entry_kill(fc, fcp, fle);
+                        if (saved < shrink_to &&
+                            flow_entry_valid(fle)) {
+                                saved++;
+                                flp = &fle->next;
+                        } else {
+                                *flp = fle->next;
+                                flow_entry_kill(fc, fcp, fle);
+                        }
                 }
         }
 }
@@ -166,46 +176,39 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
         return 0;
 }
 
-void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
-                        flow_resolve_t resolver)
+struct flow_cache_object *
+flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+                  flow_resolve_t resolver, void *ctx)
 {
         struct flow_cache *fc = &flow_cache_global;
         struct flow_cache_percpu *fcp;
         struct flow_cache_entry *fle, **head;
+        struct flow_cache_object *flo;
         unsigned int hash;
 
         local_bh_disable();
         fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
 
         fle = NULL;
+        flo = NULL;
         /* Packet really early in init? Making flow_cache_init a
          * pre-smp initcall would solve this. --RR */
         if (!fcp->hash_table)
                 goto nocache;
 
         if (fcp->hash_rnd_recalc)
                 flow_new_hash_rnd(fc, fcp);
-        hash = flow_hash_code(fc, fcp, key);
 
+        hash = flow_hash_code(fc, fcp, key);
         head = &fcp->hash_table[hash];
         for (fle = *head; fle; fle = fle->next) {
                 if (fle->family == family &&
                     fle->dir == dir &&
-                    flow_key_compare(key, &fle->key) == 0) {
-                        if (fle->genid == atomic_read(&flow_cache_genid)) {
-                                void *ret = fle->object;
-
-                                if (ret)
-                                        atomic_inc(fle->object_ref);
-                                local_bh_enable();
-
-                                return ret;
-                        }
+                    flow_key_compare(key, &fle->key) == 0)
                         break;
-                }
         }
 
-        if (!fle) {
+        if (unlikely(!fle)) {
                 if (fcp->hash_count > fc->high_watermark)
                         flow_cache_shrink(fc, fcp);
 
@@ -219,33 +222,39 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
                         fle->object = NULL;
                         fcp->hash_count++;
                 }
+        } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+                flo = fle->object;
+                if (!flo)
+                        goto ret_object;
+                flo = flo->ops->get(flo);
+                if (flo)
+                        goto ret_object;
+        } else if (fle->object) {
+                flo = fle->object;
+                flo->ops->delete(flo);
+                fle->object = NULL;
         }
 
 nocache:
-        {
-                int err;
-                void *obj;
-                atomic_t *obj_ref;
-
-                err = resolver(net, key, family, dir, &obj, &obj_ref);
-
-                if (fle && !err) {
-                        fle->genid = atomic_read(&flow_cache_genid);
-
-                        if (fle->object)
-                                atomic_dec(fle->object_ref);
-
-                        fle->object = obj;
-                        fle->object_ref = obj_ref;
-                        if (obj)
-                                atomic_inc(fle->object_ref);
-                }
-                local_bh_enable();
-
-                if (err)
-                        obj = ERR_PTR(err);
-                return obj;
+        flo = NULL;
+        if (fle) {
+                flo = fle->object;
+                fle->object = NULL;
         }
+        flo = resolver(net, key, family, dir, flo, ctx);
+        if (fle) {
+                fle->genid = atomic_read(&flow_cache_genid);
+                if (!IS_ERR(flo))
+                        fle->object = flo;
+                else
+                        fle->genid--;
+        } else {
+                if (flo && !IS_ERR(flo))
+                        flo->ops->delete(flo);
+        }
+ret_object:
+        local_bh_enable();
+        return flo;
 }
 
 static void flow_cache_flush_tasklet(unsigned long data)
@@ -261,13 +270,12 @@ static void flow_cache_flush_tasklet(unsigned long data)
 
                 fle = fcp->hash_table[i];
                 for (; fle; fle = fle->next) {
-                        unsigned genid = atomic_read(&flow_cache_genid);
-
-                        if (!fle->object || fle->genid == genid)
+                        if (flow_entry_valid(fle))
                                 continue;
 
+                        if (fle->object)
+                                fle->object->ops->delete(fle->object);
                         fle->object = NULL;
-                        atomic_dec(fle->object_ref);
                 }
         }
 
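Caller side, for completeness: a hypothetical fragment using the reworked lookup, with net, fl, family and dir assumed in scope and the dummy_* helpers from the sketches above. Note that flow_cache_lookup() now returns the object (or an ERR_PTR) instead of filling in output parameters:

        struct flow_cache_object *flo;
        struct dummy_obj *obj;

        flo = flow_cache_lookup(net, &fl, family, dir, dummy_resolve, NULL);
        if (IS_ERR(flo))
                return PTR_ERR(flo);
        obj = container_of(flo, struct dummy_obj, flo);
        /* ... use obj ... */
        flo->ops->delete(flo);          /* drop the caller's reference */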