---
yaml
---
r: 6257
b: refs/heads/master
c: a86888b
h: refs/heads/master
i:
  6255: 8e1748f
v: v3
Patrick McHardy authored and David S. Miller committed Aug 29, 2005
1 parent 7023440 commit 53ce478
Showing 5 changed files with 58 additions and 95 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: a55ebcc4c4532107ad9eee1c9bb698ab5f12c00f
+refs/heads/master: a86888b925299330053d20e0eba03ac4d2648c4b
29 changes: 13 additions & 16 deletions trunk/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -411,6 +411,7 @@ struct ip_conntrack_stat

#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
#include <linux/notifier.h>
#include <linux/interrupt.h>

struct ip_conntrack_ecache {
struct ip_conntrack *ct;
@@ -445,26 +446,24 @@ ip_conntrack_expect_unregister_notifier(struct notifier_block *nb)
return notifier_chain_unregister(&ip_conntrack_expect_chain, nb);
}

extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct);
extern void __ip_ct_event_cache_init(struct ip_conntrack *ct);

static inline void
ip_conntrack_event_cache(enum ip_conntrack_events event,
const struct sk_buff *skb)
{
struct ip_conntrack_ecache *ecache =
&__get_cpu_var(ip_conntrack_ecache);

if (unlikely((struct ip_conntrack *) skb->nfct != ecache->ct)) {
if (net_ratelimit()) {
printk(KERN_ERR "ctevent: skb->ct != ecache->ct !!!\n");
dump_stack();
}
}
struct ip_conntrack *ct = (struct ip_conntrack *)skb->nfct;
struct ip_conntrack_ecache *ecache;

local_bh_disable();
ecache = &__get_cpu_var(ip_conntrack_ecache);
if (ct != ecache->ct)
__ip_ct_event_cache_init(ct);
ecache->events |= event;
local_bh_enable();
}

extern void
ip_conntrack_deliver_cached_events_for(const struct ip_conntrack *ct);
extern void ip_conntrack_event_cache_init(const struct sk_buff *skb);

static inline void ip_conntrack_event(enum ip_conntrack_events event,
struct ip_conntrack *ct)
{
@@ -483,9 +482,7 @@ static inline void ip_conntrack_event_cache(enum ip_conntrack_events event,
const struct sk_buff *skb) {}
static inline void ip_conntrack_event(enum ip_conntrack_events event,
struct ip_conntrack *ct) {}
static inline void ip_conntrack_deliver_cached_events_for(
struct ip_conntrack *ct) {}
static inline void ip_conntrack_event_cache_init(const struct sk_buff *skb) {}
static inline void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) {}
static inline void
ip_conntrack_expect_event(enum ip_conntrack_expect_events event,
struct ip_conntrack_expect *exp) {}
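The new ip_conntrack_event_cache() above no longer warns when skb->nfct differs from the cached conntrack; it flushes the old entry via __ip_ct_event_cache_init() and starts caching for the current connection, with the per-CPU access bracketed by local_bh_disable()/local_bh_enable(). As a rough illustration of that caching pattern (not kernel code: a single userspace slot stands in for the per-CPU variable, printf stands in for the notifier chain, and names such as cache_event() and cache_init() are made up for this sketch):

/* Userspace model of the per-CPU conntrack event cache: events for the
 * connection currently being processed are OR'd into one cache slot and
 * only "delivered" (printed here) once a different connection shows up
 * or delivery is requested explicitly.  Illustrative only. */
#include <stdio.h>

struct conn { int id; };                 /* stand-in for struct ip_conntrack */

struct event_cache {
	struct conn *ct;                 /* owner of the cached events */
	unsigned int events;             /* pending event bits */
};

static struct event_cache cache;         /* the kernel keeps one per CPU */

static void deliver(struct event_cache *ec)
{
	if (ec->ct && ec->events)
		printf("deliver events 0x%x for conn %d\n", ec->events, ec->ct->id);
	ec->events = 0;
}

/* rough analogue of __ip_ct_event_cache_init(): flush whatever is still
 * cached for a previous connection, then start caching for the new one */
static void cache_init(struct conn *ct)
{
	if (cache.ct)
		deliver(&cache);
	cache.ct = ct;
}

/* rough analogue of the new ip_conntrack_event_cache() */
static void cache_event(unsigned int event, struct conn *ct)
{
	if (ct != cache.ct)
		cache_init(ct);
	cache.events |= event;
}

int main(void)
{
	struct conn a = { 1 }, b = { 2 };

	cache_event(0x1, &a);            /* first event for a: nothing delivered yet */
	cache_event(0x4, &a);            /* same connection: just accumulates */
	cache_event(0x1, &b);            /* different connection: a's events flush first */
	deliver(&cache);                 /* end of processing: flush b's events */
	return 0;
}

In the kernel version the slot is per-CPU data, which is why the accesses sit between local_bh_disable() and local_bh_enable(), and __ip_ct_event_cache_init() additionally takes a reference on the conntrack (nf_conntrack_get()) so the connection cannot disappear while its events are still cached.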
14 changes: 5 additions & 9 deletions trunk/include/linux/netfilter_ipv4/ip_conntrack_core.h
@@ -44,18 +44,14 @@ static inline int ip_conntrack_confirm(struct sk_buff **pskb)
struct ip_conntrack *ct = (struct ip_conntrack *)(*pskb)->nfct;
int ret = NF_ACCEPT;

if (ct && !is_confirmed(ct))
ret = __ip_conntrack_confirm(pskb);
ip_conntrack_deliver_cached_events_for(ct);

if (ct) {
if (!is_confirmed(ct))
ret = __ip_conntrack_confirm(pskb);
ip_ct_deliver_cached_events(ct);
}
return ret;
}

#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
struct ip_conntrack_ecache;
extern void __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ec);
#endif

extern void __ip_ct_expect_unlink_destroy(struct ip_conntrack_expect *exp);

extern struct list_head *ip_conntrack_hash;
105 changes: 38 additions & 67 deletions trunk/net/ipv4/netfilter/ip_conntrack_core.c
@@ -85,73 +85,62 @@ struct notifier_block *ip_conntrack_expect_chain;

DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);

static inline void __deliver_cached_events(struct ip_conntrack_ecache *ecache)
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
static inline void
__ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
{
DEBUGP("ecache: delivering events for %p\n", ecache->ct);
if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
notifier_call_chain(&ip_conntrack_chain, ecache->events,
ecache->ct);
ecache->events = 0;
}

void __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
{
__deliver_cached_events(ecache);
ip_conntrack_put(ecache->ct);
ecache->ct = NULL;
}

/* Deliver all cached events for a particular conntrack. This is called
* by code prior to async packet handling or freeing the skb */
void
ip_conntrack_deliver_cached_events_for(const struct ip_conntrack *ct)
void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
{
struct ip_conntrack_ecache *ecache =
&__get_cpu_var(ip_conntrack_ecache);

if (!ct)
return;
struct ip_conntrack_ecache *ecache;

local_bh_disable();
ecache = &__get_cpu_var(ip_conntrack_ecache);
if (ecache->ct == ct)
__ip_ct_deliver_cached_events(ecache);
local_bh_enable();
}

if (ecache->ct == ct) {
DEBUGP("ecache: delivering event for %p\n", ct);
__deliver_cached_events(ecache);
} else {
if (net_ratelimit())
printk(KERN_WARNING "ecache: want to deliver for %p, "
"but cache has %p\n", ct, ecache->ct);
}
void __ip_ct_event_cache_init(struct ip_conntrack *ct)
{
struct ip_conntrack_ecache *ecache;

/* signalize that events have already been delivered */
ecache->ct = NULL;
/* take care of delivering potentially old events */
ecache = &__get_cpu_var(ip_conntrack_ecache);
BUG_ON(ecache->ct == ct);
if (ecache->ct)
__ip_ct_deliver_cached_events(ecache);
/* initialize for this conntrack/packet */
ecache->ct = ct;
nf_conntrack_get(&ct->ct_general);
}

/* Deliver cached events for old pending events, if current conntrack != old */
void ip_conntrack_event_cache_init(const struct sk_buff *skb)
/* flush the event cache - touches other CPU's data and must not be called while
* packets are still passing through the code */
static void ip_ct_event_cache_flush(void)
{
struct ip_conntrack *ct = (struct ip_conntrack *) skb->nfct;
struct ip_conntrack_ecache *ecache =
&__get_cpu_var(ip_conntrack_ecache);
struct ip_conntrack_ecache *ecache;
int cpu;

/* take care of delivering potentially old events */
if (ecache->ct != ct) {
enum ip_conntrack_info ctinfo;
/* we have to check, since at startup the cache is NULL */
if (likely(ecache->ct)) {
DEBUGP("ecache: entered for different conntrack: "
"ecache->ct=%p, skb->nfct=%p. delivering "
"events\n", ecache->ct, ct);
__deliver_cached_events(ecache);
for_each_cpu(cpu) {
ecache = &per_cpu(ip_conntrack_ecache, cpu);
if (ecache->ct)
ip_conntrack_put(ecache->ct);
} else {
DEBUGP("ecache: entered for conntrack %p, "
"cache was clean before\n", ct);
}

/* initialize for this conntrack/packet */
ecache->ct = ip_conntrack_get(skb, &ctinfo);
/* ecache->events cleared by __deliver_cached_devents() */
} else {
DEBUGP("ecache: re-entered for conntrack %p.\n", ct);
}
}

#else
static inline void ip_ct_event_cache_flush(void) {}
#endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */

DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
@@ -878,8 +867,6 @@ unsigned int ip_conntrack_in(unsigned int hooknum,

IP_NF_ASSERT((*pskb)->nfct);

ip_conntrack_event_cache_init(*pskb);

ret = proto->packet(ct, *pskb, ctinfo);
if (ret < 0) {
/* Invalid: inverse of the return code tells
@@ -1278,23 +1265,6 @@ ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *), void *data)

ip_conntrack_put(ct);
}

#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
{
/* we need to deliver all cached events in order to drop
* the reference counts */
int cpu;
for_each_cpu(cpu) {
struct ip_conntrack_ecache *ecache =
&per_cpu(ip_conntrack_ecache, cpu);
if (ecache->ct) {
__ip_ct_deliver_cached_events(ecache);
ip_conntrack_put(ecache->ct);
ecache->ct = NULL;
}
}
}
#endif
}

/* Fast function for those who don't want to parse /proc (and I don't
@@ -1381,6 +1351,7 @@ void ip_conntrack_flush()
delete... */
synchronize_net();

ip_ct_event_cache_flush();
i_see_dead_people:
ip_ct_iterate_cleanup(kill_all, NULL);
if (atomic_read(&ip_conntrack_count) != 0) {
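Since every cached entry holds a conntrack reference, the new ip_ct_event_cache_flush() walks all CPUs once at cleanup time and drops those references, replacing the per-iteration loop removed from ip_ct_iterate_cleanup() above. A rough userspace analogue of that teardown walk (a fixed array stands in for the per-CPU data, conn_put() for ip_conntrack_put(); all names here are illustrative):

/* Userspace sketch of the teardown flush: drop the reference each "CPU"
 * slot still holds on its cached connection.  Only safe when no packets
 * are in flight, matching the comment in the patch.  Illustrative only. */
#include <stdio.h>

#define NR_CPUS 4

struct conn {
	int id;
	int refcnt;
};

struct event_cache {
	struct conn *ct;
	unsigned int events;
};

static struct event_cache ecache[NR_CPUS];   /* stand-in for the per-CPU data */

static void conn_put(struct conn *ct)         /* stand-in for ip_conntrack_put() */
{
	if (--ct->refcnt == 0)
		printf("conn %d freed\n", ct->id);
}

static void event_cache_flush(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (ecache[cpu].ct)
			conn_put(ecache[cpu].ct);
		ecache[cpu].ct = NULL;
		ecache[cpu].events = 0;
	}
}

int main(void)
{
	struct conn c = { 1, 1 };            /* one reference held by the "table" */

	c.refcnt++;                          /* CPU 2 caches events for it ... */
	ecache[2].ct = &c;
	ecache[2].events = 0x2;

	event_cache_flush();                 /* ... and teardown drops that reference */
	conn_put(&c);                        /* dropping the table reference frees it */
	return 0;
}

In the kernel the walk uses for_each_cpu()/per_cpu(), which touches other CPUs' data; that is why the patch only calls it from ip_conntrack_flush() after synchronize_net(), once no packets are passing through conntrack any more.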
3 changes: 1 addition & 2 deletions trunk/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -401,7 +401,6 @@ static unsigned int ip_confirm(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
ip_conntrack_event_cache_init(*pskb);
/* We've seen it coming out the other side: confirm it */
return ip_conntrack_confirm(pskb);
}
@@ -419,7 +418,6 @@ static unsigned int ip_conntrack_help(unsigned int hooknum,
ct = ip_conntrack_get(*pskb, &ctinfo);
if (ct && ct->helper) {
unsigned int ret;
ip_conntrack_event_cache_init(*pskb);
ret = ct->helper->help(pskb, ct, ctinfo);
if (ret != NF_ACCEPT)
return ret;
@@ -978,6 +976,7 @@ EXPORT_SYMBOL_GPL(ip_conntrack_chain);
EXPORT_SYMBOL_GPL(ip_conntrack_expect_chain);
EXPORT_SYMBOL_GPL(ip_conntrack_register_notifier);
EXPORT_SYMBOL_GPL(ip_conntrack_unregister_notifier);
EXPORT_SYMBOL_GPL(__ip_ct_event_cache_init);
EXPORT_PER_CPU_SYMBOL_GPL(ip_conntrack_ecache);
#endif
EXPORT_SYMBOL(ip_conntrack_protocol_register);
