netfilter: conntrack: move generation seqcnt out of netns_ct
We only allow rehashing in the init namespace, so only init_ns.generation
is ever used.  And even if we did allow it elsewhere, a per-namespace
seqcount would make no sense: the conntrack locks are global, so any
ongoing rehash prevents inserts and deletes in every namespace.

So make this private to nf_conntrack_core instead.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Florian Westphal authored and Pablo Neira Ayuso committed Apr 25, 2016
1 parent 15cfd40 commit a3efd81
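
For context on the pattern this commit touches: every hash computation in
the diff below runs inside a seqcount read section and is retried whenever
a table resize raced with it.  The following is a minimal, runnable
userspace sketch of that retry loop using C11 atomics instead of the
kernel's seqcount_t; table_generation, table_size and compute_hash are
made-up stand-ins for nf_conntrack_generation, net->ct.htable_size and
hash_conntrack(), not real kernel symbols.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint table_generation;	/* plays the role of nf_conntrack_generation */
static unsigned int table_size = 1024;	/* plays the role of net->ct.htable_size */

/* cf. read_seqcount_begin(): wait out an in-progress write (odd count) */
static unsigned int read_begin(void)
{
	unsigned int seq;

	do {
		seq = atomic_load(&table_generation);
	} while (seq & 1);
	return seq;
}

/* cf. read_seqcount_retry(): true if a write happened since read_begin() */
static int read_retry(unsigned int seq)
{
	return atomic_load(&table_generation) != seq;
}

static unsigned int compute_hash(unsigned int key)
{
	return key % table_size;	/* stands in for hash_conntrack() */
}

int main(void)
{
	unsigned int seq, bucket;

	/* Reader: recompute the bucket until no resize raced with us. */
	do {
		seq = read_begin();
		bucket = compute_hash(42);
	} while (read_retry(seq));
	printf("key 42 -> bucket %u\n", bucket);
	return 0;
}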
Showing 2 changed files with 11 additions and 10 deletions.
include/net/netns/conntrack.h: 1 change (0 additions, 1 deletion)
@@ -94,7 +94,6 @@ struct netns_ct {
 	int sysctl_checksum;

 	unsigned int htable_size;
-	seqcount_t generation;
 	struct kmem_cache *nf_conntrack_cachep;
 	struct hlist_nulls_head *hash;
 	struct hlist_head *expect_hash;
net/netfilter/nf_conntrack_core.c: 20 changes (11 additions, 9 deletions)
@@ -69,6 +69,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
+static __read_mostly seqcount_t nf_conntrack_generation;
 static __read_mostly bool nf_conntrack_locks_all;

 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
@@ -107,7 +108,7 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
 		spin_lock_nested(&nf_conntrack_locks[h1],
 				 SINGLE_DEPTH_NESTING);
 	}
-	if (read_seqcount_retry(&net->ct.generation, sequence)) {
+	if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
 		nf_conntrack_double_unlock(h1, h2);
 		return true;
 	}
@@ -393,7 +394,7 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)

 	local_bh_disable();
 	do {
-		sequence = read_seqcount_begin(&net->ct.generation);
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
 		hash = hash_conntrack(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		reply_hash = hash_conntrack(net,
@@ -560,7 +561,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)

 	local_bh_disable();
 	do {
-		sequence = read_seqcount_begin(&net->ct.generation);
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
 		hash = hash_conntrack(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		reply_hash = hash_conntrack(net,
@@ -628,7 +629,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	local_bh_disable();

 	do {
-		sequence = read_seqcount_begin(&net->ct.generation);
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
 		/* reuse the hash saved before */
 		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
 		hash = hash_bucket(hash, net);
@@ -771,12 +772,12 @@ static noinline int early_drop(struct net *net, unsigned int _hash)

 	local_bh_disable();
 restart:
-	sequence = read_seqcount_begin(&net->ct.generation);
+	sequence = read_seqcount_begin(&nf_conntrack_generation);
 	hash = hash_bucket(_hash, net);
 	for (; i < net->ct.htable_size; i++) {
 		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
 		nf_conntrack_lock(lockp);
-		if (read_seqcount_retry(&net->ct.generation, sequence)) {
+		if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
 			spin_unlock(lockp);
 			goto restart;
 		}
@@ -1607,7 +1608,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)

 	local_bh_disable();
 	nf_conntrack_all_lock();
-	write_seqcount_begin(&init_net.ct.generation);
+	write_seqcount_begin(&nf_conntrack_generation);

 	/* Lookups in the old hash might happen in parallel, which means we
 	 * might get false negatives during connection lookup. New connections
@@ -1631,7 +1632,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
 	init_net.ct.hash = hash;

-	write_seqcount_end(&init_net.ct.generation);
+	write_seqcount_end(&nf_conntrack_generation);
 	nf_conntrack_all_unlock();
 	local_bh_enable();

@@ -1657,6 +1658,8 @@ int nf_conntrack_init_start(void)
 	int max_factor = 8;
 	int i, ret, cpu;

+	seqcount_init(&nf_conntrack_generation);
+
 	for (i = 0; i < CONNTRACK_LOCKS; i++)
 		spin_lock_init(&nf_conntrack_locks[i]);

@@ -1783,7 +1786,6 @@ int nf_conntrack_init_net(struct net *net)
 	int cpu;

 	atomic_set(&net->ct.count, 0);
-	seqcount_init(&net->ct.generation);

 	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
 	if (!net->ct.pcpu_lists)
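
The lone writer of this seqcount is the resize path in
nf_conntrack_set_hashsize() above, which brackets the rehash with
write_seqcount_begin()/write_seqcount_end() while holding every conntrack
lock.  Continuing the userspace sketch from above (same made-up names,
not kernel API), the write side would look roughly like:

/* cf. write_seqcount_begin()/write_seqcount_end(): the generation is
 * bumped to an odd value before the rehash and back to an even one
 * after it, so concurrent readers either wait or retry. */
static void resize_table(unsigned int new_size)
{
	atomic_fetch_add(&table_generation, 1);	/* odd: rehash in progress */
	table_size = new_size;			/* table swap would happen here */
	atomic_fetch_add(&table_generation, 1);	/* even again, value changed */
}

In the real kernel the write side additionally runs under
nf_conntrack_all_lock() with bottom halves disabled, which is why a single
global counter suffices.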
