Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 10171
b: refs/heads/master
c: eed75f1
h: refs/heads/master
i:
  10169: ae8596d
  10167: 08ad6aa
v: v3
  • Loading branch information
Harald Welte authored and Arnaldo Carvalho de Melo committed Oct 26, 2005
1 parent 351e4e3 commit b13e4d0
Show file tree
Hide file tree
Showing 2 changed files with 96 additions and 38 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: d50a6b56f0f239cf061630c85add121dc3555339
refs/heads/master: eed75f191d8318a2b144da8aae9774e1cfcae492
132 changes: 95 additions & 37 deletions trunk/net/ipv4/netfilter/ip_conntrack_core.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@
#include <linux/netfilter_ipv4/ip_conntrack_core.h>
#include <linux/netfilter_ipv4/listhelp.h>

#define IP_CONNTRACK_VERSION "2.3"
#define IP_CONNTRACK_VERSION "2.4"

#if 0
#define DEBUGP printk
Expand Down Expand Up @@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
static int ip_conntrack_hash_rnd_initted;
static unsigned int ip_conntrack_hash_rnd;

static u_int32_t
hash_conntrack(const struct ip_conntrack_tuple *tuple)
static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
unsigned int size, unsigned int rnd)
{
#if 0
dump_tuple(tuple);
#endif
return (jhash_3words(tuple->src.ip,
(tuple->dst.ip ^ tuple->dst.protonum),
(tuple->src.u.all | (tuple->dst.u.all << 16)),
ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
rnd) % size);
}

/* Map a tuple to a bucket index in the current global hash table,
 * using the global table size and random seed. */
static u_int32_t
hash_conntrack(const struct ip_conntrack_tuple *tuple)
{
	unsigned int bucket;

	bucket = __hash_conntrack(tuple, ip_conntrack_htable_size,
				  ip_conntrack_hash_rnd);
	return bucket;
}

int
Expand Down Expand Up @@ -1341,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data)
return 1;
}

static void free_conntrack_hash(void)
static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
{
if (ip_conntrack_vmalloc)
vfree(ip_conntrack_hash);
if (vmalloced)
vfree(hash);
else
free_pages((unsigned long)ip_conntrack_hash,
get_order(sizeof(struct list_head)
* ip_conntrack_htable_size));
free_pages((unsigned long)hash,
get_order(sizeof(struct list_head) * size));
}

void ip_conntrack_flush()
Expand Down Expand Up @@ -1378,12 +1381,83 @@ void ip_conntrack_cleanup(void)
ip_conntrack_flush();
kmem_cache_destroy(ip_conntrack_cachep);
kmem_cache_destroy(ip_conntrack_expect_cachep);
free_conntrack_hash();
free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
ip_conntrack_htable_size);
nf_unregister_sockopt(&so_getorigdst);
}

/*
 * Allocate a conntrack hash table with 'size' buckets and initialize
 * every bucket as an empty list head.  Contiguous pages are tried
 * first; for large tables that fail the buddy allocator we fall back
 * to vmalloc().  *vmalloced records which allocator succeeded so the
 * caller can later hand it to free_conntrack_hash().
 *
 * Returns NULL if both allocation attempts fail.
 *
 * (The stale pre-patch "static int hashsize; module_param(...)" lines
 * that preceded this function duplicated the module_param_call()
 * registration below set_hashsize() and have been removed.)
 */
static struct list_head *alloc_hashtable(int size, int *vmalloced)
{
	struct list_head *hash;
	unsigned int i;

	*vmalloced = 0;
	hash = (void*)__get_free_pages(GFP_KERNEL,
				       get_order(sizeof(struct list_head)
						 * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct list_head) * size);
	}

	if (hash)
		for (i = 0; i < size; i++)
			INIT_LIST_HEAD(&hash[i]);

	return hash;
}

/*
 * Runtime handler for the "hashsize" module parameter: allocate a new
 * hash table, rehash every existing conntrack entry into it under the
 * write lock, publish the new table/size/seed, then free the old table.
 *
 * Returns 0 on success, -EINVAL for an unparseable or non-positive
 * size, -ENOMEM if the new table cannot be allocated.
 */
int set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, hashsize, vmalloced;
	int old_vmalloced, old_size;
	int rnd;
	struct list_head *hash, *old_hash;
	struct ip_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!ip_conntrack_htable_size)
		return param_set_int(val, kp);

	hashsize = simple_strtol(val, NULL, 0);
	/* simple_strtol() may yield a negative value; a negative size
	 * would flow into the allocator and the '% size' rehash below,
	 * so reject anything non-positive, not just zero. */
	if (hashsize <= 0)
		return -EINVAL;

	hash = alloc_hashtable(hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we also can
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

	write_lock_bh(&ip_conntrack_lock);
	for (i = 0; i < ip_conntrack_htable_size; i++) {
		/* Drain each old bucket, re-bucketing entries with the
		 * new size and seed. */
		while (!list_empty(&ip_conntrack_hash[i])) {
			h = list_entry(ip_conntrack_hash[i].next,
				       struct ip_conntrack_tuple_hash, list);
			list_del(&h->list);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			list_add_tail(&h->list, &hash[bucket]);
		}
	}
	old_size = ip_conntrack_htable_size;
	old_vmalloced = ip_conntrack_vmalloc;
	old_hash = ip_conntrack_hash;

	/* Publish the new table before dropping the lock so readers
	 * always see a consistent (table, size, seed) triple. */
	ip_conntrack_htable_size = hashsize;
	ip_conntrack_vmalloc = vmalloced;
	ip_conntrack_hash = hash;
	ip_conntrack_hash_rnd = rnd;
	write_unlock_bh(&ip_conntrack_lock);

	free_conntrack_hash(old_hash, old_vmalloced, old_size);
	return 0;
}

module_param_call(hashsize, set_hashsize, param_get_uint,
		  &ip_conntrack_htable_size, 0600);

int __init ip_conntrack_init(void)
{
Expand All @@ -1392,9 +1466,7 @@ int __init ip_conntrack_init(void)

/* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
* machine has 256 buckets. >= 1GB machines have 8192 buckets. */
if (hashsize) {
ip_conntrack_htable_size = hashsize;
} else {
if (!ip_conntrack_htable_size) {
ip_conntrack_htable_size
= (((num_physpages << PAGE_SHIFT) / 16384)
/ sizeof(struct list_head));
Expand All @@ -1416,20 +1488,8 @@ int __init ip_conntrack_init(void)
return ret;
}

/* AK: the hash table is twice as big than needed because it
uses list_head. it would be much nicer to caches to use a
single pointer list head here. */
ip_conntrack_vmalloc = 0;
ip_conntrack_hash
=(void*)__get_free_pages(GFP_KERNEL,
get_order(sizeof(struct list_head)
*ip_conntrack_htable_size));
if (!ip_conntrack_hash) {
ip_conntrack_vmalloc = 1;
printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
ip_conntrack_hash = vmalloc(sizeof(struct list_head)
* ip_conntrack_htable_size);
}
ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size,
&ip_conntrack_vmalloc);
if (!ip_conntrack_hash) {
printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
goto err_unreg_sockopt;
Expand Down Expand Up @@ -1461,9 +1521,6 @@ int __init ip_conntrack_init(void)
ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
write_unlock_bh(&ip_conntrack_lock);

for (i = 0; i < ip_conntrack_htable_size; i++)
INIT_LIST_HEAD(&ip_conntrack_hash[i]);

/* For use by ipt_REJECT */
ip_ct_attach = ip_conntrack_attach;

Expand All @@ -1478,7 +1535,8 @@ int __init ip_conntrack_init(void)
err_free_conntrack_slab:
kmem_cache_destroy(ip_conntrack_cachep);
err_free_hash:
free_conntrack_hash();
free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
ip_conntrack_htable_size);
err_unreg_sockopt:
nf_unregister_sockopt(&so_getorigdst);

Expand Down

0 comments on commit b13e4d0

Please sign in to comment.