Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 6269
b: refs/heads/master
c: 6e04e02
h: refs/heads/master
i:
  6267: 27a681f
v: v3
  • Loading branch information
Arnaldo Carvalho de Melo authored and David S. Miller committed Aug 29, 2005
1 parent e575805 commit 1d33ae5
Show file tree
Hide file tree
Showing 9 changed files with 119 additions and 129 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 2d8c4ce51903636ce0f60addc8134aa50ab8fa76
refs/heads/master: 6e04e02165a7209a71db553b7bc48d68421e5ebf
1 change: 1 addition & 0 deletions trunk/include/net/inet_hashtables.h
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@ struct inet_hashinfo {
wait_queue_head_t lhash_wait;
spinlock_t portalloc_lock;
kmem_cache_t *bind_bucket_cachep;
int port_rover;
};

static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
Expand Down
2 changes: 1 addition & 1 deletion trunk/include/net/sock.h
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ struct sock_common {
* @sk_no_check: %SO_NO_CHECK setting, whether or not checkup packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_hashent: hash entry in several tables (e.g. tcp_ehash)
* @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
* @sk_error_queue: rarely used
Expand Down
26 changes: 7 additions & 19 deletions trunk/include/net/tcp.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,19 +41,7 @@
#endif
#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;
#define tcp_ehash (tcp_hashinfo.ehash)
#define tcp_bhash (tcp_hashinfo.bhash)
#define tcp_ehash_size (tcp_hashinfo.ehash_size)
#define tcp_bhash_size (tcp_hashinfo.bhash_size)
#define tcp_listening_hash (tcp_hashinfo.listening_hash)
#define tcp_lhash_lock (tcp_hashinfo.lhash_lock)
#define tcp_lhash_users (tcp_hashinfo.lhash_users)
#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)
#define tcp_bucket_cachep (tcp_hashinfo.bind_bucket_cachep)

extern int tcp_port_rover;
extern struct inet_hashinfo tcp_hashinfo;

#if (BITS_PER_LONG == 64)
#define TCP_ADDRCMP_ALIGN_BYTES 8
Expand Down Expand Up @@ -1463,21 +1451,21 @@ extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
* - If sleeping is not required (or called from BH),
* use plain read_(un)lock(&tcp_lhash_lock).
* use plain read_(un)lock(&inet_hashinfo.lhash_lock).
*/

static inline void tcp_listen_lock(void)
{
/* read_lock synchronizes to candidates to writers */
read_lock(&tcp_lhash_lock);
atomic_inc(&tcp_lhash_users);
read_unlock(&tcp_lhash_lock);
read_lock(&tcp_hashinfo.lhash_lock);
atomic_inc(&tcp_hashinfo.lhash_users);
read_unlock(&tcp_hashinfo.lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
if (atomic_dec_and_test(&tcp_lhash_users))
wake_up(&tcp_lhash_wait);
if (atomic_dec_and_test(&tcp_hashinfo.lhash_users))
wake_up(&tcp_hashinfo.lhash_wait);
}

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
Expand Down
42 changes: 21 additions & 21 deletions trunk/net/ipv4/tcp.c
Original file line number Diff line number Diff line change
Expand Up @@ -2257,11 +2257,11 @@ void __init tcp_init(void)
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
sizeof(skb->cb));

tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!tcp_bucket_cachep)
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (!tcp_hashinfo.bind_bucket_cachep)
panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
Expand All @@ -2276,45 +2276,45 @@ void __init tcp_init(void)
*
* The methodology is similar to that of the buffer cache.
*/
tcp_ehash =
tcp_hashinfo.ehash =
alloc_large_system_hash("TCP established",
sizeof(struct inet_ehash_bucket),
thash_entries,
(num_physpages >= 128 * 1024) ?
(25 - PAGE_SHIFT) :
(27 - PAGE_SHIFT),
HASH_HIGHMEM,
&tcp_ehash_size,
&tcp_hashinfo.ehash_size,
NULL,
0);
tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
for (i = 0; i < (tcp_ehash_size << 1); i++) {
rwlock_init(&tcp_ehash[i].lock);
INIT_HLIST_HEAD(&tcp_ehash[i].chain);
tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
rwlock_init(&tcp_hashinfo.ehash[i].lock);
INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
}

tcp_bhash =
tcp_hashinfo.bhash =
alloc_large_system_hash("TCP bind",
sizeof(struct inet_bind_hashbucket),
tcp_ehash_size,
tcp_hashinfo.ehash_size,
(num_physpages >= 128 * 1024) ?
(25 - PAGE_SHIFT) :
(27 - PAGE_SHIFT),
HASH_HIGHMEM,
&tcp_bhash_size,
&tcp_hashinfo.bhash_size,
NULL,
64 * 1024);
tcp_bhash_size = 1 << tcp_bhash_size;
for (i = 0; i < tcp_bhash_size; i++) {
spin_lock_init(&tcp_bhash[i].lock);
INIT_HLIST_HEAD(&tcp_bhash[i].chain);
tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
spin_lock_init(&tcp_hashinfo.bhash[i].lock);
INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}

/* Try to be a bit smarter and adjust defaults depending
* on available memory.
*/
for (order = 0; ((1 << order) << PAGE_SHIFT) <
(tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
order++)
;
if (order >= 4) {
Expand All @@ -2329,7 +2329,7 @@ void __init tcp_init(void)
sysctl_tcp_max_orphans >>= (3 - order);
sysctl_max_syn_backlog = 128;
}
tcp_port_rover = sysctl_local_port_range[0] - 1;
tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;

sysctl_tcp_mem[0] = 768 << order;
sysctl_tcp_mem[1] = 1024 << order;
Expand All @@ -2344,7 +2344,7 @@ void __init tcp_init(void)

printk(KERN_INFO "TCP: Hash tables configured "
"(established %d bind %d)\n",
tcp_ehash_size << 1, tcp_bhash_size);
tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);

tcp_register_congestion_control(&tcp_reno);
}
Expand Down
8 changes: 4 additions & 4 deletions trunk/net/ipv4/tcp_diag.c
Original file line number Diff line number Diff line change
Expand Up @@ -595,7 +595,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_node *node;

num = 0;
sk_for_each(sk, node, &tcp_listening_hash[i]) {
sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i]) {
struct inet_sock *inet = inet_sk(sk);

if (num < s_num) {
Expand Down Expand Up @@ -645,8 +645,8 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!(r->tcpdiag_states&~(TCPF_LISTEN|TCPF_SYN_RECV)))
return skb->len;

for (i = s_i; i < tcp_ehash_size; i++) {
struct inet_ehash_bucket *head = &tcp_ehash[i];
for (i = s_i; i < tcp_hashinfo.ehash_size; i++) {
struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[i];
struct sock *sk;
struct hlist_node *node;

Expand Down Expand Up @@ -678,7 +678,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)

if (r->tcpdiag_states&TCPF_TIME_WAIT) {
sk_for_each(sk, node,
&tcp_ehash[i + tcp_ehash_size].chain) {
&tcp_hashinfo.ehash[i + tcp_hashinfo.ehash_size].chain) {
struct inet_sock *inet = inet_sk(sk);

if (num < s_num)
Expand Down
Loading

0 comments on commit 1d33ae5

Please sign in to comment.