From b42d98969f0fa4b16a502517a18e545b7ba59e4f Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 16 Nov 2010 05:56:04 +0000
Subject: [PATCH]

--- yaml ---
r: 224353
b: refs/heads/master
c: b178bb3dfc30d9555bdd2401e95af98e23e83e10
h: refs/heads/master
i:
  224351: 2d7464d94b8e605a99ce0798c40e7d758b371566
v: v3
---
 [refs]                   |  2 +-
 trunk/include/net/sock.h | 55 ++++++++++++++++++++++------------------
 2 files changed, 32 insertions(+), 25 deletions(-)

diff --git a/[refs] b/[refs]
index 3e59db2b8413..a6d2d8767355 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c31504dc0d1dc853dcee509d9999169a9097a717
+refs/heads/master: b178bb3dfc30d9555bdd2401e95af98e23e83e10
diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h
index eb0c1f504678..5557dfb3dd68 100644
--- a/trunk/include/net/sock.h
+++ b/trunk/include/net/sock.h
@@ -241,59 +241,67 @@ struct sock {
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
-	kmemcheck_bitfield_begin(flags);
-	unsigned int		sk_shutdown  : 2,
-				sk_no_check  : 2,
-				sk_userlocks : 4,
-				sk_protocol  : 8,
-				sk_type      : 16;
-	kmemcheck_bitfield_end(flags);
-	int			sk_rcvbuf;
 	socket_lock_t		sk_lock;
+	struct sk_buff_head	sk_receive_queue;
 	/*
 	 * The backlog queue is special, it is always used with
 	 * the per-socket spinlock held and requires low latency
 	 * access. Therefore we special case it's implementation.
+	 * Note : rmem_alloc is in this structure to fill a hole
+	 * on 64bit arches, not because its logically part of
+	 * backlog.
 	 */
 	struct {
-		struct sk_buff *head;
-		struct sk_buff *tail;
-		int len;
+		atomic_t	rmem_alloc;
+		int		len;
+		struct sk_buff	*head;
+		struct sk_buff	*tail;
 	} sk_backlog;
+#define sk_rmem_alloc sk_backlog.rmem_alloc
+	int			sk_forward_alloc;
+#ifdef CONFIG_RPS
+	__u32			sk_rxhash;
+#endif
+	atomic_t		sk_drops;
+	int			sk_rcvbuf;
+
+	struct sk_filter __rcu	*sk_filter;
 	struct socket_wq	*sk_wq;
-	struct dst_entry	*sk_dst_cache;
+
+#ifdef CONFIG_NET_DMA
+	struct sk_buff_head	sk_async_wait_queue;
+#endif
+
 #ifdef CONFIG_XFRM
 	struct xfrm_policy	*sk_policy[2];
 #endif
+	unsigned long		sk_flags;
+	struct dst_entry	*sk_dst_cache;
 	spinlock_t		sk_dst_lock;
-	atomic_t		sk_rmem_alloc;
 	atomic_t		sk_wmem_alloc;
 	atomic_t		sk_omem_alloc;
 	int			sk_sndbuf;
-	struct sk_buff_head	sk_receive_queue;
 	struct sk_buff_head	sk_write_queue;
-#ifdef CONFIG_NET_DMA
-	struct sk_buff_head	sk_async_wait_queue;
-#endif
+	kmemcheck_bitfield_begin(flags);
+	unsigned int		sk_shutdown  : 2,
+				sk_no_check  : 2,
+				sk_userlocks : 4,
+				sk_protocol  : 8,
+				sk_type      : 16;
+	kmemcheck_bitfield_end(flags);
 	int			sk_wmem_queued;
-	int			sk_forward_alloc;
 	gfp_t			sk_allocation;
 	int			sk_route_caps;
 	int			sk_route_nocaps;
 	int			sk_gso_type;
 	unsigned int		sk_gso_max_size;
 	int			sk_rcvlowat;
-#ifdef CONFIG_RPS
-	__u32			sk_rxhash;
-#endif
-	unsigned long		sk_flags;
 	unsigned long		sk_lingertime;
 	struct sk_buff_head	sk_error_queue;
 	struct proto		*sk_prot_creator;
 	rwlock_t		sk_callback_lock;
 	int			sk_err,
 				sk_err_soft;
-	atomic_t		sk_drops;
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
@@ -301,7 +309,6 @@ struct sock {
 	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
 	long			sk_sndtimeo;
-	struct sk_filter __rcu	*sk_filter;
 	void			*sk_protinfo;
 	struct timer_list	sk_timer;
 	ktime_t			sk_stamp;