Commit
---
r: 224353
b: refs/heads/master
c: b178bb3
h: refs/heads/master
i:
  224351: 2d7464d
v: v3
Eric Dumazet authored and David S. Miller committed Nov 16, 2010
1 parent 91e8b70 commit b42d989
Showing 2 changed files with 32 additions and 25 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c31504dc0d1dc853dcee509d9999169a9097a717
+refs/heads/master: b178bb3dfc30d9555bdd2401e95af98e23e83e10
55 changes: 31 additions & 24 deletions trunk/include/net/sock.h
@@ -241,67 +241,74 @@ struct sock {
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
-	kmemcheck_bitfield_begin(flags);
-	unsigned int		sk_shutdown  : 2,
-				sk_no_check  : 2,
-				sk_userlocks : 4,
-				sk_protocol  : 8,
-				sk_type      : 16;
-	kmemcheck_bitfield_end(flags);
-	int			sk_rcvbuf;
 	socket_lock_t		sk_lock;
+	struct sk_buff_head	sk_receive_queue;
 	/*
 	 * The backlog queue is special, it is always used with
 	 * the per-socket spinlock held and requires low latency
 	 * access. Therefore we special case it's implementation.
+	 * Note : rmem_alloc is in this structure to fill a hole
+	 * on 64bit arches, not because its logically part of
+	 * backlog.
 	 */
 	struct {
-		struct sk_buff	*head;
-		struct sk_buff	*tail;
-		int		len;
+		atomic_t	rmem_alloc;
+		int		len;
+		struct sk_buff	*head;
+		struct sk_buff	*tail;
 	} sk_backlog;
+#define sk_rmem_alloc sk_backlog.rmem_alloc
+	int			sk_forward_alloc;
+#ifdef CONFIG_RPS
+	__u32			sk_rxhash;
+#endif
+	atomic_t		sk_drops;
+	int			sk_rcvbuf;
+
+	struct sk_filter __rcu	*sk_filter;
 	struct socket_wq	*sk_wq;
-	struct dst_entry	*sk_dst_cache;
+
+#ifdef CONFIG_NET_DMA
+	struct sk_buff_head	sk_async_wait_queue;
+#endif
+
 #ifdef CONFIG_XFRM
 	struct xfrm_policy	*sk_policy[2];
 #endif
+	unsigned long		sk_flags;
+	struct dst_entry	*sk_dst_cache;
 	spinlock_t		sk_dst_lock;
-	atomic_t		sk_rmem_alloc;
 	atomic_t		sk_wmem_alloc;
 	atomic_t		sk_omem_alloc;
 	int			sk_sndbuf;
-	struct sk_buff_head	sk_receive_queue;
 	struct sk_buff_head	sk_write_queue;
-#ifdef CONFIG_NET_DMA
-	struct sk_buff_head	sk_async_wait_queue;
-#endif
+	kmemcheck_bitfield_begin(flags);
+	unsigned int		sk_shutdown  : 2,
+				sk_no_check  : 2,
+				sk_userlocks : 4,
+				sk_protocol  : 8,
+				sk_type      : 16;
+	kmemcheck_bitfield_end(flags);
 	int			sk_wmem_queued;
-	int			sk_forward_alloc;
 	gfp_t			sk_allocation;
 	int			sk_route_caps;
 	int			sk_route_nocaps;
 	int			sk_gso_type;
 	unsigned int		sk_gso_max_size;
 	int			sk_rcvlowat;
-#ifdef CONFIG_RPS
-	__u32			sk_rxhash;
-#endif
-	unsigned long		sk_flags;
 	unsigned long		sk_lingertime;
 	struct sk_buff_head	sk_error_queue;
 	struct proto		*sk_prot_creator;
 	rwlock_t		sk_callback_lock;
 	int			sk_err,
 				sk_err_soft;
-	atomic_t		sk_drops;
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
 	struct pid		*sk_peer_pid;
 	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
 	long			sk_sndtimeo;
-	struct sk_filter __rcu	*sk_filter;
 	void			*sk_protinfo;
 	struct timer_list	sk_timer;
 	ktime_t			sk_stamp;
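The new comment in the diff explains why rmem_alloc now lives inside sk_backlog: on 64-bit architectures the surrounding pointers are 8-byte aligned, so a lone 4-byte member drags 4 bytes of padding with it, while two 4-byte members placed side by side leave no hole. A minimal userspace sketch of that effect, using hypothetical stand-in types (sock_before, sock_after, sk_buff_stub are illustrative names, not the real kernel definitions):

/* Padding-hole illustration for a typical 64-bit LP64 ABI.
 * Hypothetical layouts only; not the real struct sock.
 */
#include <stdio.h>

struct sk_buff_stub { int dummy; };	/* stand-in so pointers have a target */

/* Before: the backlog block ends with a lone 4-byte 'len', and the
 * 4-byte 'rmem_alloc' sits elsewhere between pointers. Each of those
 * spots costs 4 bytes of padding to realign the following 8-byte field. */
struct sock_before {
	struct {
		struct sk_buff_stub	*head;
		struct sk_buff_stub	*tail;
		int			len;	/* 4 bytes + 4 bytes padding */
	} backlog;
	struct sk_buff_stub	*filter;
	int			rmem_alloc;	/* 4 bytes + 4 bytes padding */
	struct sk_buff_stub	*dst;
};

/* After: the two 4-byte members are paired at the front of the backlog
 * block, so no padding holes remain; on LP64 this saves 8 bytes here. */
struct sock_after {
	struct {
		int			rmem_alloc;
		int			len;
		struct sk_buff_stub	*head;
		struct sk_buff_stub	*tail;
	} backlog;
	struct sk_buff_stub	*filter;
	struct sk_buff_stub	*dst;
};

int main(void)
{
	printf("before: %zu bytes\n", sizeof(struct sock_before));
	printf("after : %zu bytes\n", sizeof(struct sock_after));
	return 0;
}

The added lines in the hunk apply the same idea elsewhere in struct sock: 4-byte fields such as sk_forward_alloc, sk_rxhash, sk_drops and sk_rcvbuf are grouped together right after sk_backlog instead of being scattered between pointer-sized members.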
