Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 2276
b: refs/heads/master
c: 36c5ed2
h: refs/heads/master
v: v3
  • Loading branch information
Russell King committed Jun 19, 2005
1 parent 51d79a3 commit eb00996
Show file tree
Hide file tree
Showing 64 changed files with 1,106 additions and 2,408 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 7df551254add79a445d2e47e8f849cef8fee6e38
refs/heads/master: 36c5ed23b9f535d1c79986efb45f9c1f115e0997
2 changes: 1 addition & 1 deletion trunk/arch/arm/kernel/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -328,7 +328,7 @@ static void __init setup_processor(void)
* cpu_init dumps the cache information, initialises SMP specific
* information, and sets up the per-CPU stacks.
*/
void __init cpu_init(void)
void cpu_init(void)
{
unsigned int cpu = smp_processor_id();
struct stack *stk = &stacks[cpu];
Expand Down
2 changes: 2 additions & 0 deletions trunk/arch/arm/mach-pxa/pm.c
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,8 @@ static int pxa_pm_enter(suspend_state_t state)
/* *** go zzz *** */
pxa_cpu_pm_enter(state);

cpu_init();

/* after sleeping, validate the checksum */
checksum = 0;
for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
Expand Down
2 changes: 2 additions & 0 deletions trunk/arch/arm/mach-sa1100/pm.c
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,8 @@ static int sa11x0_pm_enter(suspend_state_t state)
/* go zzz */
sa1100_cpu_suspend();

cpu_init();

/*
* Ensure not to come back here if it wasn't intended
*/
Expand Down
1 change: 1 addition & 0 deletions trunk/include/asm-arm/system.h
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

#define set_cr(x) \
__asm__ __volatile__( \
Expand Down
21 changes: 0 additions & 21 deletions trunk/include/linux/ip.h
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,6 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/types.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/igmp.h>
#include <net/flow.h>
Expand All @@ -108,26 +107,6 @@ struct ip_options {

#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)

/*
 * IPv4-side state kept for a pending (embryonic) connection request
 * before the full socket is created.  The generic request_sock is
 * embedded as the FIRST member so a struct request_sock * can be
 * recovered to the container by a plain cast (see inet_rsk()).
 */
struct inet_request_sock {
struct request_sock req; /* must stay first: inet_rsk() relies on it */
u32 loc_addr; /* local IPv4 address */
u32 rmt_addr; /* remote IPv4 address */
u16 rmt_port; /* remote port -- presumably network byte order; verify */
/* 1-bit/4-bit flags; NOTE(review): names suggest negotiated TCP
 * options (window scale, timestamps, SACK, ECN) -- confirm at callers */
u16 snd_wscale : 4,
rcv_wscale : 4,
tstamp_ok : 1,
sack_ok : 1,
wscale_ok : 1,
ecn_ok : 1,
acked : 1;
struct ip_options *opt; /* IP options captured for this request */
};

/*
 * Downcast a generic request_sock pointer to its IPv4-specific
 * container.  Valid only when @sk is embedded as the first member of a
 * struct inet_request_sock; the const qualifier is deliberately
 * dropped by the cast.
 */
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
{
	struct inet_request_sock *ireq;

	ireq = (struct inet_request_sock *)sk;
	return ireq;
}

struct ipv6_pinfo;

struct inet_sock {
Expand Down
13 changes: 0 additions & 13 deletions trunk/include/linux/ipv6.h
Original file line number Diff line number Diff line change
Expand Up @@ -193,19 +193,6 @@ struct inet6_skb_parm {

#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))

/*
 * IPv6 TCP connection-request state: extends the generic TCP request
 * with the IPv6 endpoint addresses and per-request packet data.
 * tcp_request_sock MUST remain the first member so tcp6_rsk() can
 * recover the container by casting.
 */
struct tcp6_request_sock {
struct tcp_request_sock req; /* must stay first (see tcp6_rsk()) */
struct in6_addr loc_addr; /* local IPv6 address */
struct in6_addr rmt_addr; /* remote IPv6 address */
struct sk_buff *pktopts; /* NOTE(review): name suggests saved SYN skb for options -- confirm */
int iif; /* NOTE(review): presumably inbound interface index -- confirm */
};

/*
 * Recover the IPv6-specific TCP request block from a generic
 * request_sock pointer.  Only legal when @sk is the first member of a
 * struct tcp6_request_sock; the cast discards the const qualifier.
 */
static inline struct tcp6_request_sock *tcp6_rsk(const struct request_sock *sk)
{
	struct tcp6_request_sock *treq6;

	treq6 = (struct tcp6_request_sock *)sk;
	return treq6;
}

/**
* struct ipv6_pinfo - ipv6 private area
*
Expand Down
24 changes: 4 additions & 20 deletions trunk/include/linux/netlink.h
Original file line number Diff line number Diff line change
Expand Up @@ -156,39 +156,23 @@ struct netlink_notify
};

/*
 * NOTE(review): this span is diff residue from the commit view -- it
 * overlays the pre- and post-change versions of __nlmsg_put(), so two
 * signature lines and two nlmsg_flags assignments appear back to back.
 * It is not compilable as-is; only one line of each pair belongs to a
 * given version of the file.
 *
 * Purpose (both versions): reserve NLMSG_ALIGN'd space in @skb via
 * skb_put() and fill in the netlink message header fields.
 */
static __inline__ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) /* old signature (removed by commit) */
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len) /* new signature (added by commit) */
{
struct nlmsghdr *nlh;
int size = NLMSG_LENGTH(len); /* payload length + aligned header */

nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags; /* old body: caller-supplied flags (removed) */
nlh->nlmsg_flags = 0; /* new body: flags forced to 0 (added) */
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
return nlh;
}

/* Build a netlink message header in @skb; jumps to a caller-provided
 * nlmsg_failure label when tailroom is insufficient. */
#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) \
goto nlmsg_failure; \
__nlmsg_put(skb, pid, seq, type, len, flags); })

/* Convenience wrapper: NLMSG_NEW with flags fixed to 0. */
#define NLMSG_PUT(skb, pid, seq, type, len) \
NLMSG_NEW(skb, pid, seq, type, len, 0)

/* Build a reply header using the pid/seq of the dump callback @cb. */
#define NLMSG_NEW_ANSWER(skb, cb, type, len, flags) \
NLMSG_NEW(skb, NETLINK_CB((cb)->skb).pid, \
(cb)->nlh->nlmsg_seq, type, len, flags)

/* Finalize @nlh: patch nlmsg_len to the bytes written since NLMSG_NEW,
 * evaluating to the skb's total length. */
#define NLMSG_END(skb, nlh) \
({ (nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \
(skb)->len; })

/* Abort a partially-built message: trim @skb back to before @nlh;
 * evaluates to -1. */
#define NLMSG_CANCEL(skb, nlh) \
({ skb_trim(skb, (unsigned char *) (nlh) - (skb)->data); \
-1; })
/* NOTE(review): the two lines below are diff residue -- the orphaned
 * body of the OLD NLMSG_PUT (pre-commit, without a flags argument).
 * They do not belong to NLMSG_CANCEL and are not compilable here. */
({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) goto nlmsg_failure; \
__nlmsg_put(skb, pid, seq, type, len); })

extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct nlmsghdr *nlh,
Expand Down
176 changes: 0 additions & 176 deletions trunk/include/linux/rtnetlink.h
Original file line number Diff line number Diff line change
Expand Up @@ -89,13 +89,6 @@ enum {
RTM_GETANYCAST = 62,
#define RTM_GETANYCAST RTM_GETANYCAST

RTM_NEWNEIGHTBL = 64,
#define RTM_NEWNEIGHTBL RTM_NEWNEIGHTBL
RTM_GETNEIGHTBL = 66,
#define RTM_GETNEIGHTBL RTM_GETNEIGHTBL
RTM_SETNEIGHTBL,
#define RTM_SETNEIGHTBL RTM_SETNEIGHTBL

__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
Expand Down Expand Up @@ -500,106 +493,6 @@ struct nda_cacheinfo
__u32 ndm_refcnt;
};


/*****************************************************************
* Neighbour tables specific messages.
*
* To retrieve the neighbour tables send RTM_GETNEIGHTBL with the
* NLM_F_DUMP flag set. Every neighbour table configuration is
* spread over multiple messages to avoid running into message
* size limits on systems with many interfaces. The first message
* in the sequence transports all not device specific data such as
* statistics, configuration, and the default parameter set.
* This message is followed by 0..n messages carrying device
* specific parameter sets.
* Although the ordering should be sufficient, NDTA_NAME can be
* used to identify sequences. The initial message can be identified
* by checking for NDTA_CONFIG. The device specific messages do
* not contain this TLV but have NDTPA_IFINDEX set to the
* corresponding interface index.
*
* To change neighbour table attributes, send RTM_SETNEIGHTBL
 * with NDTA_NAME set. Changeable attributes include NDTA_THRESH[1-3],
* NDTA_GC_INTERVAL, and all TLVs in NDTA_PARMS unless marked
* otherwise. Device specific parameter sets can be changed by
* setting NDTPA_IFINDEX to the interface index of the corresponding
* device.
****/

/*
 * Neighbour table statistics, exported read-only via the NDTA_STATS
 * attribute (see the NDTA_* enum below).  All counters are 64-bit.
 */
struct ndt_stats
{
__u64 ndts_allocs; /* entries allocated */
__u64 ndts_destroys; /* entries destroyed */
__u64 ndts_hash_grows; /* hash table grow events */
__u64 ndts_res_failed; /* failed resolutions */
__u64 ndts_lookups; /* total lookups */
__u64 ndts_hits; /* successful lookups */
__u64 ndts_rcv_probes_mcast; /* multicast probes received */
__u64 ndts_rcv_probes_ucast; /* unicast probes received */
__u64 ndts_periodic_gc_runs; /* periodic garbage-collection runs */
__u64 ndts_forced_gc_runs; /* forced garbage-collection runs */
};

/*
 * Per-device parameter-set attributes, nested inside NDTA_PARMS.
 * Device-specific messages carry NDTPA_IFINDEX (see the block comment
 * above); the writability of each attribute is noted per entry.
 */
enum {
NDTPA_UNSPEC,
NDTPA_IFINDEX, /* u32, unchangeable */
NDTPA_REFCNT, /* u32, read-only */
NDTPA_REACHABLE_TIME, /* u64, read-only, msecs */
NDTPA_BASE_REACHABLE_TIME, /* u64, msecs */
NDTPA_RETRANS_TIME, /* u64, msecs */
NDTPA_GC_STALETIME, /* u64, msecs */
NDTPA_DELAY_PROBE_TIME, /* u64, msecs */
NDTPA_QUEUE_LEN, /* u32 */
NDTPA_APP_PROBES, /* u32 */
NDTPA_UCAST_PROBES, /* u32 */
NDTPA_MCAST_PROBES, /* u32 */
NDTPA_ANYCAST_DELAY, /* u64, msecs */
NDTPA_PROXY_DELAY, /* u64, msecs */
NDTPA_PROXY_QLEN, /* u32 */
NDTPA_LOCKTIME, /* u64, msecs */
__NDTPA_MAX
};
/* Highest valid NDTPA_* attribute type. */
#define NDTPA_MAX (__NDTPA_MAX - 1)

/*
 * Fixed header of RTM_*NEIGHTBL messages; the pad fields exist only to
 * round the structure up for alignment of the following attributes.
 */
struct ndtmsg
{
__u8 ndtm_family; /* address family the table serves */
__u8 ndtm_pad1; /* padding, must be 0 -- NOTE(review): assumed, confirm */
__u16 ndtm_pad2; /* padding, must be 0 -- NOTE(review): assumed, confirm */
};

/*
 * Read-only snapshot of a neighbour table's hash/configuration state,
 * carried by the NDTA_CONFIG attribute (first message of a dump -- see
 * the block comment above).
 */
struct ndt_config
{
__u16 ndtc_key_len; /* length of hash key */
__u16 ndtc_entry_size; /* size of one table entry */
__u32 ndtc_entries; /* current number of entries */
__u32 ndtc_last_flush; /* delta to now in msecs */
__u32 ndtc_last_rand; /* delta to now in msecs */
__u32 ndtc_hash_rnd; /* hash randomization value */
__u32 ndtc_hash_mask; /* hash bucket mask */
__u32 ndtc_hash_chain_gc; /* NOTE(review): name suggests GC position in chain -- confirm */
__u32 ndtc_proxy_qlen; /* proxy queue length */
};

/*
 * Top-level attributes of RTM_*NEIGHTBL messages; writability is noted
 * per entry.  NDTA_PARMS nests the NDTPA_* attributes defined above.
 */
enum {
NDTA_UNSPEC,
NDTA_NAME, /* char *, unchangeable */
NDTA_THRESH1, /* u32 */
NDTA_THRESH2, /* u32 */
NDTA_THRESH3, /* u32 */
NDTA_CONFIG, /* struct ndt_config, read-only */
NDTA_PARMS, /* nested TLV NDTPA_* */
NDTA_STATS, /* struct ndt_stats, read-only */
NDTA_GC_INTERVAL, /* u64, msecs */
__NDTA_MAX
};
/* Highest valid NDTA_* attribute type. */
#define NDTA_MAX (__NDTA_MAX - 1)

/* First attribute following the fixed struct ndtmsg header. */
#define NDTA_RTA(r) ((struct rtattr*)(((char*)(r)) + \
NLMSG_ALIGN(sizeof(struct ndtmsg))))
/* Attribute payload length remaining after the ndtmsg header. */
#define NDTA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndtmsg))


/****
* General form of address family dependent message.
****/
Expand Down Expand Up @@ -896,75 +789,6 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
goto rtattr_failure; \
memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); })

/*
 * Typed wrappers around RTA_PUT: append a single scalar attribute to
 * @skb.  Like RTA_PUT, they jump to a caller-provided rtattr_failure
 * label when the skb lacks tailroom.
 */
#define RTA_PUT_U8(skb, attrtype, value) \
({ u8 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })

#define RTA_PUT_U16(skb, attrtype, value) \
({ u16 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })

#define RTA_PUT_U32(skb, attrtype, value) \
({ u32 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })

#define RTA_PUT_U64(skb, attrtype, value) \
({ u64 _tmp = (value); \
RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })

/* Time helpers: store jiffies-based values as seconds / milliseconds. */
#define RTA_PUT_SECS(skb, attrtype, value) \
RTA_PUT_U64(skb, attrtype, (value) / HZ)

#define RTA_PUT_MSECS(skb, attrtype, value) \
RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))

/* NUL-terminated string attribute (terminator included in length). */
#define RTA_PUT_STRING(skb, attrtype, value) \
RTA_PUT(skb, attrtype, strlen(value) + 1, value)

/* Zero-length "flag" attribute: presence is the information.
 * NOTE(review): trailing ';' makes this expand to a statement, unlike
 * the other RTA_PUT_* macros -- usable only in statement position. */
#define RTA_PUT_FLAG(skb, attrtype) \
RTA_PUT(skb, attrtype, 0, NULL);

/* Open a nested attribute; evaluates to a pointer to its header so
 * RTA_NEST_END/RTA_NEST_CANCEL can patch or discard it later. */
#define RTA_NEST(skb, type) \
({ struct rtattr *__start = (struct rtattr *) (skb)->tail; \
RTA_PUT(skb, type, 0, NULL); \
__start; })

/* Close a nested attribute opened with RTA_NEST: fix up its length;
 * evaluates to the skb's total length. */
#define RTA_NEST_END(skb, start) \
({ (start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \
(skb)->len; })

/* Discard a partially-built nest: trim the skb back to @start (if
 * non-NULL); evaluates to -1. */
#define RTA_NEST_CANCEL(skb, start) \
({ if (start) \
skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
-1; })

/*
 * Typed readers: validate presence and minimum payload size, jumping
 * to rtattr_failure on a short or missing attribute, then return the
 * value.  RTA_GET_U64 copies via memcpy to avoid unaligned access.
 */
#define RTA_GET_U8(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
goto rtattr_failure; \
*(u8 *) RTA_DATA(rta); })

#define RTA_GET_U16(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
goto rtattr_failure; \
*(u16 *) RTA_DATA(rta); })

#define RTA_GET_U32(rta) \
({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
goto rtattr_failure; \
*(u32 *) RTA_DATA(rta); })

#define RTA_GET_U64(rta) \
({ u64 _tmp; \
if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
goto rtattr_failure; \
memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
_tmp; })

/* Flag attribute: presence test only, never fails. */
#define RTA_GET_FLAG(rta) (!!(rta))

/* Time helpers: convert stored seconds / milliseconds back to jiffies. */
#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))

static inline struct rtattr *
__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
Expand Down
1 change: 0 additions & 1 deletion trunk/include/linux/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
extern const char *kmem_cache_name(kmem_cache_t *);
extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);

/* Size description struct for general caches. */
Expand Down
28 changes: 16 additions & 12 deletions trunk/include/linux/tcp.h
Original file line number Diff line number Diff line change
Expand Up @@ -230,17 +230,6 @@ struct tcp_options_received {
__u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
};

/*
 * TCP-specific connection-request state layered on inet_request_sock.
 * The inet_request_sock MUST stay the first member so a generic
 * request_sock pointer can be cast to this type (see tcp_rsk()).
 */
struct tcp_request_sock {
struct inet_request_sock req; /* must stay first (see tcp_rsk()) */
__u32 rcv_isn; /* NOTE(review): name suggests peer's initial sequence number -- confirm */
__u32 snt_isn; /* NOTE(review): name suggests our sent initial sequence number -- confirm */
};

/*
 * Recover the TCP-specific request block from a generic request_sock
 * pointer.  Only legal when @req is embedded as the first member of a
 * struct tcp_request_sock; the cast discards the const qualifier.
 */
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	struct tcp_request_sock *treq;

	treq = (struct tcp_request_sock *)req;
	return treq;
}

struct tcp_sock {
/* inet_sock has to be the first member of tcp_sock */
struct inet_sock inet;
Expand Down Expand Up @@ -379,7 +368,22 @@ struct tcp_sock {

__u32 total_retrans; /* Total retransmits for entire connection */

struct request_sock_queue accept_queue; /* FIFO of established children */
/* The syn_wait_lock is necessary only to avoid proc interface having
* to grab the main lock sock while browsing the listening hash
* (otherwise it's deadlock prone).
* This lock is acquired in read mode only from listening_get_next()
* and it's acquired in write mode _only_ from code that is actively
* changing the syn_wait_queue. All readers that are holding
* the master sock lock don't need to grab this lock in read mode
* too as the syn_wait_queue writes are always protected from
* the main sock lock.
*/
rwlock_t syn_wait_lock;
struct tcp_listen_opt *listen_opt;

/* FIFO of established children */
struct open_request *accept_queue;
struct open_request *accept_queue_tail;

unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
Expand Down
Loading

0 comments on commit eb00996

Please sign in to comment.