Commit c5a4ae2

---
yaml
---
r: 202963
b: refs/heads/master
c: 5933dd2
h: refs/heads/master
i:
  202961: 00c160b
  202959: a86db5e
v: v3
Eric Dumazet authored and David S. Miller committed Jun 16, 2010
1 parent 79e2fb9 commit c5a4ae2
Showing 4 changed files with 6 additions and 10 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a95d8c88bea0c93505e1d143d075f112be2b25e3
+refs/heads/master: 5933dd2f028cdcbb4b3169dca594324704ba10ae
3 changes: 0 additions & 3 deletions trunk/arch/microblaze/include/asm/system.h
@@ -101,10 +101,7 @@ extern struct dentry *of_debugfs_root;
  * MicroBlaze doesn't handle unaligned accesses in hardware.
  *
  * Based on this we force the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
  */
 #define NET_IP_ALIGN 2
-#define NET_SKB_PAD L1_CACHE_BYTES
 
 #endif /* _ASM_MICROBLAZE_SYSTEM_H */
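
A minimal userspace sketch of the arithmetic behind keeping NET_IP_ALIGN at 2 here (illustrative values only; drivers conventionally apply the pad with skb_reserve(skb, NET_IP_ALIGN) before receiving a frame): shifting the buffer start by 2 bytes puts the IP header, which follows the 14-byte Ethernet header, on a 4-byte boundary, which matters on hardware that cannot do unaligned loads.

/*
 * Illustrative sketch, not kernel code. Constants are assumptions
 * chosen to mirror the values discussed in the hunk above.
 */
#include <stdio.h>

#define NET_IP_ALIGN 2   /* microblaze keeps the 2-byte pad */
#define ETH_HLEN     14  /* Ethernet header length */

int main(void)
{
	unsigned int ip_off_unpadded = ETH_HLEN;                /* 14: misaligned */
	unsigned int ip_off_padded   = NET_IP_ALIGN + ETH_HLEN; /* 16: 4-byte aligned */

	printf("IP header offset without padding: %u (mod 4 = %u)\n",
	       ip_off_unpadded, ip_off_unpadded % 4);
	printf("IP header offset after skb_reserve(skb, NET_IP_ALIGN): %u (mod 4 = %u)\n",
	       ip_off_padded, ip_off_padded % 4);
	return 0;
}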
3 changes: 0 additions & 3 deletions trunk/arch/powerpc/include/asm/system.h
@@ -515,11 +515,8 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
  * powers of 2 writes until it reaches sufficient alignment).
  *
  * Based on this we disable the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
  */
 #define NET_IP_ALIGN 0
-#define NET_SKB_PAD L1_CACHE_BYTES
 
 #define cmpxchg64(ptr, o, n) \
 ({ \
8 changes: 5 additions & 3 deletions trunk/include/linux/skbuff.h
@@ -1414,12 +1414,14 @@ static inline int skb_network_offset(const struct sk_buff *skb)
  *
  * Various parts of the networking layer expect at least 32 bytes of
  * headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
  *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce average number of cache lines per packet.
+ * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD 64
+#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
 #endif
 
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
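
To make the new default concrete, here is a standalone sketch (not kernel code; the cache-line sizes are assumed values) of what NET_SKB_PAD resolves to under max(32, L1_CACHE_BYTES), together with the 64-byte header span that get_rps_cpus() reads according to the comment in the hunk above.

/*
 * Standalone sketch. Shows the value NET_SKB_PAD takes for a few assumed
 * L1 cache-line sizes, and the span NET_IP_ALIGN(2) + Ethernet(14) +
 * IPv6 header(40) + ports(8) from the comment above.
 */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

static unsigned int net_skb_pad(unsigned int l1_cache_bytes)
{
	/* Mirrors the new generic definition: max(32, L1_CACHE_BYTES). */
	return MAX(32u, l1_cache_bytes);
}

int main(void)
{
	const unsigned int lines[] = { 32, 64, 128 }; /* assumed cache-line sizes */
	const unsigned int rps_span = 2 + 14 + 40 + 8; /* = 64 bytes */

	for (unsigned int i = 0; i < sizeof(lines) / sizeof(lines[0]); i++)
		printf("L1_CACHE_BYTES=%3u -> NET_SKB_PAD=%3u\n",
		       lines[i], net_skb_pad(lines[i]));

	printf("headers hashed by RPS span %u bytes -> one 64-byte cache line\n",
	       rps_span);
	return 0;
}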
