From 48b7c04bcdf9664ae15dfbe06f0c58d620d062e6 Mon Sep 17 00:00:00 2001
From: Vegard Nossum <vegard.nossum@gmail.com>
Date: Tue, 10 Jun 2008 23:45:45 +0200
Subject: [PATCH] --- yaml ---

r: 99399
b: refs/heads/master
c: 0db125c467afcbcc229abb1a87bc36ef72777dc2
h: refs/heads/master
i:
  99397: 99655392162d1bcbb0f6ef3f43f6eeed0ed51a7f
  99395: 996fb1a00a7ab8518e6a54185a0870533a116ffa
  99391: bb77db2b90a020bce5746d2e4c6cefbc41327eec
v: v3
---
 [refs]                             | 2 +-
 trunk/include/asm-x86/seccomp_64.h | 1 +
 trunk/include/asm-x86/suspend_32.h | 5 +++++
 trunk/include/asm-x86/xor_32.h     | 5 +++++
 trunk/include/asm-x86/xor_64.h     | 5 +++++
 5 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 86e588524f9f..ee6980d4bfc5 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e6e07d8a2d2989c1f42287131308aa2fde253631
+refs/heads/master: 0db125c467afcbcc229abb1a87bc36ef72777dc2
diff --git a/trunk/include/asm-x86/seccomp_64.h b/trunk/include/asm-x86/seccomp_64.h
index 553af65a2287..76cfe69aa63c 100644
--- a/trunk/include/asm-x86/seccomp_64.h
+++ b/trunk/include/asm-x86/seccomp_64.h
@@ -1,4 +1,5 @@
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 
 #include <linux/unistd.h>
 
diff --git a/trunk/include/asm-x86/suspend_32.h b/trunk/include/asm-x86/suspend_32.h
index 24e1c080aa8a..8675c6782a7d 100644
--- a/trunk/include/asm-x86/suspend_32.h
+++ b/trunk/include/asm-x86/suspend_32.h
@@ -3,6 +3,9 @@
  * Based on code
  * Copyright 2001 Patrick Mochel
  */
+#ifndef __ASM_X86_32_SUSPEND_H
+#define __ASM_X86_32_SUSPEND_H
+
 #include <asm/desc.h>
 #include <asm/i387.h>
 
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
 #endif
+
+#endif /* __ASM_X86_32_SUSPEND_H */
diff --git a/trunk/include/asm-x86/xor_32.h b/trunk/include/asm-x86/xor_32.h
index 067b5c1835a3..921b45840449 100644
--- a/trunk/include/asm-x86/xor_32.h
+++ b/trunk/include/asm-x86/xor_32.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -881,3 +884,5 @@ do { \
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST) \
 	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
diff --git a/trunk/include/asm-x86/xor_64.h b/trunk/include/asm-x86/xor_64.h
index 24957e39ac8a..2d3a18de295b 100644
--- a/trunk/include/asm-x86/xor_64.h
+++ b/trunk/include/asm-x86/xor_64.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -354,3 +357,5 @@ do { \
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */
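
Note on the fix: the seccomp_64.h hunk above repairs a subtly broken guard. The header opened with #ifndef _ASM_SECCOMP_H but never defined the macro, so the test stayed true on every inclusion and the guard was a no-op; the other three headers lacked guards entirely. A minimal sketch of the idiom the patch establishes, using hypothetical file and macro names (example.h, EXAMPLE_H are illustrative, not from the patch):

/* example.h -- hypothetical header demonstrating the include-guard idiom.
 * Without the #define line (the bug fixed in seccomp_64.h above), the
 * #ifndef test remains true forever, the body is expanded on every
 * inclusion, and the struct below triggers redefinition errors. */
#ifndef EXAMPLE_H
#define EXAMPLE_H

struct example {
	int value;
};

#endif /* EXAMPLE_H */

With the guard in place, a translation unit can pull the header in any number of times, directly or via nested includes:

/* main.c -- double inclusion compiles cleanly thanks to the guard. */
#include "example.h"
#include "example.h"	/* second pass: #ifndef is false, body skipped */

int main(void)
{
	struct example e = { 42 };
	return e.value == 42 ? 0 : 1;
}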