Commit b7bc312
---
r: 80044
b: refs/heads/master
c: ae2e15e
h: refs/heads/master
v: v3
Glauber de Oliveira Costa authored and Ingo Molnar committed Jan 30, 2008
1 parent 89d20cc commit b7bc312
Showing 4 changed files with 31 additions and 34 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1a53905adddf6cc6d795bd7e988c60a19773f72e
+refs/heads/master: ae2e15eb3b6c2a011bee615470bf52d2beb99a4b
30 changes: 30 additions & 0 deletions trunk/include/asm-x86/processor.h
@@ -596,6 +596,36 @@ extern char ignore_fpu_irq;
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
+#ifdef CONFIG_X86_32
+#define BASE_PREFETCH ASM_NOP4
+#define ARCH_HAS_PREFETCH
+#else
+#define BASE_PREFETCH "prefetcht0 (%1)"
+#endif
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth to care about 3dnow! prefetches for the K6
+   because they are microcoded there and very slow.
+   However we don't do prefetches for pre XP Athlons currently
+   That should be fixed. */
+static inline void prefetch(const void *x)
+{
+        alternative_input(BASE_PREFETCH,
+                          "prefetchnta (%1)",
+                          X86_FEATURE_XMM,
+                          "r" (x));
+}
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for
+   spinlocks to avoid one state transition in the cache coherency protocol. */
+static inline void prefetchw(const void *x)
+{
+        alternative_input(BASE_PREFETCH,
+                          "prefetchw (%1)",
+                          X86_FEATURE_3DNOW,
+                          "r" (x));
+}
+
 #define spin_lock_prefetch(x) prefetchw(x)
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
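
The unified prefetch() asks for a cache line with read intent (prefetchnta, gated on SSE), while prefetchw() asks with write intent (prefetchw, gated on 3DNow!), pulling the line in exclusive state so a later store avoids an extra coherency transition. As a minimal userspace analogue of that read/write split (illustrative only; it uses GCC's __builtin_prefetch rather than the kernel's alternatives machinery, and the list-walk code is not part of the commit):

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        long payload;
};

/* Walk a list and modify each node. In __builtin_prefetch(addr, rw,
   locality), rw=1 hints write intent, like the kernel's prefetchw();
   rw=0 would hint a plain read, like prefetch(). */
static long walk_and_update(struct node *head)
{
        long sum = 0;
        for (struct node *n = head; n; n = n->next) {
                if (n->next)
                        __builtin_prefetch(n->next, 1, 3);
                n->payload += 1;        /* store follows: write prefetch pays off */
                sum += n->payload;
        }
        return sum;
}

int main(void)
{
        /* Build a small list just to exercise the walk (error handling omitted). */
        struct node *head = NULL;
        for (int i = 0; i < 1000; i++) {
                struct node *n = malloc(sizeof(*n));
                n->payload = i;
                n->next = head;
                head = n;
        }
        printf("sum = %ld\n", walk_and_update(head));
        while (head) {
                struct node *n = head->next;
                free(head);
                head = n;
        }
        return 0;
}

In the kernel itself no runtime branch is paid on this path: alternative_input() emits BASE_PREFETCH inline and records the feature-specific instruction in a patch table that is applied once at boot.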
25 changes: 0 additions & 25 deletions trunk/include/asm-x86/processor_32.h
@@ -228,29 +228,4 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define ASM_NOP_MAX 8
 
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However we don't do prefetches for pre XP Athlons currently
-   That should be fixed. */
-static inline void prefetch(const void *x)
-{
-        alternative_input(ASM_NOP4,
-                          "prefetchnta (%1)",
-                          X86_FEATURE_XMM,
-                          "r" (x));
-}
-
-#define ARCH_HAS_PREFETCH
-
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
-   spinlocks to avoid one state transition in the cache coherency protocol. */
-static inline void prefetchw(const void *x)
-{
-        alternative_input(ASM_NOP4,
-                          "prefetchw (%1)",
-                          X86_FEATURE_3DNOW,
-                          "r" (x));
-}
-
 #endif /* __ASM_I386_PROCESSOR_H */
8 changes: 0 additions & 8 deletions trunk/include/asm-x86/processor_64.h
@@ -124,12 +124,4 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
 #define ASM_NOP_MAX 8
 
-static inline void prefetchw(void *x)
-{
-        alternative_input("prefetcht0 (%1)",
-                          "prefetchw (%1)",
-                          X86_FEATURE_3DNOW,
-                          "r" (x));
-}
-
 #endif /* __ASM_X86_64_PROCESSOR_H */
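
The feature flags referenced in these alternatives come straight from CPUID: X86_FEATURE_XMM corresponds to CPUID leaf 1, EDX bit 25 (SSE), and X86_FEATURE_3DNOW to extended leaf 0x80000001, EDX bit 31. A short userspace sketch that reads those bits with GCC's <cpuid.h> (an illustration, not part of the commit):

#include <stdio.h>
#include <cpuid.h>

/* CPUID.01H:EDX bit 25 = SSE   (the kernel's X86_FEATURE_XMM)
   CPUID.80000001H:EDX bit 31 = 3DNow! (X86_FEATURE_3DNOW) */
#define EDX_SSE    (1u << 25)
#define EDX_3DNOW  (1u << 31)

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                printf("SSE (prefetchnta usable):  %s\n",
                       (edx & EDX_SSE) ? "yes" : "no");

        if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
                printf("3DNow! (prefetchw usable): %s\n",
                       (edx & EDX_3DNOW) ? "yes" : "no");
        return 0;
}

On a CPU where neither bit is set, the boot-time patching never replaces BASE_PREFETCH, which on 32-bit is just a 4-byte nop (ASM_NOP4).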
