From 5844e2dbe244e7ab2844ae25d7ae613f0f2ed7e4 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 21 Jun 2005 16:20:28 -0700
Subject: [PATCH]

--- yaml ---
r: 2431
b: refs/heads/master
c: 7049e6800f40046c384c522a990669024d5f5836
h: refs/heads/master
i:
  2429: 274515ddcafb42cf89d810f10302d26ce8009de0
  2427: 3295bafea69d05562652f6c9b104c72c76726bb5
  2423: e41bcfae21e4f15d9b0d24f9587b4a951ebe4d09
  2415: 48e402606b73bf0704701c6b9f4e7c46a7500fa2
  2399: c171c0a74e08be8755d81701113d54d74d7ecb62
  2367: a37675db23c939bcfb2393058ee06a1352a4f62f
  2303: 859ef282f4a2b82eecb5f0e8813c1e0d48e6448d
v: v3
---
 [refs]                                |  2 +-
 trunk/include/asm-sparc64/processor.h | 34 +++++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 255c936b0fd8..e57c450445c6 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8005aba69a6440a535a4cc2aed99ffca580847e0
+refs/heads/master: 7049e6800f40046c384c522a990669024d5f5836

diff --git a/trunk/include/asm-sparc64/processor.h b/trunk/include/asm-sparc64/processor.h
index bc1445b904ef..d0bee2413560 100644
--- a/trunk/include/asm-sparc64/processor.h
+++ b/trunk/include/asm-sparc64/processor.h
@@ -192,6 +192,40 @@ extern unsigned long get_wchan(struct task_struct *task);
 
 #define cpu_relax()	barrier()
 
+/* Prefetch support.  This is tuned for UltraSPARC-III and later.
+ * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+ * a shallower prefetch queue than later chips.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+	/* We do not use the read prefetch mnemonic because that
+	 * prefetches into the prefetch-cache which is only accessible
+	 * by floating point operations in UltraSPARC-III and later.
+	 * By contrast, "#one_write" prefetches into the L2 cache
+	 * in shared state.
+	 */
+	__asm__ __volatile__("prefetch	[%0], #one_write"
+			     : /* no outputs */
+			     : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+	/* The optimal prefetch to use for writes is
+	 * "#n_writes".  This brings the cacheline into the
+	 * L2 cache in "owned" state.
+	 */
+	__asm__ __volatile__("prefetch	[%0], #n_writes"
+			     : /* no outputs */
+			     : "r" (x));
+}
+
+#define spin_lock_prefetch(x)	prefetchw(x)
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
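
A note on usage (not part of the patch): the classic consumer of these
hooks is pointer-chasing code that issues a prefetch for the next node
while it is still working on the current one, hiding some of the cache
miss latency. The sketch below is illustrative only; struct node,
process(), and walk() are hypothetical names, and it uses GCC's generic
__builtin_prefetch with a write hint as a portable stand-in for the
sparc64 prefetchw() defined above.

#include <stddef.h>

struct node {
	struct node *next;
	long payload;
};

/* Hypothetical per-node work; stands in for whatever the caller does. */
static void process(struct node *n)
{
	n->payload++;
}

static void walk(struct node *head)
{
	struct node *n;

	for (n = head; n != NULL; n = n->next) {
		/* Hint the upcoming write to the next node while we are
		 * still busy with the current one.  The second argument,
		 * 1, means "prefetch for write", like prefetchw().
		 */
		if (n->next)
			__builtin_prefetch(n->next, 1);
		process(n);
	}
}

Whether this helps depends on the chip: per the comment in the patch,
UltraSPARC-I treats the prefetch as a nop, so the worst case is one
extra instruction per node, while UltraSPARC-III and later pull the
next cacheline into L2 in "owned" state before it is dirtied.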