Commit 5844e2d
---
r: 2431
b: refs/heads/master
c: 7049e68
h: refs/heads/master
i:
  2429: 274515d
  2427: 3295baf
  2423: e41bcfa
  2415: 48e4026
  2399: c171c0a
  2367: a37675d
  2303: 859ef28
v: v3
David S. Miller committed Jun 21, 2005
1 parent 6271979 commit 5844e2d
Showing 2 changed files with 35 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8005aba69a6440a535a4cc2aed99ffca580847e0
+refs/heads/master: 7049e6800f40046c384c522a990669024d5f5836
34 changes: 34 additions & 0 deletions trunk/include/asm-sparc64/processor.h
@@ -192,6 +192,40 @@ extern unsigned long get_wchan(struct task_struct *task);

 #define cpu_relax() barrier()
 
+/* Prefetch support. This is tuned for UltraSPARC-III and later.
+ * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+ * a shallower prefetch queue than later chips.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+        /* We do not use the read prefetch mnemonic because that
+         * prefetches into the prefetch-cache, which is only accessible
+         * by floating point operations in UltraSPARC-III and later.
+         * By contrast, "#one_write" prefetches into the L2 cache
+         * in shared state.
+         */
+        __asm__ __volatile__("prefetch [%0], #one_write"
+                             : /* no outputs */
+                             : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+        /* The optimal prefetch to use for writes is
+         * "#n_writes". This brings the cacheline into the
+         * L2 cache in "owned" state.
+         */
+        __asm__ __volatile__("prefetch [%0], #n_writes"
+                             : /* no outputs */
+                             : "r" (x));
+}

+#define spin_lock_prefetch(x) prefetchw(x)
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
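
Defining ARCH_HAS_PREFETCH and ARCH_HAS_PREFETCHW tells the kernel's generic prefetch header to use these arch-specific helpers instead of its no-op fallbacks. As a rough illustration of the usage pattern they enable (a self-contained userspace sketch, not code from this commit: the node type is hypothetical and prefetch() here is a portable stand-in built on GCC's __builtin_prefetch rather than the sparc64 asm), a list walk can hint the next element while processing the current one:

/* Minimal sketch of the prefetch-ahead pattern, assuming GCC/Clang. */
#include <stddef.h>
#include <stdio.h>

struct node {
        int value;
        struct node *next;
};

/* Stand-in for the kernel helper; rw=0 (read), locality=3 (keep cached). */
static inline void prefetch(const void *x)
{
        __builtin_prefetch(x, 0, 3);
}

static long sum_list(struct node *head)
{
        long sum = 0;

        for (struct node *p = head; p; p = p->next) {
                /* Hint the next node while we work on this one,
                 * hiding some of the load's memory latency.
                 */
                if (p->next)
                        prefetch(p->next);
                sum += p->value;
        }
        return sum;
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        printf("%ld\n", sum_list(&a)); /* prints 6 */
        return 0;
}

The hint is purely advisory: on UltraSPARC-I it executes as a nop, so correctness never depends on it, only latency.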
