sh: Use L1_CACHE_BYTES for .data.cacheline_aligned.
Previously this used a hardcoded 32; use L1_CACHE_BYTES for
cache-line alignment instead.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt committed Mar 5, 2007
commit 87e29ca (1 parent: 5c36e65)
Showing 2 changed files with 4 additions and 2 deletions.
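
For context, L1_CACHE_BYTES is derived from the configured CPU's cache-line shift, so the alignment below tracks the selected processor instead of assuming 32-byte lines. A minimal sketch of the usual wiring (the Kconfig symbol name here is an assumption, not taken from this diff):

/* Illustrative sketch only: the line size comes from per-CPU
 * configuration rather than a hardcoded 32.
 * CONFIG_SH_L1_CACHE_SHIFT is an assumed symbol name following
 * common kernel convention. */
#define L1_CACHE_SHIFT	CONFIG_SH_L1_CACHE_SHIFT
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* 32 when the shift is 5 */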
arch/sh/kernel/vmlinux.lds.S: 2 additions, 1 deletion
@@ -3,6 +3,7 @@
  * Written by Niibe Yutaka
  */
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
@@ -53,7 +54,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	.data.page_aligned : { *(.data.page_aligned) }
 
-	. = ALIGN(32);
+	. = ALIGN(L1_CACHE_BYTES);
 	__per_cpu_start = .;
 	.data.percpu : { *(.data.percpu) }
 	__per_cpu_end = .;
include/asm-sh/cache.h: 2 additions, 1 deletion
@@ -21,6 +21,7 @@
 
 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 
+#ifndef __ASSEMBLY__
 struct cache_info {
 	unsigned int ways;	/* Number of cache ways */
 	unsigned int sets;	/* Number of cache sets */
@@ -47,6 +48,6 @@ struct cache_info {
 
 	unsigned long flags;
 };
-
+#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
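
The new #ifndef __ASSEMBLY__ guard is what makes the include in vmlinux.lds.S safe: linker scripts are run through the C preprocessor with __ASSEMBLY__ defined (standard kernel build behaviour), so the struct definition must be compiled out while the alignment macros stay visible. A hedged sketch of the resulting pattern:

/* Sketch of the guard pattern applied here: macros remain visible
 * to linker scripts and .S files, the C struct does not. */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* safe in any context */

#ifndef __ASSEMBLY__
struct cache_info;	/* C-only: hidden when __ASSEMBLY__ is defined */
#endif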
