From 57b31eb44594bc66d39c4ae6035a4d18980a329c Mon Sep 17 00:00:00 2001
From: Haavard Skinnemoen
Date: Mon, 11 Jun 2007 17:17:14 +0200
Subject: [PATCH]

--- yaml ---
r: 57779
b: refs/heads/master
c: 093d0faf57e59feee224217273f944e10e4e3562
h: refs/heads/master
i:
  57777: 5506e955b9d3c06b956dfcb8eb45c8e44b00c0d6
  57775: cc763bcbb0bcc75b5926a30d2135be47e63e6895
v: v3
---
 [refs]                          | 2 +-
 trunk/include/asm-avr32/cache.h | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 24ed1b2b1bea..f112e1a889c8 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2fdfe8d9a2687718b07a35196b89fbf48ba0c82f
+refs/heads/master: 093d0faf57e59feee224217273f944e10e4e3562
diff --git a/trunk/include/asm-avr32/cache.h b/trunk/include/asm-avr32/cache.h
index dabb955f3c00..d3cf35ab11ab 100644
--- a/trunk/include/asm-avr32/cache.h
+++ b/trunk/include/asm-avr32/cache.h
@@ -4,6 +4,15 @@
 #define L1_CACHE_SHIFT 5
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
 
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
 #ifndef __ASSEMBLER__
 struct cache_info {
 	unsigned int ways;
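
Not part of the patch above: a minimal, hypothetical driver fragment sketching the situation the new comment describes. The names (struct foo_dev, foo_start_rx, rx_buf) are invented for illustration; only kmalloc(), the standard dma_map_single() DMA API, and the ARCH_KMALLOC_MINALIGN / L1_CACHE_BYTES relationship come from the patch or the kernel.

    /*
     * Illustrative only -- not part of the patch.  Hypothetical driver
     * code showing why a kmalloc() buffer handed to the DMA API must
     * not share a cache line with unrelated data on a non-coherent
     * CPU such as AVR32.
     */
    #include <linux/slab.h>
    #include <linux/errno.h>
    #include <linux/dma-mapping.h>

    struct foo_dev {                    /* hypothetical device state */
    	struct device	*dev;
    	void		*rx_buf;        /* filled by the device via DMA */
    	dma_addr_t	rx_dma;
    };

    static int foo_start_rx(struct foo_dev *fd, size_t len)
    {
    	/*
    	 * With ARCH_KMALLOC_MINALIGN == L1_CACHE_BYTES this allocation
    	 * starts on a cache-line boundary and is padded to whole cache
    	 * lines, so no unrelated kmalloc() object shares a line with it.
    	 * Without that guarantee, a CPU access to a neighbouring object
    	 * could pull part of rx_buf into the D-cache while the transfer
    	 * is still running, and the CPU would later read stale data
    	 * instead of what the device wrote.
    	 */
    	fd->rx_buf = kmalloc(len, GFP_KERNEL);
    	if (!fd->rx_buf)
    		return -ENOMEM;

    	/* performs the cache maintenance needed before the device writes */
    	fd->rx_dma = dma_map_single(fd->dev, fd->rx_buf, len,
    				    DMA_FROM_DEVICE);

    	/* ... hand fd->rx_dma to the hardware and start the transfer ... */
    	return 0;
    }

On a fully cache-coherent architecture this minimum alignment would not be needed for DMA correctness, which is why ARCH_KMALLOC_MINALIGN is defined per architecture rather than globally.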