diff --git a/[refs] b/[refs]
index eeea9a0d9168..39c65355d695 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 3192b920bf7d0c528ab54e7d3689f44055316a37
+refs/heads/master: 90810645f78f894acfb04b3768e8a7d45f2b303a
diff --git a/trunk/include/linux/slab.h b/trunk/include/linux/slab.h
index 646a639a4aae..573c809c33d9 100644
--- a/trunk/include/linux/slab.h
+++ b/trunk/include/linux/slab.h
@@ -133,12 +133,22 @@ unsigned int kmem_cache_size(struct kmem_cache *);
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
 #define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
 
+/*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
 #ifdef ARCH_DMA_MINALIGN
 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
 #else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
 #ifndef ARCH_SLAB_MINALIGN
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
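
For context on how the documented macros are consumed (this sketch is not part of the patch): an architecture that needs DMA-safe kmalloc buffers typically defines ARCH_DMA_MINALIGN in one of its arch headers, and the slab.h block above then promotes that value to ARCH_KMALLOC_MINALIGN. The fragment below is an illustrative sketch only; the header path, cache-line size, and resulting values are assumptions modelled on common arch code, not text from this commit.

/* Sketch: arch/<arch>/include/asm/cache.h (hypothetical path and values) */
#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* assume 32-byte cache lines */

/*
 * On a non-cache-coherent platform, a DMA buffer must not share a cache
 * line with unrelated data, so kmalloc allocations are forced up to
 * cache-line alignment by exporting the cache-line size here.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

/*
 * With the slab.h logic above, this arch would then see:
 *   ARCH_KMALLOC_MINALIGN == 32 (taken from ARCH_DMA_MINALIGN)
 *   ARCH_SLAB_MINALIGN    == __alignof__(unsigned long long) (default,
 *                            since the arch did not override it)
 */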