From 0a7144df235cc160c78b7273704f219a282f6452 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Sat, 29 Sep 2007 15:28:48 +0200
Subject: [PATCH]

--- yaml ---
r: 65220
b: refs/heads/master
c: 4827bbb06e4b59922c2b9bfb13ad1bf936bdebe5
h: refs/heads/master
v: v3
---
 [refs]                          | 2 +-
 trunk/include/asm-i386/system.h | 5 -----
 2 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/[refs] b/[refs]
index a742a27a8764..9d7ee3ddf2bf 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1bef7dc00caa7bcbff4fdb55e599e2591461fafa
+refs/heads/master: 4827bbb06e4b59922c2b9bfb13ad1bf936bdebe5
diff --git a/trunk/include/asm-i386/system.h b/trunk/include/asm-i386/system.h
index 609756c61676..d69ba937e092 100644
--- a/trunk/include/asm-i386/system.h
+++ b/trunk/include/asm-i386/system.h
@@ -214,11 +214,6 @@ static inline unsigned long get_limit(unsigned long segment)
  */
 
-/*
- * Actually only lfence would be needed for mb() because all stores done
- * by the kernel should be already ordered. But keep a full barrier for now.
- */
-
 #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
 #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
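
Note on the code being touched (a sketch, not part of the patch): the mb()/rmb()
definitions above use the kernel's alternative() mechanism, which patches in
mfence/lfence at boot on CPUs advertising SSE2 (X86_FEATURE_XMM2) and otherwise
keeps the "lock; addl $0,0(%%esp)" fallback, a locked read-modify-write that also
acts as a full barrier. The user-space sketch below only illustrates that
selection; it substitutes a runtime __builtin_cpu_supports("sse2") check (a
GCC/Clang builtin, not kernel code) for boot-time patching, and it assumes a
32-bit i386 build since the %esp-based fallback is i386-specific.

/*
 * Illustrative sketch only -- not the kernel's implementation.
 * The kernel's alternative() rewrites the barrier instruction once at
 * boot; a runtime CPU-feature check stands in for that patching here.
 * Assumes GCC or Clang on 32-bit x86 (the %esp fallback is i386-only).
 */
#include <stdio.h>

static inline void mb_sketch(void)
{
	if (__builtin_cpu_supports("sse2"))
		/* What alternative() selects when X86_FEATURE_XMM2 is present. */
		asm volatile("mfence" ::: "memory");
	else
		/* Pre-SSE2 fallback: a locked RMW on the stack is a full barrier. */
		asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc");
}

int main(void)
{
	static int data, flag;

	data = 42;
	mb_sketch();	/* keep the store to data ordered before the store to flag */
	flag = 1;

	printf("data=%d flag=%d\n", data, flag);
	return 0;
}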