Commit 27349d2
---
r: 18983
b: refs/heads/master
c: 715b49e
h: refs/heads/master
i:
  18981: 7dbd104
  18979: eb02421
  18975: 569a8e4
v: v3
Alan Cox authored and Linus Torvalds committed Jan 19, 2006
1 parent 053f19f commit 27349d2
Showing 8 changed files with 30 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 3213e913b0d6baeb28aa1affbdd4bfa7efedc35f
+refs/heads/master: 715b49ef2de6fcead0776d9349071670282faf65
1 change: 1 addition & 0 deletions trunk/drivers/md/kcopyd.c
@@ -8,6 +8,7 @@
* completion notification.
*/

+#include <asm/types.h>
#include <asm/atomic.h>

#include <linux/blkdev.h>
1 change: 1 addition & 0 deletions trunk/fs/nfsctl.c
@@ -5,6 +5,7 @@
*
*/
#include <linux/config.h>
+#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sunrpc/svc.h>
12 changes: 12 additions & 0 deletions trunk/include/asm-i386/atomic.h
@@ -255,5 +255,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
#include <asm-generic/atomic.h>
#endif
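
For context, a minimal usage sketch (not part of this commit): a hypothetical helper showing how an ECC/EDAC-style driver might call the new atomic_scrub() on a corrected page once it has a kernel virtual address for it. The function name example_scrub_page and the use of PAGE_SIZE as the length are illustrative assumptions only.

#include <asm/atomic.h>
#include <asm/page.h>

/* Hypothetical illustration: rewrite every 32-bit word of one page with a
 * locked read-modify-write so corrected data is written back to DRAM. */
static void example_scrub_page(void *kvaddr)
{
	atomic_scrub((unsigned long *)kvaddr, PAGE_SIZE);
}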
12 changes: 12 additions & 0 deletions trunk/include/asm-x86_64/atomic.h
@@ -426,5 +426,17 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()

+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(u32 *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
#include <asm-generic/atomic.h>
#endif
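
For readers less familiar with x86 assembly: the "lock; addl $0, %0" used in both versions is an atomic read-modify-write that leaves each word's value unchanged but forces it to be read and written back under the bus lock, which is what makes the scrub safe against interrupts, DMA and other CPUs. A rough portable illustration of the same idea using C11 atomics (illustration only, not kernel code and not part of this commit):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

/* Atomically add 0 to each 32-bit word: the value is unchanged, but each
 * word still goes through an atomic read-modify-write cycle. */
static void scrub_words(_Atomic uint32_t *p, size_t size_bytes)
{
	size_t i;

	for (i = 0; i < size_bytes / 4; i++, p++)
		atomic_fetch_add_explicit(p, 0, memory_order_relaxed);
}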
2 changes: 1 addition & 1 deletion trunk/kernel/audit.c
@@ -42,8 +42,8 @@
*/

#include <linux/init.h>
-#include <asm/atomic.h>
#include <asm/types.h>
+#include <asm/atomic.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/err.h>
2 changes: 1 addition & 1 deletion trunk/kernel/auditsc.c
@@ -30,8 +30,8 @@
*/

#include <linux/init.h>
-#include <asm/atomic.h>
#include <asm/types.h>
+#include <asm/atomic.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
2 changes: 1 addition & 1 deletion trunk/net/ipv4/raw.c
@@ -40,12 +40,12 @@
*/

#include <linux/config.h>
+#include <linux/types.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
-#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/errno.h>
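
The hunks in kcopyd.c, nfsctl.c, audit.c, auditsc.c and raw.c above only add or reorder includes; they appear intended to guarantee that the basic integer types (u32, via <asm/types.h> or <linux/types.h>) are defined before <asm/atomic.h> is pulled in, since the new atomic_scrub() prototype uses u32. A minimal sketch of the dependency (illustrative, not taken from the tree):

/* Illustration: with atomic_scrub() now declared in <asm/atomic.h>, the
 * types header must come first or the u32 in its prototype is undeclared. */
#include <asm/types.h>   /* defines u32 */
#include <asm/atomic.h>  /* atomic_scrub(..., u32 size) now parses cleanly */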
