riscv/mmiowb: Hook up mmiowb() implementation to asm-generic code
In a bid to kill off explicit mmiowb() usage in driver code, hook up
the asm-generic mmiowb() tracking code for riscv, so that an mmiowb()
is automatically issued from spin_unlock() if an I/O write was performed
in the critical section.

Reviewed-by: Palmer Dabbelt <palmer@sifive.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
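
For context, the driver-visible effect is roughly the following. This is a hypothetical sketch, not code from the commit: "struct foo_dev", its fields, foo_kick_*(), and REG_CTRL are illustrative names.

#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical device; names are illustrative, not from this commit. */
struct foo_dev {
	spinlock_t	 lock;
	void __iomem	*regs;
};
#define REG_CTRL	0x0

/* Before ARCH_HAS_MMIOWB, drivers had to order MMIO writes by hand: */
static void foo_kick_old(struct foo_dev *dev, u32 val)
{
	spin_lock(&dev->lock);
	writel(val, dev->regs + REG_CTRL);
	mmiowb();	/* order the register write before the unlock */
	spin_unlock(&dev->lock);
}

/*
 * After this commit, writel() ends in __io_aw() -> mmiowb_set_pending(),
 * and spin_unlock() issues mmiowb() ("fence o,w" on riscv) automatically
 * when an I/O write happened inside the critical section:
 */
static void foo_kick_new(struct foo_dev *dev, u32 val)
{
	spin_lock(&dev->lock);
	writel(val, dev->regs + REG_CTRL);
	spin_unlock(&dev->lock);
}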
Will Deacon committed on Apr 8, 2019
1 parent 420af15 · commit b012980
Showing 4 changed files with 17 additions and 14 deletions.
arch/riscv/Kconfig (1 change: 1 addition, 0 deletions)

@@ -48,6 +48,7 @@ config RISCV
 	select RISCV_TIMER
 	select GENERIC_IRQ_MULTI_HANDLER
 	select ARCH_HAS_PTE_SPECIAL
+	select ARCH_HAS_MMIOWB
 	select HAVE_EBPF_JIT if 64BIT
 
 config MMU
arch/riscv/include/asm/Kbuild (1 change: 0 additions, 1 deletion)

@@ -21,7 +21,6 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
 generic-y += mutex.h
 generic-y += percpu.h
 generic-y += preempt.h
arch/riscv/include/asm/io.h (15 changes: 2 additions, 13 deletions)

@@ -20,6 +20,7 @@
 #define _ASM_RISCV_IO_H
 
 #include <linux/types.h>
+#include <asm/mmiowb.h>
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
@@ -99,18 +100,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 }
 #endif
 
-/*
- * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
- * the ordering with I/O on spinlocks like PowerPC does. The worry is that
- * drivers won't get this correct, but I also don't want to introduce a fence
- * into the lock code that otherwise only uses AMOs (and is essentially defined
- * by the ISA to be correct). For now I'm leaving this here: "o,w" is
- * sufficient to ensure that all writes to the device have completed before the
- * write to the spinlock is allowed to commit. I surmised this from reading
- * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
- */
-#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
-
 /*
  * Unordered I/O memory access primitives. These are even more relaxed than
  * the relaxed versions, as they don't even order accesses between successive
@@ -165,7 +154,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define __io_br()	do {} while (0)
 #define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
 #define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw()	do {} while (0)
+#define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
 #define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
arch/riscv/include/asm/mmiowb.h (new file: 14 additions, 0 deletions)

@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <asm-generic/mmiowb.h>
+
+#endif	/* ASM_RISCV_MMIOWB_H */
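
For reference, this new header delegates the actual tracking to asm-generic. Below is a simplified paraphrase of that layer (see include/asm-generic/mmiowb.h); the struct and helper names follow the kernel, but this is a sketch, not the verbatim source. The idea: __io_aw() marks an I/O write as pending, and the spin_unlock() path flushes it with mmiowb().

/*
 * Simplified paraphrase of the asm-generic mmiowb tracking; not the
 * verbatim kernel source. One mmiowb_state exists per CPU, reached
 * via __mmiowb_state().
 */
struct mmiowb_state {
	u16	nesting_count;	/* how many spinlocks are currently held */
	u16	mmiowb_pending;	/* nonzero if an I/O write awaits ordering */
};

static inline void mmiowb_set_pending(void)	/* called via __io_aw() */
{
	struct mmiowb_state *ms = __mmiowb_state();

	ms->mmiowb_pending = ms->nesting_count;	/* nonzero only under a lock */
}

static inline void mmiowb_spin_lock(void)	/* hooked into spin_lock() */
{
	__mmiowb_state()->nesting_count++;
}

static inline void mmiowb_spin_unlock(void)	/* hooked into spin_unlock() */
{
	struct mmiowb_state *ms = __mmiowb_state();

	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();	/* "fence o,w" on riscv */
	}

	ms->nesting_count--;
}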
