sh: move xchg_cmpxchg to a header by itself
Looks like future sh variants will support a 4-byte cas instruction, which
will be used to implement 1- and 2-byte xchg.

This is exactly what we already do for llsc, so move the portable part of
the code into a separate header to make it easy to reuse.

Suggested-by: Rich Felker <dalias@libc.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Michael S. Tsirkin committed Jan 12, 2016
1 parent 3226aad commit 9e3f84c
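
For illustration only, a minimal sketch of how such a future variant might plug in. The cas.l mnemonic, the asm constraints, and the cmpxchg-cas.h name are assumptions, not part of this commit: the variant supplies its own __cmpxchg_u32() and then includes the shared header to pick up the 1- and 2-byte xchg helpers.

/* hypothetical arch/sh/include/asm/cmpxchg-cas.h sketch */
static inline unsigned long
__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
{
	/* cas.l: compare *m with "old"; if equal, store "new".  Either
	 * way the previous memory value is left in "new". */
	__asm__ __volatile__(
		"cas.l %1, %0, @r0\n"
		: "+r" (new)
		: "r" (old), "z" (m)
		: "t", "memory");
	return new;
}

#include <asm/cmpxchg-xchg.h>	/* xchg_u8()/xchg_u16() via the 4-byte cas */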
Showing 2 changed files with 52 additions and 34 deletions.
35 changes: 1 addition & 34 deletions arch/sh/include/asm/cmpxchg-llsc.h
@@ -1,9 +1,6 @@
 #ifndef __ASM_SH_CMPXCHG_LLSC_H
 #define __ASM_SH_CMPXCHG_LLSC_H
 
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long retval;
@@ -50,36 +47,6 @@ __cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 	return retval;
 }
 
-static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
-{
-	int off = (unsigned long)ptr % sizeof(u32);
-	volatile u32 *p = ptr - off;
-#ifdef __BIG_ENDIAN
-	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
-#else
-	int bitoff = off * BITS_PER_BYTE;
-#endif
-	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
-	u32 oldv, newv;
-	u32 ret;
-
-	do {
-		oldv = READ_ONCE(*p);
-		ret = (oldv & bitmask) >> bitoff;
-		newv = (oldv & ~bitmask) | (x << bitoff);
-	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
-
-	return ret;
-}
-
-static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
-{
-	return __xchg_cmpxchg(m, val, sizeof *m);
-}
-
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	return __xchg_cmpxchg(m, val, sizeof *m);
-}
+#include <asm/cmpxchg-xchg.h>
 
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
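
After the patch, cmpxchg-llsc.h keeps only its ll/sc-based 4-byte primitives and pulls the byte and word helpers in from the shared header. In outline (the movli.l/movco.l bodies are unchanged and elided here):

#ifndef __ASM_SH_CMPXCHG_LLSC_H
#define __ASM_SH_CMPXCHG_LLSC_H

static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	/* ... movli.l/movco.l retry loop, unchanged ... */
}

static inline unsigned long
__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
{
	/* ... movli.l/movco.l retry loop, unchanged ... */
}

#include <asm/cmpxchg-xchg.h>	/* supplies __xchg_cmpxchg(), xchg_u8/u16 */

#endif /* __ASM_SH_CMPXCHG_LLSC_H */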
51 changes: 51 additions & 0 deletions arch/sh/include/asm/cmpxchg-xchg.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_CMPXCHG_XCHG_H
+#define __ASM_SH_CMPXCHG_XCHG_H
+
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * file "COPYING" in the main directory of this archive for more details.
+ */
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg.
+ * Note: this header isn't self-contained: before including it, __cmpxchg_u32
+ * must be defined first.
+ */
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+#endif /* __ASM_SH_CMPXCHG_XCHG_H */
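
The mask arithmetic is easiest to see with numbers: for a u8 at offset 1 within its aligned word, little-endian gives bitoff = 8 and bitmask = 0xff00, while big-endian gives bitoff = (4 - 1 - 1) * 8 = 16, the same byte counted from the most significant end. (For size == 2 the big-endian shift appears off by one byte: off = 0 yields bitoff = 24, so the 0xffff mask loses its top half; a later mainline patch changed the constant 1 to size.) Below is a standalone userspace sketch of the same technique, little-endian only; all names are mine, and __atomic_compare_exchange_n merely stands in for the architecture's 4-byte cas.

/* Hypothetical demo: emulate a 1-byte xchg with a 4-byte compare-and-swap,
 * mirroring __xchg_cmpxchg() above.  Little-endian host assumed. */
#include <stdint.h>
#include <stdio.h>

static uint32_t cmpxchg_u32(volatile uint32_t *p, uint32_t old, uint32_t new)
{
	/* Like __cmpxchg_u32(): returns the value actually seen in memory. */
	__atomic_compare_exchange_n(p, &old, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;
}

static uint8_t xchg_u8(volatile uint8_t *m, uint8_t val)
{
	uintptr_t off = (uintptr_t)m % sizeof(uint32_t);
	volatile uint32_t *p = (volatile uint32_t *)((uintptr_t)m - off);
	int bitoff = off * 8;			/* little-endian case only */
	uint32_t bitmask = 0xffu << bitoff;
	uint32_t oldv, newv;

	do {
		oldv = *p;
		newv = (oldv & ~bitmask) | ((uint32_t)val << bitoff);
	} while (cmpxchg_u32(p, oldv, newv) != oldv);

	return (oldv & bitmask) >> bitoff;	/* the byte we replaced */
}

int main(void)
{
	static uint32_t word = 0x44332211;
	uint8_t old = xchg_u8((uint8_t *)&word + 2, 0xaa);	/* the 0x33 byte */

	/* prints: old=0x33 word=0x44aa2211 */
	printf("old=0x%02x word=0x%08x\n", (unsigned)old, (unsigned)word);
	return 0;
}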
