Merge branch 'core/urgent' into sched/core
Merge in asm goto fix, to be able to apply the asm/rmwcc.h fix.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar committed Oct 11, 2013
2 parents 3354781 + 3f0116c commit ec0ad3d
Showing 72 changed files with 456 additions and 202 deletions.
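The "asm goto fix" this merge pulls in is the asm_volatile_goto() quirk for a GCC 'asm goto' miscompilation (GCC bug 58670): every asm goto gets wrapped in a macro that emits an empty asm statement after it as a compiler barrier. A minimal sketch of that wrapper, assuming the compiler-gcc header change from the merged branch, looks like:

/* Work around GCC bug 58670: 'asm goto' can be miscompiled without a
 * barrier after it; the empty asm("") forces one. */
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)

The per-arch jump_label implementations in the diff below are converted from asm goto(...) to asm_volatile_goto(...) so the quirk applies, which in turn allows the asm/rmwcc.h fix mentioned in the commit message to be applied on top.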
9 changes: 7 additions & 2 deletions arch/arm/Makefile
@@ -296,10 +296,15 @@ archprepare:
# Convert bzImage to zImage
bzImage: zImage

-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS = zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS = zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@

-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@

%.dtb: | scripts
16 changes: 8 additions & 8 deletions arch/arm/boot/Makefile
@@ -95,24 +95,24 @@ initrd:
@test "$(INITRD)" != "" || \
(echo You must specify INITRD; exit -1)

-install: $(obj)/Image
-$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/Image System.map "$(INSTALL_PATH)"

-zinstall: $(obj)/zImage
-$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/zImage System.map "$(INSTALL_PATH)"

-uinstall: $(obj)/uImage
-$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/uImage System.map "$(INSTALL_PATH)"

zi:
-$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/zImage System.map "$(INSTALL_PATH)"

i:
-$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
$(obj)/Image System.map "$(INSTALL_PATH)"

subdir- := bootp compressed dts
14 changes: 14 additions & 0 deletions arch/arm/boot/install.sh
@@ -20,6 +20,20 @@
# $4 - default install path (blank if root directory)
#

+verify () {
+if [ ! -f "$1" ]; then
+echo "" 1>&2
+echo " *** Missing file: $1" 1>&2
+echo ' *** You need to run "make" before "make install".' 1>&2
+echo "" 1>&2
+exit 1
+fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
# User may have a custom install script
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
2 changes: 1 addition & 1 deletion arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@

static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\n\t"
asm_volatile_goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
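For context, arch_static_branch() above is the fast-path side of the static-key API: it emits a NOP plus a __jump_table entry that the kernel can later patch into a real branch. A typical caller, sketched with made-up names (my_feature_key and do_rare_slow_path() are illustrative, not from this diff), looks roughly like:

#include <linux/jump_label.h>

static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static void hot_path(void)
{
	/* compiles to a single NOP via arch_static_branch() until the key
	 * is enabled with static_key_slow_inc(&my_feature_key) */
	if (static_key_false(&my_feature_key))
		do_rare_slow_path();
}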
2 changes: 1 addition & 1 deletion arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@

static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\tnop\n\t"
asm_volatile_goto("1:\tnop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@

static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\n\t"
asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
5 changes: 3 additions & 2 deletions arch/powerpc/kernel/irq.c
@@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
-struct thread_info *curtp, *irqtp;
+struct thread_info *curtp, *irqtp, *sirqtp;

/* Switch to the irq stack to handle this */
curtp = current_thread_info();
irqtp = hardirq_ctx[raw_smp_processor_id()];
+sirqtp = softirq_ctx[raw_smp_processor_id()];

/* Already there ? */
-if (unlikely(curtp == irqtp)) {
+if (unlikely(curtp == irqtp || curtp == sirqtp)) {
__do_irq(regs);
set_irq_regs(old_regs);
return;
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
-std r8, VCPU_DSCR(r7)
+std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

18 changes: 17 additions & 1 deletion arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned long hva;
int pfnmap = 0;
int tsize = BOOK3E_PAGESZ_4K;
+int ret = 0;
+unsigned long mmu_seq;
+struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+/* used to check for invalidations in progress */
+mmu_seq = kvm->mmu_notifier_seq;
+smp_rmb();

/*
* Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}

+spin_lock(&kvm->mmu_lock);
+if (mmu_notifier_retry(kvm, mmu_seq)) {
+ret = -EAGAIN;
+goto out;
+}
+
kvmppc_e500_ref_setup(ref, gtlbe, pfn);

kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
/* Clear i-cache for new pages */
kvmppc_mmu_flush_icache(pfn);

+out:
+spin_unlock(&kvm->mmu_lock);
+
/* Drop refcount on page, so that mmu notifiers can clear it */
kvm_release_pfn_clean(pfn);

-return 0;
+return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
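The e500 shadow-map change above follows the usual KVM MMU-notifier race pattern: sample kvm->mmu_notifier_seq before the (possibly sleeping) translation, then re-check it under kvm->mmu_lock before installing the shadow entry, and retry if an invalidation ran in between. Stripped of the e500 details, the pattern is roughly as follows (install_shadow_entry() is a placeholder, not a real function):

	mmu_seq = kvm->mmu_notifier_seq;	/* snapshot before translating */
	smp_rmb();
	/* ... gfn_to_pfn() etc. may sleep; an invalidation can race here ... */
	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;			/* caller retries the mapping */
		goto out_unlock;
	}
	install_shadow_entry();			/* safe: no invalidation slipped in */
out_unlock:
	spin_unlock(&kvm->mmu_lock);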
2 changes: 1 addition & 1 deletion arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@

static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("0: brcl 0,0\n"
asm_volatile_goto("0: brcl 0,0\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
42 changes: 20 additions & 22 deletions arch/s390/kernel/crash_dump.c
@@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
}

/*
-* Copy up to one page to vmalloc or real memory
+* Copy real to virtual or real memory
*/
-static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+static int copy_from_realmem(void *dest, void *src, size_t count)
{
-size_t size;
+unsigned long size;
+int rc;

-if (is_vmalloc_addr(buf)) {
-BUG_ON(csize >= PAGE_SIZE);
-/* If buf is not page aligned, copy first part */
-size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
-if (size) {
-if (memcpy_real(load_real_addr(buf), src, size))
-return -EFAULT;
-buf += size;
-src += size;
-}
-/* Copy second part */
-size = csize - size;
-return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
-} else {
-return memcpy_real(buf, src, csize);
-}
+if (!count)
+return 0;
+if (!is_vmalloc_or_module_addr(dest))
+return memcpy_real(dest, src, count);
+do {
+size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
+if (memcpy_real(load_real_addr(dest), src, size))
+return -EFAULT;
+count -= size;
+dest += size;
+src += size;
+} while (count);
+return 0;
}

/*
@@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
rc = copy_to_user_real((void __force __user *) buf,
(void *) src, csize);
else
-rc = copy_page_real(buf, (void *) src, csize);
+rc = copy_from_realmem(buf, (void *) src, csize);
return (rc == 0) ? rc : csize;
}

@@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
if (OLDMEM_BASE) {
if ((unsigned long) src < OLDMEM_SIZE) {
copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
if (rc)
return rc;
}
@@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
return rc;
}
}
-return memcpy_real(dest + copied, src + copied, count - copied);
+return copy_from_realmem(dest + copied, src + copied, count - copied);
}

/*
1 change: 1 addition & 0 deletions arch/s390/kernel/entry.S
@@ -266,6 +266,7 @@ sysc_sigpending:
tm __TI_flags+3(%r12),_TIF_SYSCALL
jno sysc_return
lm %r2,%r7,__PT_R2(%r11) # load svc arguments
+l %r10,__TI_sysc_table(%r12) # 31 bit system call table
xr %r8,%r8 # svc 0 returns -ENOSYS
clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
jnl sysc_nr_ok # invalid svc number -> do svc 0
1 change: 1 addition & 0 deletions arch/s390/kernel/entry64.S
@@ -297,6 +297,7 @@ sysc_sigpending:
tm __TI_flags+7(%r12),_TIF_SYSCALL
jno sysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
6 changes: 5 additions & 1 deletion arch/s390/kernel/kprobes.c
@@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
case 0xac: /* stnsm */
case 0xad: /* stosm */
return -EINVAL;
+case 0xc6:
+switch (insn[0] & 0x0f) {
+case 0x00: /* exrl */
+return -EINVAL;
+}
}
switch (insn[0]) {
case 0x0101: /* pr */
@@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
break;
case 0xc6:
switch (insn[0] & 0x0f) {
-case 0x00: /* exrl */
case 0x02: /* pfdrl */
case 0x04: /* cghrl */
case 0x05: /* chrl */
2 changes: 1 addition & 1 deletion arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@

static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\n\t"
asm_volatile_goto("1:\n\t"
"nop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
5 changes: 3 additions & 2 deletions arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
*
* Atomically sets @v to @i and returns old @v
*/
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
return xchg64(&v->counter, n);
}
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
* Atomically checks if @v holds @o and replaces it with @n if so.
* Returns the old value at @v.
*/
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+long long n)
{
return cmpxchg64(&v->counter, o, n);
}
27 changes: 15 additions & 12 deletions arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
/* A 64bit atomic type */

typedef struct {
-u64 __aligned(8) counter;
+long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
*
* Atomically reads the value of @v.
*/
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
{
/*
* Requires an atomic op to read both 32-bit parts consistently.
* Casting away const is safe since the atomic support routines
* do not write to memory if the value has not been modified.
*/
-return _atomic64_xchg_add((u64 *)&v->counter, 0);
+return _atomic64_xchg_add((long long *)&v->counter, 0);
}

/**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
*
* Atomically adds @i to @v.
*/
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
{
_atomic64_xchg_add(&v->counter, i);
}
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+long long u)
{
smp_mb(); /* barrier for proper semantics */
return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
* atomic64_set() can't be just a raw store, since it would be lost if it
* fell between the load and store of one of the other atomic ops.
*/
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
{
_atomic64_xchg(&v->counter, n);
}
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+int *lock, long long o, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);