Merge branch 'linus' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar committed Jun 24, 2017
2 parents 8887cd9 + 94a6df2 commit 1bc3cd4
Showing 119 changed files with 1,158 additions and 665 deletions.
6 changes: 3 additions & 3 deletions Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -41,9 +41,9 @@ Required properties:
Optional properties:

In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.

-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"

- reg: an additional register set is needed, for the GPIO Blink
Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
};

gpio1: gpio@18140 {
compatible = "marvell,armada-370-xp-gpio";
compatible = "marvell,armada-370-gpio";
reg = <0x18140 0x40>, <0x181c8 0x08>;
reg-names = "gpio", "pwm";
ngpios = <17>;
2 changes: 1 addition & 1 deletion Documentation/devicetree/bindings/mfd/stm32-timers.txt
@@ -31,7 +31,7 @@ Example:
compatible = "st,stm32-timers";
reg = <0x40010000 0x400>;
clocks = <&rcc 0 160>;
clock-names = "clk_int";
clock-names = "int";

pwm {
compatible = "st,stm32-pwm";
2 changes: 1 addition & 1 deletion Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -34,7 +34,7 @@ Required properties:
"brcm,bcm6328-switch"
"brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"

-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
required and optional properties.

Examples:
1 change: 1 addition & 0 deletions Documentation/devicetree/bindings/net/smsc911x.txt
@@ -27,6 +27,7 @@ Optional properties:
of the device. On many systems this is wired high so the device goes
out of reset at power-on, but if it is under program control, this
optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies

Examples:

6 changes: 5 additions & 1 deletion arch/mips/kvm/tlb.c
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
bool user, bool kernel)
{
-int idx_user, idx_kernel;
+/*
+ * Initialize idx_user and idx_kernel to workaround bogus
+ * maybe-initialized warning when using GCC 6.
+ */
+int idx_user = 0, idx_kernel = 0;
unsigned long flags, old_entryhi;

local_irq_save(flags);
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
#ifdef CONFIG_KPROBES_ON_FTRACE
extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb);
11 changes: 7 additions & 4 deletions arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
.balign IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
-andis. r0,r4,0xa410 /* weird error? */
+andis. r0,r4,0xa450 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
-andis. r0,r4,DSISR_DABRMATCH@h
-bne- handle_dabr_fault
CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:

/* Error */
blt- 13f
+
+/* Reload DSISR into r4 for the DABR check below */
+ld r4,_DSISR(r1)
#endif /* CONFIG_PPC_STD_MMU_64 */

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
-11: ld r4,_DAR(r1)
+11: andis. r0,r4,DSISR_DABRMATCH@h
+bne- handle_dabr_fault
+ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
17 changes: 17 additions & 0 deletions arch/powerpc/kernel/kprobes.c
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

+int is_current_kprobe_addr(unsigned long addr)
+{
+struct kprobe *p = kprobe_running();
+return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
bool arch_within_kprobe_blacklist(unsigned long addr)
{
return (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

+/*
+* jprobes use jprobe_return() which skips the normal return
+* path of the function, and this messes up the accounting of the
+* function graph tracer.
+*
+* Pause function graph tracing while performing the jprobe function.
+*/
+pause_graph_tracing();
+
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
* saved regs...
*/
memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+/* It's OK to start function graph tracing again */
+unpause_graph_tracing();
preempt_enable_no_resched();
return 1;
}
31 changes: 28 additions & 3 deletions arch/powerpc/kernel/setup_64.c
@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
}
#endif

+/*
+* Emergency stacks are used for a range of things, from asynchronous
+* NMIs (system reset, machine check) to synchronous, process context.
+* We set preempt_count to zero, even though that isn't necessarily correct. To
+* get the right value we'd need to copy it from the previous thread_info, but
+* doing that might fault causing more problems.
+* TODO: what to do with accounting?
+*/
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+ti->task = NULL;
+ti->cpu = cpu;
+ti->preempt_count = 0;
+ti->local_flags = 0;
+ti->flags = 0;
+klp_init_thread_info(ti);
+}
+
/*
* Stack space used when we detect a bad kernel stack pointer, and
* early in SMP boots before relocation is enabled. Exclusive emergency
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
* Since we use these as temporary stacks during secondary CPU
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
+*
+* The IRQ stacks allocated elsewhere in this file are zeroed and
+* initialized in kernel/irq.c. These are initialized here in order
+* to have emergency stacks available as early as possible.
*/
limit = min(safe_stack_limit(), ppc64_rma_size);

for_each_possible_cpu(i) {
struct thread_info *ti;
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
/* emergency stack for NMI exception handling. */
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;

/* emergency stack for machine check exception handling. */
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
}
59 changes: 46 additions & 13 deletions arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
stdu r1,-SWITCH_FRAME_SIZE(r1)

/* Save all gprs to pt_regs */
-SAVE_8GPRS(0,r1)
-SAVE_8GPRS(8,r1)
-SAVE_8GPRS(16,r1)
-SAVE_8GPRS(24,r1)
+SAVE_GPR(0, r1)
+SAVE_10GPRS(2, r1)
+SAVE_10GPRS(12, r1)
+SAVE_10GPRS(22, r1)
+
+/* Save previous stack pointer (r1) */
+addi r8, r1, SWITCH_FRAME_SIZE
+std r8, GPR1(r1)

/* Load special regs for save below */
mfmsr r8
@@ -95,18 +99,44 @@ ftrace_call:
bl ftrace_stub
nop

-/* Load ctr with the possibly modified NIP */
-ld r3, _NIP(r1)
-mtctr r3
+/* Load the possibly modified NIP */
+ld r15, _NIP(r1)

#ifdef CONFIG_LIVEPATCH
-cmpd r14,r3 /* has NIP been altered? */
+cmpd r14, r15 /* has NIP been altered? */
#endif

+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+/* NIP has not been altered, skip over further checks */
+beq 1f
+
+/* Check if there is an active kprobe on us */
+subi r3, r14, 4
+bl is_current_kprobe_addr
+nop
+
+/*
+* If r3 == 1, then this is a kprobe/jprobe.
+* else, this is livepatched function.
+*
+* The conditional branch for livepatch_handler below will use the
+* result of this comparison. For kprobe/jprobe, we just need to branch to
+* the new NIP, not call livepatch_handler. The branch below is bne, so we
+* want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+* CR0[EQ] = (r3 == 1).
+*/
+cmpdi r3, 1
+1:
+#endif
+
+/* Load CTR with the possibly modified NIP */
+mtctr r15

/* Restore gprs */
-REST_8GPRS(0,r1)
-REST_8GPRS(8,r1)
-REST_8GPRS(16,r1)
-REST_8GPRS(24,r1)
+REST_GPR(0,r1)
+REST_10GPRS(2,r1)
+REST_10GPRS(12,r1)
+REST_10GPRS(22,r1)

/* Restore possibly modified LR */
ld r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
addi r1, r1, SWITCH_FRAME_SIZE

#ifdef CONFIG_LIVEPATCH
-/* Based on the cmpd above, if the NIP was altered handle livepatch */
+/*
+* Based on the cmpd or cmpdi above, if the NIP was altered and we're
+* not on a kprobe/jprobe, then handle livepatch.
+*/
bne- livepatch_handler
#endif

51 changes: 51 additions & 0 deletions arch/powerpc/kvm/book3s_hv.c
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
case KVM_REG_PPC_TB_OFFSET:
+/*
+* POWER9 DD1 has an erratum where writing TBU40 causes
+* the timebase to lose ticks. So we don't let the
+* timebase offset be changed on P9 DD1. (It is
+* initialized to zero.)
+*/
+if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+break;
/* round up to multiple of 2^24 */
vcpu->arch.vcore->tb_offset =
ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
+unsigned long ebb_regs[3] = {}; /* shut up GCC */
+unsigned long user_tar = 0;
+unsigned int user_vrsave;

if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}

+/*
+* Don't allow entry with a suspended transaction, because
+* the guest entry/exit code will lose it.
+* If the guest has TM enabled, save away their TM-related SPRs
+* (they will get restored by the TM unavailable interrupt).
+*/
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+(current->thread.regs->msr & MSR_TM)) {
+if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+run->fail_entry.hardware_entry_failure_reason = 0;
+return -EINVAL;
+}
+current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+current->thread.regs->msr &= ~MSR_TM;
+}
+#endif
+
kvmppc_core_prepare_to_enter(vcpu);

/* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)

flush_all_to_thread(current);

+/* Save userspace EBB and other register values */
+if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ebb_regs[0] = mfspr(SPRN_EBBHR);
+ebb_regs[1] = mfspr(SPRN_EBBRR);
+ebb_regs[2] = mfspr(SPRN_BESCR);
+user_tar = mfspr(SPRN_TAR);
+}
+user_vrsave = mfspr(SPRN_VRSAVE);
+
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu->arch.pgdir = current->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
} while (is_kvmppc_resume_guest(r));

+/* Restore userspace EBB and other register values */
+if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+mtspr(SPRN_EBBHR, ebb_regs[0]);
+mtspr(SPRN_EBBRR, ebb_regs[1]);
+mtspr(SPRN_BESCR, ebb_regs[2]);
+mtspr(SPRN_TAR, user_tar);
+mtspr(SPRN_FSCR, current->thread.fscr);
+}
+mtspr(SPRN_VRSAVE, user_vrsave);
+
out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
12 changes: 11 additions & 1 deletion arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* Put whatever is in the decrementer into the
* hypervisor decrementer.
*/
+BEGIN_FTR_SECTION
+ld r5, HSTATE_KVM_VCORE(r13)
+ld r6, VCORE_KVM(r5)
+ld r9, KVM_HOST_LPCR(r6)
+andis. r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mfspr r8,SPRN_DEC
mftb r7
-mtspr SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+bne 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r8,r8
+32: mtspr SPRN_HDEC,r8
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)

Expand Down
[remaining changed files not shown]
