Merge tag 'v4.3.5' into dev-4.3
This is the 4.3.5 stable release

Signed-off-by: Joel Stanley <joel@jms.id.au>
Joel Stanley committed Feb 2, 2016
2 parents 2a3f5f1 + 849a2d3 commit f3f4aeb
Showing 172 changed files with 1,622 additions and 722 deletions.
16 changes: 9 additions & 7 deletions Documentation/ABI/testing/sysfs-bus-usb
@@ -114,19 +114,21 @@ Description:
 enabled for the device. Developer can write y/Y/1 or n/N/0 to
 the file to enable/disable the feature.
 
-What: /sys/bus/usb/devices/.../power/usb3_hardware_lpm
-Date: June 2015
+What: /sys/bus/usb/devices/.../power/usb3_hardware_lpm_u1
+      /sys/bus/usb/devices/.../power/usb3_hardware_lpm_u2
+Date: November 2015
 Contact: Kevin Strasser <kevin.strasser@linux.intel.com>
+         Lu Baolu <baolu.lu@linux.intel.com>
 Description:
 If CONFIG_PM is set and a USB 3.0 lpm-capable device is plugged
 in to a xHCI host which supports link PM, it will check if U1
 and U2 exit latencies have been set in the BOS descriptor; if
-the check is is passed and the host supports USB3 hardware LPM,
+the check is passed and the host supports USB3 hardware LPM,
 USB3 hardware LPM will be enabled for the device and the USB
-device directory will contain a file named
-power/usb3_hardware_lpm. The file holds a string value (enable
-or disable) indicating whether or not USB3 hardware LPM is
-enabled for the device.
+device directory will contain two files named
+power/usb3_hardware_lpm_u1 and power/usb3_hardware_lpm_u2. These
+files hold a string value (enable or disable) indicating whether
+or not USB3 hardware LPM U1 or U2 is enabled for the device.
 
 What: /sys/bus/usb/devices/.../removable
 Date: February 2012
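For illustration only (not part of this commit): a minimal user-space sketch that reads the two new attributes. The device path "2-1" is a placeholder, and per the description above these files are read-only status indicators.

/* Hedged sketch: read the per-device U1/U2 hardware-LPM status.
 * "2-1" is a hypothetical device; real paths depend on bus topology. */
#include <stdio.h>

int main(void)
{
    const char *attrs[] = {
        "/sys/bus/usb/devices/2-1/power/usb3_hardware_lpm_u1",
        "/sys/bus/usb/devices/2-1/power/usb3_hardware_lpm_u2",
    };
    char buf[16];

    for (int i = 0; i < 2; i++) {
        FILE *f = fopen(attrs[i], "r");

        if (!f)
            continue;   /* not a USB3 device, or LPM unsupported */
        if (fgets(buf, sizeof(buf), f))
            printf("%s: %s", attrs[i], buf);  /* "enable" or "disable" */
        fclose(f);
    }
    return 0;
}
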
11 changes: 6 additions & 5 deletions Documentation/usb/power-management.txt
@@ -537,17 +537,18 @@ relevant attribute files are usb2_hardware_lpm and usb3_hardware_lpm.
 can write y/Y/1 or n/N/0 to the file to enable/disable
 USB2 hardware LPM manually. This is for test purpose mainly.
 
-power/usb3_hardware_lpm
+power/usb3_hardware_lpm_u1
+power/usb3_hardware_lpm_u2
 
 When a USB 3.0 lpm-capable device is plugged in to a
 xHCI host which supports link PM, it will check if U1
 and U2 exit latencies have been set in the BOS
 descriptor; if the check is is passed and the host
 supports USB3 hardware LPM, USB3 hardware LPM will be
-enabled for the device and this file will be created.
-The file holds a string value (enable or disable)
-indicating whether or not USB3 hardware LPM is
-enabled for the device.
+enabled for the device and these files will be created.
+The files hold a string value (enable or disable)
+indicating whether or not USB3 hardware LPM U1 or U2
+is enabled for the device.
 
 USB Port Power Control
 ----------------------
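The usb2_hardware_lpm attribute quoted in the context above is writable; a hedged sketch of the manual toggle it describes (placeholder device path, needs root):

/* Write 'n' to disable USB2 hardware LPM for testing, per the text above.
 * "2-1" is a placeholder device path. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/sys/bus/usb/devices/2-1/power/usb2_hardware_lpm", "w");

    if (!f)
        return 1;
    fputs("n\n", f);    /* y/Y/1 enables, n/N/0 disables */
    return fclose(f) ? 1 : 0;
}
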
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 3
-SUBLEVEL = 4
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Blurry Fish Butt
15 changes: 7 additions & 8 deletions arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 __kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+return !pfn_valid(pfn);
+}
+
 /**
 * stage2_dissolve_pmd() - clear and flush huge PMD entry
 * @kvm: pointer to kvm structure.

@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 /* No need to invalidate the cache for device mappings */
-if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 kvm_flush_dcache_pte(old_pte);
 
 put_page(virt_to_page(pte));

@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 pte = pte_offset_kernel(pmd, addr);
 do {
-if (!pte_none(*pte) &&
-    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 kvm_flush_dcache_pte(*pte);
 } while (pte++, addr += PAGE_SIZE, addr != end);
 }

@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-return !pfn_valid(pfn);
-}
-
 /**
 * stage2_wp_ptes - write protect PMD range
 * @pmd: pointer to pmd entry
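The hunks above swap a PTE-attribute test (PAGE_S2_DEVICE) for a pfn-validity test: a pfn with no struct page behind it cannot be RAM, so there is no cache line to flush. A standalone model of the predicate, for illustration only (pfn_valid() is stubbed here and the RAM boundary is made up):

#include <stdbool.h>
#include <stdio.h>

#define RAM_END_PFN 0x80000UL              /* hypothetical end of RAM */

static bool pfn_valid(unsigned long pfn)   /* stub of the kernel helper */
{
    return pfn < RAM_END_PFN;
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
    return !pfn_valid(pfn);                /* no struct page => device memory */
}

int main(void)
{
    printf("0x1000  -> %d\n", kvm_is_device_pfn(0x1000));   /* 0: RAM */
    printf("0x90000 -> %d\n", kvm_is_device_pfn(0x90000));  /* 1: device */
    return 0;
}
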
16 changes: 1 addition & 15 deletions arch/arm/net/bpf_jit_32.c
@@ -182,19 +182,6 @@ static inline int mem_words_used(struct jit_ctx *ctx)
 return fls(ctx->seen & SEEN_MEM);
 }
 
-static inline bool is_load_to_a(u16 inst)
-{
-switch (inst) {
-case BPF_LD | BPF_W | BPF_LEN:
-case BPF_LD | BPF_W | BPF_ABS:
-case BPF_LD | BPF_H | BPF_ABS:
-case BPF_LD | BPF_B | BPF_ABS:
-return true;
-default:
-return false;
-}
-}
-
 static void jit_fill_hole(void *area, unsigned int size)
 {
 u32 *ptr;

@@ -206,7 +193,6 @@ static void jit_fill_hole(void *area, unsigned int size)
 static void build_prologue(struct jit_ctx *ctx)
 {
 u16 reg_set = saved_regs(ctx);
-u16 first_inst = ctx->skf->insns[0].code;
 u16 off;
 
 #ifdef CONFIG_FRAME_POINTER

@@ -236,7 +222,7 @@ static void build_prologue(struct jit_ctx *ctx)
 emit(ARM_MOV_I(r_X, 0), ctx);
 
 /* do not leak kernel data to userspace */
-if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
+if (bpf_needs_clear_a(&ctx->skf->insns[0]))
 emit(ARM_MOV_I(r_A, 0), ctx);
 
 /* stack space for the BPF_MEM words */
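bpf_needs_clear_a() itself does not appear on this page; it was introduced in include/linux/filter.h by the same series and plausibly reads as below (a hedged reconstruction, not quoted from this diff, relying on the kernel's BPF_* and SKF_AD_* definitions). It keeps the prologue from zeroing A when the first instruction never reads A, except for the SKF_AD_ALU_XOR_X pseudo-load, which does consume A:

/* Hedged reconstruction; kernel context (include/linux/filter.h). */
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
    switch (first->code) {
    case BPF_RET | BPF_K:
    case BPF_LD | BPF_W | BPF_LEN:
        return false;            /* first insn never reads A */
    case BPF_LD | BPF_W | BPF_ABS:
    case BPF_LD | BPF_H | BPF_ABS:
    case BPF_LD | BPF_B | BPF_ABS:
        if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
            return true;         /* this pseudo-load consumes A */
        return false;
    default:
        return true;
    }
}
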
21 changes: 21 additions & 0 deletions arch/arm64/Kconfig
@@ -311,6 +311,27 @@ config ARM64_ERRATUM_832075
 
 If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+depends on KVM
+default y
+help
+This option adds an alternative code sequence to work around ARM
+erratum 834220 on Cortex-A57 parts up to r1p2.
+
+Affected Cortex-A57 parts might report a Stage 2 translation
+fault as the result of a Stage 1 fault for a load crossing
+a page boundary when there is a Stage 1 permission or device
+memory alignment fault and a Stage 2 translation fault.
+
+The workaround is to verify that the Stage-1 translation
+doesn't generate a fault before handling the Stage-2 fault.
+Please note that this does not necessarily enable the workaround,
+as it depends on the alternative framework, which will only patch
+the kernel if an affected CPU is detected.
+
+If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 bool "Cortex-A53: 845719: a load might read incorrect data"
 depends on COMPAT
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/atomic_ll_sc.h
@@ -211,7 +211,7 @@ __CMPXCHG_CASE( , , mb_8, dmb ish, l, "memory")
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, rel, cl)
-__LL_SC_INLINE int
+__LL_SC_INLINE long
 __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,
 unsigned long old2,
 unsigned long new1,
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/atomic_lse.h
@@ -348,7 +348,7 @@ __CMPXCHG_CASE(x, , mb_8, al, "memory")
 #define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)
 
 #define __CMPXCHG_DBL(name, mb, cl...)
-static inline int __cmpxchg_double##name(unsigned long old1,
+static inline long __cmpxchg_double##name(unsigned long old1,
 unsigned long old2,
 unsigned long new1,
 unsigned long new2,
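Both headers widen __cmpxchg_double's return type from int to long for the same reason: the success/failure status is built by XOR-ing and OR-ing the two observed 64-bit words against the expected ones, so every set bit may live above bit 31. Truncating that status to int would misreport such a failed compare as success. A minimal demonstration of the truncation (assumes a 64-bit long, as on arm64):

#include <stdio.h>

int main(void)
{
    unsigned long old1 = 0x0000000012345678UL;
    unsigned long cur1 = 0x0000000112345678UL;  /* differs only in bit 32 */
    long status = (long)(old1 ^ cur1);          /* nonzero: compare failed */

    printf("as long: %ld (failure detected)\n", status);
    printf("as int : %d (failure lost)\n", (int)status);
    return 0;
}
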
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/cpufeature.h
@@ -27,8 +27,9 @@
 #define ARM64_HAS_SYSREG_GIC_CPUIF 3
 #define ARM64_HAS_PAN 4
 #define ARM64_HAS_LSE_ATOMICS 5
+#define ARM64_WORKAROUND_834220 6
 
-#define ARM64_NCAPS 6
+#define ARM64_NCAPS 7
 
 #ifndef __ASSEMBLY__
 
8 changes: 5 additions & 3 deletions arch/arm64/include/asm/kvm_emulate.h
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
 if (vcpu_mode_is_32bit(vcpu))
 return vcpu_reg32(vcpu, reg_num);
 
 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
9 changes: 9 additions & 0 deletions arch/arm64/kernel/cpu_errata.c
@@ -74,6 +74,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 (1 << MIDR_VARIANT_SHIFT) | 2),
 },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+{
+/* Cortex-A57 r0p0 - r1p2 */
+.desc = "ARM erratum 834220",
+.capability = ARM64_WORKAROUND_834220,
+MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+           (1 << MIDR_VARIANT_SHIFT) | 2),
+},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 {
 /* Cortex-A53 r0p[01234] */
5 changes: 5 additions & 0 deletions arch/arm64/kernel/head.S
@@ -524,9 +524,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
 #endif
 
 /* EL2 debug */
+mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
+sbfx x0, x0, #8, #4
+cmp x0, #1
+b.lt 4f // Skip if no PMU present
 mrs x0, pmcr_el0 // Disable debug access traps
 ubfx x0, x0, #11, #5 // to EL2 and allow access to
 msr mdcr_el2, x0 // all PMU counters from EL1
+4:
 
 /* Stage-2 translation */
 msr vttbr_el2, xzr
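The four added instructions sign-extract the PMUVer field (ID_AA64DFR0_EL1 bits [11:8]) and skip the MDCR_EL2 setup when the result is below 1; that covers both 0 (no PMU) and 0xf (IMPLEMENTATION DEFINED, which reads as -1 once sign-extended). A C rendering of the sbfx/cmp pair, with made-up register values:

#include <stdio.h>

static int pmuver(unsigned long long dfr0)
{
    /* sbfx x0, x0, #8, #4: signed extract of bits [11:8] */
    return (int)((long long)(dfr0 << 52) >> 60);
}

int main(void)
{
    printf("%d\n", pmuver(0x100ULL));  /*  1: PMUv3 present        */
    printf("%d\n", pmuver(0x000ULL));  /*  0: no PMU    -> skipped */
    printf("%d\n", pmuver(0xf00ULL));  /* -1: impl. def -> skipped */
    return 0;
}
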
3 changes: 0 additions & 3 deletions arch/arm64/kernel/perf_event.c
@@ -1159,9 +1159,6 @@ static void armv8pmu_reset(void *info)
 
 /* Initialize & Reset PMNC: C and P bits. */
 armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
-
-/* Disable access from userspace. */
-asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
6 changes: 6 additions & 0 deletions arch/arm64/kernel/ptrace.c
@@ -58,6 +58,12 @@
 */
 void ptrace_disable(struct task_struct *child)
 {
+/*
+ * This would be better off in core code, but PTRACE_DETACH has
+ * grown its fair share of arch-specific warts and changing it
+ * is likely to cause regressions on obscure architectures.
+ */
+user_disable_single_step(child);
 }
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4 changes: 4 additions & 0 deletions arch/arm64/kernel/setup.c
@@ -558,6 +558,10 @@ static int c_show(struct seq_file *m, void *v)
 */
 seq_printf(m, "processor\t: %d\n", i);
 
+seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+           loops_per_jiffy / (500000UL/HZ),
+           loops_per_jiffy / (5000UL/HZ) % 100);
+
 /*
 * Dump out the common processor features in a single line.
 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
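The two extra seq_printf() arguments derive BogoMIPS (loops_per_jiffy * HZ / 500000) as an integer part and a two-digit fraction. A quick check of the arithmetic with made-up values:

#include <stdio.h>

#define HZ 100UL                    /* assumed tick rate */

int main(void)
{
    unsigned long loops_per_jiffy = 123456UL;  /* made up */

    printf("BogoMIPS\t: %lu.%02lu\n",
           loops_per_jiffy / (500000UL / HZ),      /* 123456/5000 = 24      */
           loops_per_jiffy / (5000UL / HZ) % 100); /* 123456/50 %% 100 = 69 */
    return 0;
}

This prints "BogoMIPS : 24.69", matching 123456 * 100 / 500000 = 24.69.
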
10 changes: 10 additions & 0 deletions arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>

@@ -70,6 +71,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 */
 local_dbg_save(flags);
 
+/*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka suspend finishers) hence
+ * disable graph tracing during their execution.
+ */
+pause_graph_tracing();
+
 /*
 * mm context saved on the stack, it will be restored when
 * the cpu comes out of reset through the identity mapped

@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 hw_breakpoint_restore(NULL);
 }
 
+unpause_graph_tracing();
+
 /*
 * Restore pstate flags. OS lock and mdscr have been already
 * restored, so from this point onwards, debugging is fully
6 changes: 6 additions & 0 deletions arch/arm64/kvm/hyp.S
@@ -1007,9 +1007,15 @@ el1_trap:
 b.ne 1f // Not an abort we care about
 
 /* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
 and x2, x1, #ESR_ELx_FSC_TYPE
 cmp x2, #FSC_PERM
 b.ne 1f // Not a permission fault
+alternative_else
+nop // Force a Stage-1 translation to occur
+nop // and return to the guest if it failed
+nop
+alternative_endif
 
 /*
 * Check for Stage-1 page table walk, which is guaranteed
2 changes: 1 addition & 1 deletion arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 /* Note: These now point to the banked copies */
 *vcpu_spsr(vcpu) = new_spsr_value;
-*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 /* Branch to exception vector */
 if (sctlr & (1 << 13))
3 changes: 3 additions & 0 deletions arch/arm64/mm/mmu.c
@@ -451,6 +451,9 @@ void __init paging_init(void)
 
 empty_zero_page = virt_to_page(zero_page);
 
+/* Ensure the zero page is visible to the page table walker */
+dsb(ishst);
+
 /*
 * TTBR0 is only used for the identity mapping at this stage. Make it
 * point to zero page to avoid speculatively fetching new entries.
12 changes: 12 additions & 0 deletions arch/arm64/mm/proc-macros.S
@@ -62,3 +62,15 @@
 bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
 #endif
 .endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+.macro reset_pmuserenr_el0, tmpreg
+mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
+sbfx \tmpreg, \tmpreg, #8, #4
+cmp \tmpreg, #1 // Skip if no PMU present
+b.lt 9000f
+msr pmuserenr_el0, xzr // Disable PMU access from EL0
+9000:
+.endm
2 changes: 2 additions & 0 deletions arch/arm64/mm/proc.S
@@ -115,6 +115,7 @@ ENTRY(cpu_do_resume)
 */
 ubfx x11, x11, #1, #1
 msr oslar_el1, x11
+reset_pmuserenr_el0 x0 // Disable PMU access from EL0
 mov x0, x12
 dsb nsh // Make sure local tlb invalidation completed
 isb

@@ -153,6 +154,7 @@ ENTRY(__cpu_setup)
 msr cpacr_el1, x0 // Enable FP/ASIMD
 mov x0, #1 << 12 // Reset mdscr_el1 and disable
 msr mdscr_el1, x0 // access to the DCC from EL0
+reset_pmuserenr_el0 x0 // Disable PMU access from EL0
 /*
 * Memory region attributes for LPAE:
 *
3 changes: 2 additions & 1 deletion arch/arm64/net/bpf_jit.h
@@ -1,7 +1,7 @@
 /*
 * BPF JIT compiler for ARM64
 *
-* Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+* Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as

@@ -35,6 +35,7 @@
 aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
 AARCH64_INSN_BRANCH_COMP_##type)
 #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
 
 /* Conditional branch (immediate) */
 #define A64_COND_BRANCH(cond, offset) \