Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/mellanox/mlx5/core/en.h
	drivers/net/ethernet/mellanox/mlx5/core/en_main.c
	drivers/net/usb/r8152.c

All three conflicts were overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Jul 6, 2016
2 parents ae3e456 + bc86765 commit 30d0844
Showing 154 changed files with 1,455 additions and 618 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
2 changes: 0 additions & 2 deletions arch/arc/Makefile
@@ -66,8 +66,6 @@ endif
 
 endif
 
-cflags-$(CONFIG_ARC_DW2_UNWIND)	+= -fasynchronous-unwind-tables
-
 # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
 ifeq ($(atleast_gcc48),y)
 cflags-$(CONFIG_ARC_DW2_UNWIND)	+= -gdwarf-2
2 changes: 1 addition & 1 deletion arch/arc/kernel/stacktrace.c
@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
 	 * prelogue is setup (callee regs saved and then fp set and not other
 	 * way around
 	 */
-	pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
 	return 0;
 
 #endif
1 change: 1 addition & 0 deletions arch/arm/kvm/arm.c
@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
+	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
10 changes: 6 additions & 4 deletions arch/mips/include/asm/pgtable.h
@@ -24,7 +24,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
-				 _CACHE_CACHABLE_NONCOHERENT)
+				 _page_cachable_default)
 #define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
 				 _page_cachable_default)
 #define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
 	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
 	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
-	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
 	return pte;
 }
 #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
 }
 #endif
 
@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) |
+		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 	return pmd;
 }
 
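The pte_modify()/pmd_modify() fixes above come down to one rule: mask the incoming protection value so it cannot clobber the bits _PAGE_CHG_MASK says must be preserved. A minimal stand-alone sketch of that masking, with a made-up mask value rather than the real MIPS layout:

	#include <assert.h>

	/* Hypothetical stand-in for _PAGE_CHG_MASK: the bits pte_modify()
	 * must preserve (PFN, accessed/modified, cache attributes). */
	#define PAGE_CHG_MASK	0xffffff3fUL

	static unsigned long pte_modify_sketch(unsigned long pte, unsigned long newprot)
	{
		/* Masking newprot stops stray bits inside PAGE_CHG_MASK (e.g.
		 * cache-attribute bits) from overwriting the preserved fields. */
		return (pte & PAGE_CHG_MASK) | (newprot & ~PAGE_CHG_MASK);
	}

	int main(void)
	{
		unsigned long pte = 0x12345f3fUL;	/* PFN plus preserved bits */
		unsigned long newprot = 0xc5UL;		/* new protection bits */

		/* The preserved fields must survive the protection change. */
		assert((pte_modify_sketch(pte, newprot) & PAGE_CHG_MASK) ==
		       (pte & PAGE_CHG_MASK));
		return 0;
	}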
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
 #define KERN_VIRT_SIZE	__kernel_virt_size
 extern struct page *vmemmap;
 extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
 #endif /* __ASSEMBLY__ */
 
 #include <asm/book3s/64/hash.h>
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/eeh_driver.c
@@ -647,7 +647,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 			pci_unlock_rescan_remove();
 		}
 	} else if (frozen_bus) {
-		eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
 	}
 
 	/*
1 change: 0 additions & 1 deletion arch/powerpc/kernel/pci_64.c
@@ -47,7 +47,6 @@ static int __init pcibios_init(void)
 
 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
-	pci_io_base = ISA_IO_BASE;
 	/* For now, override phys_mem_access_prot. If we need it,g
 	 * later, we may move that initialization to each ppc_md
 	 */
10 changes: 10 additions & 0 deletions arch/powerpc/kernel/process.c
@@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 		current->thread.regs = regs - 1;
 	}
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/*
+	 * Clear any transactional state, we're exec()ing. The cause is
+	 * not important as there will never be a recheckpoint so it's not
+	 * user visible.
+	 */
+	if (MSR_TM_SUSPENDED(mfmsr()))
+		tm_reclaim_current(0);
+#endif
+
 	memset(regs->gpr, 0, sizeof(regs->gpr));
 	regs->ctr = 0;
 	regs->link = 0;
61 changes: 44 additions & 17 deletions arch/powerpc/kernel/tm.S
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
 	std	r3, STK_PARAM(R3)(r1)
 	SAVE_NVGPRS(r1)
 
-	/* We need to setup MSR for VSX register save instructions. Here we
-	 * also clear the MSR RI since when we do the treclaim, we won't have a
-	 * valid kernel pointer for a while. We clear RI here as it avoids
-	 * adding another mtmsr closer to the treclaim. This makes the region
-	 * maked as non-recoverable wider than it needs to be but it saves on
-	 * inserting another mtmsrd later.
-	 */
+	/* We need to setup MSR for VSX register save instructions. */
 	mfmsr	r14
 	mr	r15, r14
 	ori	r15, r15, MSR_FP
-	li	r16, MSR_RI
+	li	r16, 0
 	ori	r16, r16, MSR_EE	/* IRQs hard off */
 	andc	r15, r15, r16
 	oris	r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
 1:	tdeqi	r6, 0
 	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
 
-	/* The moment we treclaim, ALL of our GPRs will switch
+	/* Clear MSR RI since we are about to change r1, EE is already off. */
+	li	r4, 0
+	mtmsrd	r4, 1
+
+	/*
+	 * BE CAREFUL HERE:
+	 * At this point we can't take an SLB miss since we have MSR_RI
+	 * off. Load only to/from the stack/paca which are in SLB bolted regions
+	 * until we turn MSR RI back on.
+	 *
+	 * The moment we treclaim, ALL of our GPRs will switch
 	 * to user register state. (FPRs, CCR etc. also!)
 	 * Use an sprg and a tm_scratch in the PACA to shuffle.
 	 */
@@ -197,6 +201,11 @@ dont_backup_fp:
 
 	/* Store the PPR in r11 and reset to decent value */
 	std	r11, GPR11(r1)			/* Temporary stash */
+
+	/* Reset MSR RI so we can take SLB faults again */
+	li	r11, MSR_RI
+	mtmsrd	r11, 1
+
 	mfspr	r11, SPRN_PPR
 	HMT_MEDIUM
 
@@ -397,11 +406,6 @@ restore_gprs:
 	ld	r5, THREAD_TM_DSCR(r3)
 	ld	r6, THREAD_TM_PPR(r3)
 
-	/* Clear the MSR RI since we are about to change R1. EE is already off
-	 */
-	li	r4, 0
-	mtmsrd	r4, 1
-
 	REST_GPR(0, r7)				/* GPR0 */
 	REST_2GPRS(2, r7)			/* GPR2-3 */
 	REST_GPR(4, r7)				/* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
 	ld	r6, _CCR(r7)
 	mtcr	r6
 
-	REST_GPR(1, r7)				/* GPR1 */
-	REST_GPR(5, r7)				/* GPR5-7 */
-	REST_GPR(6, r7)
-	ld	r7, GPR7(r7)
+	/*
+	 * Store r1 and r5 on the stack so that we can access them
+	 * after we clear MSR RI.
+	 */
+
+	REST_GPR(5, r7)
+	std	r5, -8(r1)
+	ld	r5, GPR1(r7)
+	std	r5, -16(r1)
+
+	REST_GPR(7, r7)
+
+	/* Clear MSR RI since we are about to change r1. EE is already off */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/*
+	 * BE CAREFUL HERE:
+	 * At this point we can't take an SLB miss since we have MSR_RI
+	 * off. Load only to/from the stack/paca which are in SLB bolted regions
+	 * until we turn MSR RI back on.
+	 */
+
+	ld	r5, -8(r1)
+	ld	r1, -16(r1)
 
 	/* Commit register state as checkpointed state: */
 	TRECHKPT
4 changes: 4 additions & 0 deletions arch/powerpc/mm/hash_utils_64.c
@@ -922,6 +922,10 @@ void __init hash__early_init_mmu(void)
 	vmemmap = (struct page *)H_VMEMMAP_BASE;
 	ioremap_bot = IOREMAP_BASE;
 
+#ifdef CONFIG_PCI
+	pci_io_base = ISA_IO_BASE;
+#endif
+
 	/* Initialize the MMU Hash table and create the linear mapping
 	 * of memory. Has to be done before SLB initialization as this is
 	 * currently where the page size encoding is obtained.
5 changes: 5 additions & 0 deletions arch/powerpc/mm/pgtable-radix.c
@@ -328,6 +328,11 @@ void __init radix__early_init_mmu(void)
 	__vmalloc_end = RADIX_VMALLOC_END;
 	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
 	ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+	pci_io_base = ISA_IO_BASE;
+#endif
+
 	/*
 	 * For now radix also use the same frag size
 	 */
25 changes: 9 additions & 16 deletions arch/x86/include/asm/pvclock.h
@@ -68,30 +68,23 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 	return product;
 }
 
-static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
-	u64 delta = rdtsc_ordered() - src->tsc_timestamp;
-	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
-				   src->tsc_shift);
-}
-
 static __always_inline
 unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 			       cycle_t *cycles, u8 *flags)
 {
 	unsigned version;
-	cycle_t ret, offset;
-	u8 ret_flags;
+	cycle_t offset;
+	u64 delta;
 
 	version = src->version;
+	/* Make the latest version visible */
+	smp_rmb();
 
-	offset = pvclock_get_nsec_offset(src);
-	ret = src->system_time + offset;
-	ret_flags = src->flags;
-
-	*cycles = ret;
-	*flags = ret_flags;
+	delta = rdtsc_ordered() - src->tsc_timestamp;
+	offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+				     src->tsc_shift);
+	*cycles = src->system_time + offset;
+	*flags = src->flags;
 	return version;
 }
 
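The helper being folded in here, pvclock_scale_delta(), is a fixed-point conversion: shift the TSC delta by tsc_shift, multiply by the 32.32 fraction tsc_to_system_mul, and keep the top of the product. A stand-alone sketch of that arithmetic (a simplification, not the kernel's definition; unsigned __int128 assumes GCC/Clang, where the kernel spells out the 128-bit product by hand):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified model of pvclock_scale_delta(): scale a TSC delta by the
	 * hypervisor-supplied (tsc_to_system_mul, tsc_shift) pair. */
	static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int8_t shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;

		return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
	}

	int main(void)
	{
		/* Example: a 3 GHz TSC needs mul_frac of about 2^32 / 3 so
		 * that one cycle scales to one third of a nanosecond. */
		uint32_t mul_frac = (uint32_t)((1ULL << 32) / 3);

		/* 3e9 cycles -> roughly 1e9 ns (one second). */
		printf("%llu ns\n",
		       (unsigned long long)scale_delta(3000000000ULL, mul_frac, 0));
		return 0;
	}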
11 changes: 9 additions & 2 deletions arch/x86/kernel/pvclock.c
@@ -61,11 +61,16 @@ void pvclock_resume(void)
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
 	u8 flags;
 
 	do {
-		version = __pvclock_read_cycles(src, &ret, &flags);
+		version = src->version;
+		/* Make the latest version visible */
+		smp_rmb();
+
+		flags = src->flags;
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	return flags & valid_flags;
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
 	do {
 		version = __pvclock_read_cycles(src, &ret, &flags);
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
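Both pvclock.c hunks implement the same reader protocol: the hypervisor makes version odd while it updates the record, so a consistent snapshot means sampling version, reading the payload with a barrier on each side, and retrying if version was odd or changed. A user-space sketch of the pattern, with C11 atomics standing in for the kernel's smp_rmb() (names are illustrative, and a fully race-free C11 version would make the payload fields atomic as well):

	#include <stdatomic.h>
	#include <stdint.h>

	struct time_info {
		_Atomic unsigned version;	/* odd while an update is in flight */
		uint64_t system_time;
		uint8_t flags;
	};

	static uint8_t read_flags_sketch(struct time_info *src)
	{
		unsigned version;
		uint8_t flags;

		do {
			/* Acquire load keeps the payload reads below the sample. */
			version = atomic_load_explicit(&src->version,
						       memory_order_acquire);
			flags = src->flags;
			/* Acquire fence keeps the payload read above the re-check. */
			atomic_thread_fence(memory_order_acquire);
		} while ((version & 1) ||
			 version != atomic_load_explicit(&src->version,
							 memory_order_relaxed));
		return flags;
	}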
3 changes: 2 additions & 1 deletion arch/x86/kvm/lapic.c
@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always. */
 	if (guest_tsc < tsc_deadline)
-		__delay(tsc_deadline - guest_tsc);
+		__delay(min(tsc_deadline - guest_tsc,
+			nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)
23 changes: 11 additions & 12 deletions arch/x86/kvm/vmx.c
@@ -6671,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
 	/* Checks for #GP/#SS exceptions. */
 	exn = false;
-	if (is_protmode(vcpu)) {
+	if (is_long_mode(vcpu)) {
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check on the memory
+		 * destination for long mode!
+		 */
+		exn = is_noncanonical_address(*ret);
+	} else if (is_protmode(vcpu)) {
 		/* Protected mode: apply checks for segment validity in the
 		 * following order:
 		 *  - segment type check (#GP(0) may be thrown)
@@ -6688,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 		 * execute-only code segment
 		 */
 		exn = ((s.type & 0xa) == 8);
-	}
-	if (exn) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-		return 1;
-	}
-	if (is_long_mode(vcpu)) {
-		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
-		 * non-canonical form. This is an only check for long mode.
-		 */
-		exn = is_noncanonical_address(*ret);
-	} else if (is_protmode(vcpu)) {
+		if (exn) {
+			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+			return 1;
+		}
 		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
 		 */
 		exn = (s.unusable != 0);
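The vmx.c fix is purely about ordering: the old code ran the protected-mode segment checks, and only afterwards tested the long-mode canonical address, so the wrong set of checks could apply. A compilable control-flow sketch of the corrected shape (every helper here is a hypothetical stub; the real protected-mode branch interleaves several segment tests, collapsed into segment_checks() for brevity):

	#include <stdbool.h>

	struct vcpu { bool long_mode, protmode; };

	static bool is_long_mode(const struct vcpu *v) { return v->long_mode; }
	static bool is_protmode(const struct vcpu *v)  { return v->protmode; }

	/* 48-bit canonical check, assuming 64-bit long. */
	static bool is_noncanonical_address(unsigned long a)
	{
		return (unsigned long)((long)(a << 16) >> 16) != a;
	}

	static bool segment_checks(void) { return false; }	/* elided tests */
	static void queue_gp(void) { }				/* queue #GP(0) */

	static int check_mem_address(const struct vcpu *vcpu, unsigned long addr)
	{
		bool exn = false;

		if (is_long_mode(vcpu)) {
			/* Long mode: the canonical check is the only check. */
			exn = is_noncanonical_address(addr);
		} else if (is_protmode(vcpu)) {
			exn = segment_checks();
		}
		if (exn) {
			queue_gp();	/* fault is queued before returning */
			return 1;
		}
		return 0;
	}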
6 changes: 0 additions & 6 deletions arch/x86/kvm/x86.c
@@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 static unsigned long max_tsc_khz;
 
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
-	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
-				   vcpu->arch.virtual_tsc_shift);
-}
-
 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 {
 	u64 v = (u64)khz * (1000000 + ppm);
7 changes: 7 additions & 0 deletions arch/x86/kvm/x86.h
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_X86_H
 
 #include <linux/kvm_host.h>
+#include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 
 #define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL
@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
 
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+				   vcpu->arch.virtual_tsc_shift);
+}
+
 /* Same "calling convention" as do_div:
  * - divide (n << 32) by base
  * - put result in n
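Relocating nsec_to_cycles() into x86.h is what lets wait_lapic_expire() (the lapic.c hunk above) bound its busy-wait: the advance window in nanoseconds is converted to guest TSC cycles through the vCPU's virtual (mult, shift) pair, and __delay() never spins past that window. A rough stand-alone model of the clamping with made-up numbers, not KVM's real API:

	#include <stdint.h>
	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	/* Model of nsec_to_cycles(): ns -> guest TSC cycles via a 32.32
	 * fixed-point (mult, shift) pair, as in pvclock_scale_delta(). */
	static uint64_t nsec_to_cycles(uint64_t nsec, uint32_t mult, int8_t shift)
	{
		if (shift < 0)
			nsec >>= -shift;
		else
			nsec <<= shift;
		return (uint64_t)(((unsigned __int128)nsec * mult) >> 32);
	}

	int main(void)
	{
		/* Hypothetical 2 GHz guest: (mult, shift) chosen so 1 ns = 2 cycles. */
		uint32_t mult = 0x80000000u;
		int8_t shift = 2;
		uint64_t advance_ns = 500;	/* lapic_timer_advance_ns */
		uint64_t delta = 1000000;	/* tsc_deadline - guest_tsc */

		/* The spin is capped at the advance window (1000 cycles here),
		 * not the full deadline delta. */
		printf("busy-wait %llu cycles\n", (unsigned long long)
		       MIN(delta, nsec_to_cycles(advance_ns, mult, shift)));
		return 0;
	}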
(Diff truncated: the remaining changed files of the 154 total are not shown here.)