Commit 1592c4d
Merge branch 'nfs-rdma'
Trond Myklebust committed Jul 24, 2016
2 parents 668f455 + f044567
Showing 205 changed files with 2,417 additions and 1,698 deletions.
6 changes: 3 additions & 3 deletions Documentation/x86/intel_mpx.txt
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
MPX-instrumented.
3) The kernel detects that the CPU has MPX, allows the new prctl() to
succeed, and notes the location of the bounds directory. Userspace is
-expected to keep the bounds directory at that locationWe note it
+expected to keep the bounds directory at that location. We note it
instead of reading it each time because the 'xsave' operation needed
to access the bounds directory register is an expensive operation.
4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
We need to decode MPX instructions to get violation address and
set this address into extended struct siginfo.

-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:

87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
88 struct {
@@ -240,5 +240,5 @@ them at the same bounds table.
This is allowed architecturally. See more information "Intel(R) Architecture
Instruction Set Extensions Programming Reference" (9.3.4).

-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
in-use bounds table since it does not recognize sharing.
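
For readers of the MPX flow above: the management handshake in step 3 is driven from user space through prctl(). A minimal sketch, assuming an MPX-capable CPU and kernel; PR_MPX_ENABLE_MANAGEMENT is the real constant from linux/prctl.h, the rest is illustrative:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_MPX_ENABLE_MANAGEMENT
#define PR_MPX_ENABLE_MANAGEMENT 43  /* from include/uapi/linux/prctl.h */
#endif

int main(void)
{
        /* Ask the kernel to manage bounds tables: this is where it notes
         * the bounds directory location instead of re-reading it via the
         * expensive 'xsave' each time, as described above. */
        if (prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0)) {
                perror("PR_MPX_ENABLE_MANAGEMENT");
                return 1;
        }
        return 0;
}
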
4 changes: 2 additions & 2 deletions Documentation/x86/tlb.txt
@@ -5,7 +5,7 @@ memory, it has two choices:
from areas other than the one we are trying to flush will be
destroyed and must be refilled later, at some cost.
2. Use the invlpg instruction to invalidate a single page at a
-time. This could potentialy cost many more instructions, but
+time. This could potentially cost many more instructions, but
it is a much more precise operation, causing no collateral
damage to other TLB entries.

@@ -19,7 +19,7 @@ Which method to do depends on a few things:
work.
3. The size of the TLB. The larger the TLB, the more collateral
damage we do with a full flush. So, the larger the TLB, the
-more attrative an individual flush looks. Data and
+more attractive an individual flush looks. Data and
instructions have separate TLBs, as do different page sizes.
4. The microarchitecture. The TLB has become a multi-level
cache on modern CPUs, and the global flushes have become more
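
The choice these two tlb.txt hunks describe amounts to a cut-over on the size of the flush. A hedged sketch of that decision; the threshold name and value are illustrative stand-ins, not the kernel's tunable:

#define PAGE_SZ          4096UL
#define FULL_FLUSH_PAGES 33      /* assumed cut-over, in pages */

static void flush_tlb_all_stub(void) { /* CR3 reload in real code */ }
static void flush_tlb_one_stub(unsigned long a) { (void)a; /* invlpg */ }

static void flush_tlb_range_sketch(unsigned long start, unsigned long end)
{
        unsigned long npages = (end - start) / PAGE_SZ;
        unsigned long a;

        if (npages > FULL_FLUSH_PAGES) {
                /* One cheap operation, but every unrelated TLB entry is
                 * collateral damage and must be refilled later. */
                flush_tlb_all_stub();
        } else {
                /* Precise, no collateral damage, but paid per page. */
                for (a = start; a < end; a += PAGE_SZ)
                        flush_tlb_one_stub(a);
        }
}
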
2 changes: 1 addition & 1 deletion Documentation/x86/x86_64/machinecheck
@@ -36,7 +36,7 @@ between all CPUs.

check_interval
How often to poll for corrected machine check errors, in seconds
-(Note output is hexademical). Default 5 minutes. When the poller
+(Note output is hexadecimal). Default 5 minutes. When the poller
finds MCEs it triggers an exponential speedup (poll more often) on
the polling interval. When the poller stops finding MCEs, it
triggers an exponential backoff (poll less often) on the polling
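
The speedup/backoff described for check_interval is a clamped halving/doubling of the poll period. A sketch under assumed names (the real logic lives in the x86 machine-check poller):

#define BASE_INTERVAL (5 * 60)  /* default check_interval: 5 minutes */
#define MIN_INTERVAL  1         /* assumed floor, in seconds */

static unsigned int interval = BASE_INTERVAL;

/* found_mce: did the poll that just ran see a corrected error? */
static unsigned int next_poll_interval(int found_mce)
{
        if (found_mce && interval / 2 >= MIN_INTERVAL)
                interval /= 2;          /* exponential speedup */
        else if (!found_mce && interval * 2 <= BASE_INTERVAL)
                interval *= 2;          /* exponential backoff */
        return interval;                /* seconds until the next poll */
}
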
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Psychotic Stoned Sheep

# *DOCUMENTATION*
2 changes: 2 additions & 0 deletions arch/arm64/include/asm/cputype.h
@@ -80,12 +80,14 @@
#define APM_CPU_PART_POTENZA 0x000

#define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2

#define BRCM_CPU_PART_VULCAN 0x516

#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)

#ifndef __ASSEMBLY__

2 changes: 2 additions & 0 deletions arch/arm64/include/asm/ptrace.h
@@ -117,6 +117,8 @@ struct pt_regs {
};
u64 orig_x0;
u64 syscallno;
+u64 orig_addr_limit;
+u64 unused; // maintain 16 byte alignment
};

#define arch_has_single_step() (1)
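
The 'unused' field above exists because AArch64 requires a 16-byte-aligned stack pointer and pt_regs is carved directly out of the exception stack (S_FRAME_SIZE). A compile-time check one could add to document that invariant; this assert is illustrative, not part of the commit:

#include <asm/ptrace.h>   /* kernel-internal; provides struct pt_regs */

/* If pt_regs ever loses the padding, every exception frame would
 * misalign sp; fail the build instead. */
_Static_assert(sizeof(struct pt_regs) % 16 == 0,
               "struct pt_regs must keep the stack 16-byte aligned");
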
1 change: 1 addition & 0 deletions arch/arm64/kernel/asm-offsets.c
@@ -60,6 +60,7 @@ int main(void)
DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
+DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
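
For context, DEFINE() here is the usual kbuild trick for exporting C structure offsets to assembly: the constant is smuggled into the compiler's .s output as a marker line, and the build turns each marker into a #define, so S_ORIG_ADDR_LIMIT becomes usable from entry.S. Roughly, per include/linux/kbuild.h:

/* Emits "->SYM $value origin" into the generated assembly; a sed pass
 * in the kbuild machinery rewrites it to "#define SYM value". */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
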
6 changes: 6 additions & 0 deletions arch/arm64/kernel/cpu_errata.c
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_RANGE(MIDR_THUNDERX, 0x00,
(1 << MIDR_VARIANT_SHIFT) | 1),
},
+{
+/* Cavium ThunderX, T81 pass 1.0 */
+.desc = "Cavium erratum 27456",
+.capability = ARM64_WORKAROUND_CAVIUM_27456,
+MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+},
#endif
{
}
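
The MIDR_RANGE() entries above match a CPU model plus a variant/revision window, which is how the new T81 entry pins "pass 1.0" (both bounds zero). A simplified sketch of that test; field positions follow the ARMv8 MIDR_EL1 layout, but the helper name and masks here are illustrative:

#include <stdint.h>

/* MIDR_EL1: Implementer[31:24] Variant[23:20] Arch[19:16]
 * PartNum[15:4] Revision[3:0]. */
#define MIDR_MODEL_MASK 0xff0ffff0u  /* implementer | arch | partnum */
#define MIDR_RV_MASK    0x00f0000fu  /* variant | revision */

static int midr_matches(uint32_t midr, uint32_t model,
                        uint32_t rv_min, uint32_t rv_max)
{
        if ((midr & MIDR_MODEL_MASK) != model)
                return 0;                /* different CPU model entirely */
        midr &= MIDR_RV_MASK;
        return midr >= rv_min && midr <= rv_max;  /* affected passes only */
}

With rv_min == rv_max == 0, as in the entry added above, only variant 0 / revision 0 parts receive the erratum workaround.
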
19 changes: 17 additions & 2 deletions arch/arm64/kernel/entry.S
@@ -28,6 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

@@ -97,7 +98,14 @@
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
-.endif
+get_thread_info tsk
+/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ldr x20, [tsk, #TI_ADDR_LIMIT]
+str x20, [sp, #S_ORIG_ADDR_LIMIT]
+mov x20, #TASK_SIZE_64
+str x20, [tsk, #TI_ADDR_LIMIT]
+ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+.endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
@@ -128,6 +136,14 @@
.endm

.macro kernel_exit, el
+.if \el != 0
+/* Restore the task's original addr_limit. */
+ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+str x20, [tsk, #TI_ADDR_LIMIT]
+
+/* No need to restore UAO, it will be restored from SPSR_EL1 */
+.endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
@@ -406,7 +422,6 @@ el1_irq:
bl trace_hardirqs_off
#endif

-get_thread_info tsk
irq_handler

#ifdef CONFIG_PREEMPT
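
In C terms, the kernel_entry/kernel_exit changes pair a save with a restore around every EL1 exception, so a fault taken in the middle of a set_fs(KERNEL_DS) region cannot leak KERNEL_DS into the interrupted context's handler. A rough stand-in for the assembly, with simplified types:

/* Simplified stand-ins for the kernel structures involved. */
struct thread_info_sk { unsigned long addr_limit; };
struct pt_regs_sk     { unsigned long orig_addr_limit; };

#define USER_DS_SK (1UL << 48)          /* stands in for TASK_SIZE_64 */

static struct thread_info_sk tsk_sk;    /* "current" task, for the sketch */

static void kernel_entry_sk(struct pt_regs_sk *regs)
{
        /* The new ldr/str sequence: stash the task's limit, force USER_DS. */
        regs->orig_addr_limit = tsk_sk.addr_limit;
        tsk_sk.addr_limit = USER_DS_SK;
}

static void kernel_exit_sk(struct pt_regs_sk *regs)
{
        /* Undo it on the way out; UAO comes back with SPSR_EL1 for free. */
        tsk_sk.addr_limit = regs->orig_addr_limit;
}

This is also what the fault.c hunk below relies on: the check reads regs->orig_addr_limit, the value captured at entry, rather than the limit that was just forced to USER_DS.
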
3 changes: 2 additions & 1 deletion arch/arm64/mm/fault.c
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
}

if (permission_fault(esr) && (addr < USER_DS)) {
-if (get_fs() == KERNEL_DS)
+/* regs->orig_addr_limit may be 0 if we entered from EL0 */
+if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);

if (!search_exception_tables(regs->pc))
10 changes: 6 additions & 4 deletions arch/mips/include/asm/pgtable.h
@@ -24,7 +24,7 @@ struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
-_CACHE_CACHABLE_NONCOHERENT)
+_page_cachable_default)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
-pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
-return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+(pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif

@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
-pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+(pgprot_val(newprot) & ~_PAGE_CHG_MASK);
return pmd;
}

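
The common thread in these three fixes is that pgprot_val(newprot) can carry default cache-attribute bits, and OR-ing it in unmasked lets them clobber bits pte_modify()/pmd_modify() promised to preserve. A self-contained illustration with made-up bit values:

#include <assert.h>

#define CHG_MASK 0xf0u  /* stand-in for _PAGE_CHG_MASK: bits to preserve */

static unsigned long modify_fixed(unsigned long pte, unsigned long prot)
{
        /* Fixed form: strip the preserved bits out of prot first. */
        return (pte & CHG_MASK) | (prot & ~CHG_MASK);
}

int main(void)
{
        unsigned long pte  = 0xa5;  /* preserved field 0xa0, old prot 0x05 */
        unsigned long prot = 0x53;  /* carries stray preserved bits 0x50 */

        /* The old form, (pte & CHG_MASK) | prot, yields 0xf3: the stray
         * 0x50 silently corrupts the preserved field. */
        assert(modify_fixed(pte, prot) == 0xa3);
        return 0;
}
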
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
#define KERN_VIRT_SIZE __kernel_virt_size
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */

#include <asm/book3s/64/hash.h>
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/eeh_driver.c
@@ -647,7 +647,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
pci_unlock_rescan_remove();
}
} else if (frozen_bus) {
-eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
}

/*
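
The eeh_driver.c fix is a one-character bug class worth spelling out: rmv_data is already the pointer the traversal callback expects, so taking its address hands the callback a pointer-to-pointer. An illustrative reduction (names are stand-ins, not the EEH API):

struct rmv_data_sk { int removed; };

static void cb(void *arg)
{
        struct rmv_data_sk *d = arg;  /* callback expects the struct */
        d->removed++;
}

static void traverse(void (*fn)(void *arg), void *arg) { fn(arg); }

static void reset_device_sk(struct rmv_data_sk *rmv_data)
{
        traverse(cb, rmv_data);       /* correct: what the fix passes  */
        /* traverse(cb, &rmv_data);      wrong: passes rmv_data's own
         * stack slot (a struct rmv_data_sk **), and cb scribbles it.  */
}
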
1 change: 0 additions & 1 deletion arch/powerpc/kernel/pci_64.c
@@ -47,7 +47,6 @@ static int __init pcibios_init(void)

printk(KERN_INFO "PCI: Probing PCI hardware\n");

-pci_io_base = ISA_IO_BASE;
/* For now, override phys_mem_access_prot. If we need it,g
* later, we may move that initialization to each ppc_md
*/
10 changes: 10 additions & 0 deletions arch/powerpc/kernel/process.c
@@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.regs = regs - 1;
}

+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+* Clear any transactional state, we're exec()ing. The cause is
+* not important as there will never be a recheckpoint so it's not
+* user visible.
+*/
+if (MSR_TM_SUSPENDED(mfmsr()))
+tm_reclaim_current(0);
+#endif
+
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;
61 changes: 44 additions & 17 deletions arch/powerpc/kernel/tm.S
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)

-/* We need to setup MSR for VSX register save instructions. Here we
-* also clear the MSR RI since when we do the treclaim, we won't have a
-* valid kernel pointer for a while. We clear RI here as it avoids
-* adding another mtmsr closer to the treclaim. This makes the region
-* maked as non-recoverable wider than it needs to be but it saves on
-* inserting another mtmsrd later.
-*/
+/* We need to setup MSR for VSX register save instructions. */
mfmsr r14
mr r15, r14
ori r15, r15, MSR_FP
-li r16, MSR_RI
+li r16, 0
ori r16, r16, MSR_EE /* IRQs hard off */
andc r15, r15, r16
oris r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

-/* The moment we treclaim, ALL of our GPRs will switch
+/* Clear MSR RI since we are about to change r1, EE is already off. */
+li r4, 0
+mtmsrd r4, 1
+
+/*
+* BE CAREFUL HERE:
+* At this point we can't take an SLB miss since we have MSR_RI
+* off. Load only to/from the stack/paca which are in SLB bolted regions
+* until we turn MSR RI back on.
+*
+* The moment we treclaim, ALL of our GPRs will switch
* to user register state. (FPRs, CCR etc. also!)
* Use an sprg and a tm_scratch in the PACA to shuffle.
*/
@@ -197,6 +201,11 @@

/* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+
+/* Reset MSR RI so we can take SLB faults again */
+li r11, MSR_RI
+mtmsrd r11, 1
+
mfspr r11, SPRN_PPR
HMT_MEDIUM

@@ -397,11 +406,6 @@ restore_gprs:
ld r5, THREAD_TM_DSCR(r3)
ld r6, THREAD_TM_PPR(r3)

-/* Clear the MSR RI since we are about to change R1. EE is already off
-*/
-li r4, 0
-mtmsrd r4, 1
-
REST_GPR(0, r7) /* GPR0 */
REST_2GPRS(2, r7) /* GPR2-3 */
REST_GPR(4, r7) /* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
ld r6, _CCR(r7)
mtcr r6

-REST_GPR(1, r7) /* GPR1 */
-REST_GPR(5, r7) /* GPR5-7 */
-REST_GPR(6, r7)
-ld r7, GPR7(r7)
+
+/*
+* Store r1 and r5 on the stack so that we can access them
+* after we clear MSR RI.
+*/
+
+REST_GPR(5, r7)
+std r5, -8(r1)
+ld r5, GPR1(r7)
+std r5, -16(r1)
+
+REST_GPR(7, r7)
+
+/* Clear MSR RI since we are about to change r1. EE is already off */
+li r5, 0
+mtmsrd r5, 1
+
+/*
+* BE CAREFUL HERE:
+* At this point we can't take an SLB miss since we have MSR_RI
+* off. Load only to/from the stack/paca which are in SLB bolted regions
+* until we turn MSR RI back on.
+*/
+
+ld r5, -8(r1)
+ld r1, -16(r1)

/* Commit register state as checkpointed state: */
TRECHKPT
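
Both tm.S paths now follow the same three-step discipline around the unrecoverable window. A schematic summary of the ordering the comments above insist on; every function here is a stand-in for the ld/std/mtmsrd sequences, not a real API:

static void spill_to_bolted_stack(void) { /* std r5,-8(r1); std r5,-16(r1) */ }
static void clear_msr_ri(void)          { /* li r5,0; mtmsrd r5,1          */ }
static void switch_r1_and_reload(void)  { /* ld r5,-8(r1); ld r1,-16(r1)   */ }
static void set_msr_ri(void)            { /* li r5,MSR_RI; mtmsrd r5,1     */ }

static void ri_off_window(void)
{
        spill_to_bolted_stack();  /* 1: park live values where no SLB
                                   *    miss can ever be needed       */
        clear_msr_ri();           /* 2: any exception is now fatal... */
        switch_r1_and_reload();   /*    ...so touch only stack/paca   */
        set_msr_ri();             /* 3: SLB faults are safe again     */
}
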
4 changes: 4 additions & 0 deletions arch/powerpc/mm/hash_utils_64.c
@@ -922,6 +922,10 @@ void __init hash__early_init_mmu(void)
vmemmap = (struct page *)H_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;

+#ifdef CONFIG_PCI
+pci_io_base = ISA_IO_BASE;
+#endif
+
/* Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before SLB initialization as this is
* currently where the page size encoding is obtained.
5 changes: 5 additions & 0 deletions arch/powerpc/mm/pgtable-radix.c
@@ -328,6 +328,11 @@ void __init radix__early_init_mmu(void)
__vmalloc_end = RADIX_VMALLOC_END;
vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+pci_io_base = ISA_IO_BASE;
+#endif
+
/*
* For now radix also use the same frag size
*/