
Commit 61beeed
---
r: 376829
b: refs/heads/master
c: 6ea31c5
h: refs/heads/master
i:
  376827: b76f7e4
v: v3
Linus Torvalds committed Jun 8, 2013
1 parent c218b81 commit 61beeed
Showing 230 changed files with 2,509 additions and 1,240 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 0e32fde96bb9c1fa8fa477e52c1d6ae2f4995cea
+refs/heads/master: 6ea31c56a54ef79b46b56ad35a308db474d600d3
6 changes: 3 additions & 3 deletions trunk/Documentation/dmatest.txt
@@ -34,7 +34,7 @@ command:
After a while you will start to get messages about current status or error like
in the original code.

-Note that running a new test will stop any in progress test.
+Note that running a new test will not stop any in progress test.

The following command should return actual state of the test.
% cat /sys/kernel/debug/dmatest/run
@@ -52,8 +52,8 @@ To wait for test done the user may perform a busy loop that checks the state.

The module parameters that is supplied to the kernel command line will be used
for the first performed test. After user gets a control, the test could be
-interrupted or re-run with same or different parameters. For the details see
-the above section "Part 2 - When dmatest is built as a module..."
+re-run with the same or different parameters. For the details see the above
+section "Part 2 - When dmatest is built as a module..."

In both cases the module parameters are used as initial values for the test case.
You always could check them at run-time by running
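For illustration, a minimal userspace sketch of the busy loop the document describes (hypothetical, not part of this commit; it assumes debugfs is mounted at /sys/kernel/debug and the dmatest module is loaded):

    /* Start a dmatest run, then poll the debugfs "run" file until it
     * stops reporting "Y" (test still running). */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/debug/dmatest/run", "w");
            char state[8] = "";

            if (!f)
                    return 1;
            fputs("1\n", f);        /* kick off a new test */
            fclose(f);

            do {                    /* busy loop on the reported state */
                    sleep(1);
                    f = fopen("/sys/kernel/debug/dmatest/run", "r");
                    if (!f)
                            return 1;
                    if (!fgets(state, sizeof(state), f))
                            state[0] = '\0';
                    fclose(f);
            } while (state[0] == 'Y');

            return 0;
    }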
3 changes: 3 additions & 0 deletions trunk/Documentation/filesystems/xfs.txt
@@ -33,6 +33,9 @@ When mounting an XFS filesystem, the following options are accepted.
removing extended attributes) the on-disk superblock feature
bit field will be updated to reflect this format being in use.

+CRC enabled filesystems always use the attr2 format, and so
+will reject the noattr2 mount option if it is set.

barrier
Enables the use of block layer write barriers for writes into
the journal and unwritten extent conversion. This allows for
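As a hedged illustration of the new xfs.txt paragraph (the device and mount point below are placeholder names): on a CRC enabled (v5 superblock) filesystem, a mount that passes noattr2 is now expected to fail, so callers should check the error:

    /* Attempt an XFS mount with the noattr2 option; per the note above,
     * CRC enabled filesystems reject it. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount("/dev/sdb1", "/mnt/scratch", "xfs", 0, "noattr2")) {
                    fprintf(stderr, "mount: %s\n", strerror(errno));
                    return 1;       /* expected on CRC filesystems */
            }
            return 0;
    }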
12 changes: 11 additions & 1 deletion trunk/MAINTAINERS
@@ -2890,8 +2890,8 @@ F: drivers/media/dvb-frontends/ec100*

ECRYPT FILE SYSTEM
M: Tyler Hicks <tyhicks@canonical.com>
-M: Dustin Kirkland <dustin.kirkland@gazzang.com>
L: ecryptfs@vger.kernel.org
+W: http://ecryptfs.org
W: https://launchpad.net/ecryptfs
S: Supported
F: Documentation/filesystems/ecryptfs.txt
@@ -4448,6 +4448,16 @@ S: Maintained
F: drivers/scsi/*iscsi*
F: include/scsi/*iscsi*

+ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
+M: Or Gerlitz <ogerlitz@mellanox.com>
+M: Roi Dayan <roid@mellanox.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+W: http://www.openfabrics.org
+W: www.open-iscsi.org
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+F: drivers/infiniband/ulp/iser

ISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
27 changes: 4 additions & 23 deletions trunk/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)	0
-#else
-#define tlb_fast_mode(tlb)	1
-#endif

#define MMU_GATHER_BUNDLE 8

/*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-		if (tlb->pages == tlb->local)
-			__tlb_alloc_page(tlb);
-	}
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
}

static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
tlb->pages[tlb->nr++] = page;
VM_BUG_ON(tlb->nr > tlb->max);
return tlb->max - tlb->nr;
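The deleted tlb_fast_mode() path freed pages immediately; with it gone, every unmap goes through the batched path above, for the reason the removed comment gave: pages must not be freed while another CPU (or ARMv7 speculative prefetch) can still reach them through stale TLB entries. A distilled, self-contained sketch of that ordering, with simplified stand-in types (not the kernel's actual code):

    #include <stdio.h>

    struct gather {
            void *pages[8];                 /* mirrors mmu_gather's local batch */
            unsigned int nr;
    };

    static void flush_tlb_entries(void)
    {
            printf("tlb flushed\n");        /* stand-in for tlb_flush(tlb) */
    }

    static void gather_flush(struct gather *g)
    {
            unsigned int i;

            flush_tlb_entries();            /* first: no CPU can reach the pages */
            for (i = 0; i < g->nr; i++)     /* only then is freeing safe */
                    printf("free page %p\n", g->pages[i]);
            g->nr = 0;
    }

    int main(void)
    {
            struct gather g = { .nr = 0 };

            g.pages[g.nr++] = (void *)0x1000;
            g.pages[g.nr++] = (void *)0x2000;
            gather_flush(&g);
            return 0;
    }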
15 changes: 13 additions & 2 deletions trunk/arch/arm/kvm/arm.c
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
wait_event_interruptible(*wq, !vcpu->arch.pause);
}

+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.target >= 0;
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int ret;
sigset_t sigsaved;

-	/* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-	if (unlikely(vcpu->arch.target < 0))
+	if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;

ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;

+		if (unlikely(!kvm_vcpu_initialized(vcpu)))
+			return -ENOEXEC;

if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_reg_list reg_list;
unsigned n;

+		if (unlikely(!kvm_vcpu_initialized(vcpu)))
+			return -ENOEXEC;

if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
return -EFAULT;
n = reg_list.n;
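Seen from userspace, the new checks mean the register ioctls fail with ENOEXEC until KVM_ARM_VCPU_INIT has succeeded. A hedged sketch of the required ordering (vcpu_fd and the chosen target are assumptions for illustration; ARM-only):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Read one register from an already-created VCPU file descriptor. */
    int read_reg(int vcpu_fd, __u64 id, __u64 *val)
    {
            struct kvm_vcpu_init init;
            struct kvm_one_reg reg = {
                    .id = id,
                    .addr = (__u64)(unsigned long)val,
            };

            memset(&init, 0, sizeof(init));
            init.target = KVM_ARM_TARGET_CORTEX_A15;        /* example target */
            if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init))   /* must come first... */
                    return -1;
            /* ...otherwise this would now fail with errno == ENOEXEC */
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }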
41 changes: 26 additions & 15 deletions trunk/arch/arm/kvm/mmu.c
@@ -43,7 +43,14 @@ static phys_addr_t hyp_idmap_vector;

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
-	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+	/*
+	 * This function also gets called when dealing with HYP page
+	 * tables. As HYP doesn't have an associated struct kvm (and
+	 * the HYP page tables are fairly static), we don't do
+	 * anything there.
+	 */
+	if (kvm)
+		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
Expand Down Expand Up @@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
return p;
}

-static void clear_pud_entry(pud_t *pud)
+static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
pmd_t *pmd_table = pmd_offset(pud, 0);
pud_clear(pud);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
pmd_free(NULL, pmd_table);
put_page(virt_to_page(pud));
}

-static void clear_pmd_entry(pmd_t *pmd)
+static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
pte_t *pte_table = pte_offset_kernel(pmd, 0);
pmd_clear(pmd);
+	kvm_tlb_flush_vmid_ipa(kvm, addr);
pte_free_kernel(NULL, pte_table);
put_page(virt_to_page(pmd));
}
@@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd)
return page_count(pmd_page) == 1;
}

-static void clear_pte_entry(pte_t *pte)
+static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
if (pte_present(*pte)) {
kvm_set_pte(pte, __pte(0));
put_page(virt_to_page(pte));
+		kvm_tlb_flush_vmid_ipa(kvm, addr);
}
}

@@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte)
return page_count(pte_page) == 1;
}

-static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+			unsigned long long start, u64 size)
{
pgd_t *pgd;
pud_t *pud;
@@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
}

pte = pte_offset_kernel(pmd, addr);
-		clear_pte_entry(pte);
+		clear_pte_entry(kvm, pte, addr);
range = PAGE_SIZE;

/* If we emptied the pte, walk back up the ladder */
if (pte_empty(pte)) {
-			clear_pmd_entry(pmd);
+			clear_pmd_entry(kvm, pmd, addr);
range = PMD_SIZE;
if (pmd_empty(pmd)) {
-				clear_pud_entry(pud);
+				clear_pud_entry(kvm, pud, addr);
range = PUD_SIZE;
}
}
@@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void)
mutex_lock(&kvm_hyp_pgd_mutex);

if (boot_hyp_pgd) {
-		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
kfree(boot_hyp_pgd);
boot_hyp_pgd = NULL;
}

if (hyp_pgd)
-		unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

kfree(init_bounce_page);
init_bounce_page = NULL;
@@ -200,9 +211,10 @@ void free_hyp_pgds(void)

if (hyp_pgd) {
for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+
kfree(hyp_pgd);
hyp_pgd = NULL;
}
@@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
*/
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
-	unmap_range(kvm->arch.pgd, start, size);
+	unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
@@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm,
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-	kvm_tlb_flush_vmid_ipa(kvm, gpa);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
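A minimal runnable sketch of the convention this file now follows (simplified types, no real MMU; not the kernel's code): TLB invalidation moves inside the clear_*_entry() helpers so it happens as soon as an entry is cleared, before any table page is freed, and HYP page tables pass kvm == NULL to skip the stage-2 flush:

    #include <stdio.h>

    struct kvm { int vmid; };

    static void tlb_flush_vmid_ipa(struct kvm *kvm, unsigned long ipa)
    {
            if (!kvm)
                    return;         /* HYP tables: nothing to flush */
            printf("flush vmid %d ipa %#lx\n", kvm->vmid, ipa);
    }

    static void clear_pte(struct kvm *kvm, unsigned long *pte, unsigned long ipa)
    {
            *pte = 0;                       /* clear the entry... */
            tlb_flush_vmid_ipa(kvm, ipa);   /* ...then invalidate, before any free */
    }

    int main(void)
    {
            struct kvm guest = { .vmid = 1 };
            unsigned long guest_pte = 0xdead, hyp_pte = 0xbeef;

            clear_pte(&guest, &guest_pte, 0x1000);  /* guest: flush happens */
            clear_pte(NULL, &hyp_pte, 0x2000);      /* HYP: flush skipped */
            return 0;
    }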
41 changes: 8 additions & 33 deletions trunk/arch/ia64/include/asm/tlb.h
@@ -46,12 +46,6 @@
#include <asm/tlbflush.h>
#include <asm/machvec.h>

-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb)	(1)
-#endif
-
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
@@ -60,7 +54,7 @@

struct mmu_gather {
struct mm_struct *mm;
-	unsigned int nr;	/* == ~0U => fast mode */
+	unsigned int nr;
unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
+	unsigned long i;
unsigned int nr;

if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e

/* lastly, release the freed pages */
nr = tlb->nr;
-	if (!tlb_fast_mode(tlb)) {
-		unsigned long i;
-		tlb->nr = 0;
-		tlb->start_addr = ~0UL;
-		for (i = 0; i < nr; ++i)
-			free_page_and_swap_cache(tlb->pages[i]);
-	}

+	tlb->nr = 0;
+	tlb->start_addr = ~0UL;
+	for (i = 0; i < nr; ++i)
+		free_page_and_swap_cache(tlb->pages[i]);
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
Expand All @@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
-	/*
-	 * Use fast mode if only 1 CPU is online.
-	 *
-	 * It would be tempting to turn on fast-mode for full_mm_flush as well. But this
-	 * doesn't work because of speculative accesses and software prefetching: the page
-	 * table of "mm" may (and usually is) the currently active page table and even
-	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
-	 * compiler might use speculation or lfetch.fault on what happens to be a valid
-	 * user-space address. This in turn could trigger a TLB miss fault (or a VHPT
-	 * walk) and re-insert a TLB entry we just removed. Slow mode avoids such
-	 * problems. (We could make fast-mode work by switching the current task to a
-	 * different "mm" during the shootdown.) --davidm 08/02/2002
-	 */
-	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+	tlb->nr = 0;
tlb->fullmm = full_mm_flush;
tlb->start_addr = ~0UL;
}
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
tlb->need_flush = 1;

-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
if (!tlb->nr && tlb->pages == tlb->local)
__tlb_alloc_page(tlb);

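With fast mode gone on ia64 as well, the contract that remains is the batching one visible in __tlb_remove_page(): it returns the space left in the batch, and a return of 0 tells the caller to flush before removing more pages. A self-contained sketch of that contract (simplified, not the kernel's code):

    #include <stdio.h>

    #define BATCH_MAX 8

    struct batch {
            void *pages[BATCH_MAX];
            unsigned int nr;
            unsigned int max;
    };

    static int batch_remove_page(struct batch *b, void *page)
    {
            b->pages[b->nr++] = page;
            return b->max - b->nr;          /* 0 => caller must flush now */
    }

    static void batch_flush(struct batch *b)
    {
            printf("flushing %u pages\n", b->nr);   /* TLB flush, then free */
            b->nr = 0;
    }

    int main(void)
    {
            struct batch b = { .nr = 0, .max = BATCH_MAX };
            int i;

            for (i = 0; i < 20; i++)
                    if (!batch_remove_page(&b, (void *)0))
                            batch_flush(&b);
            batch_flush(&b);                /* drain the tail of the batch */
            return 0;
    }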
[diff truncated: the remaining changed files are not shown]
