
Commit

---
r: 300123
b: refs/heads/master
c: a7ac56d
h: refs/heads/master
i:
  300121: d8fa28e
  300119: ecf0436
v: v3
Ian Campbell authored and Jason Cooper committed May 8, 2012
1 parent 8deb5f8 commit cf88297
Showing 29 changed files with 284 additions and 179 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 22b6dd78aec32abf38d9b187dea2e0a8b28aa186
refs/heads/master: a7ac56de8316c0eb1111824c9add045cac2bd7a2
24 changes: 15 additions & 9 deletions trunk/arch/arm/kernel/ptrace.c
@@ -906,14 +906,27 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}

#ifdef __ARMEB__
#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
#else
#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
#endif

asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;

if (why)
/*
* Save IP. IP is used to denote syscall entry/exit:
* IP = 0 -> entry, = 1 -> exit
*/
ip = regs->ARM_ip;
regs->ARM_ip = why;

if (!ip)
audit_syscall_exit(regs);
else
audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);

if (!test_thread_flag(TIF_SYSCALL_TRACE))
@@ -923,13 +936,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)

current_thread_info()->syscall = scno;

/*
* IP is used to denote syscall entry/exit:
* IP = 0 -> entry, =1 -> exit
*/
ip = regs->ARM_ip;
regs->ARM_ip = why;

/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
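
The ptrace.c hunk above selects the audit arch constant at build time: a big-endian kernel (__ARMEB__) reports AUDIT_ARCH_ARMEB to the audit subsystem, while a little-endian one reports AUDIT_ARCH_ARM. As a rough illustration only, the same compile-time selection can be exercised from userspace; this sketch is not the kernel code itself and assumes a <linux/audit.h> header that provides both constants:

    /* Illustrative sketch, not kernel code: mirrors the #ifdef __ARMEB__
     * selection shown above. AUDIT_ARCH_ARM and AUDIT_ARCH_ARMEB come from
     * <linux/audit.h>; __ARMEB__ is the compiler's big-endian ARM predefine. */
    #include <stdio.h>
    #include <linux/audit.h>

    #ifdef __ARMEB__
    #define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB   /* big-endian ARM */
    #else
    #define AUDIT_ARCH_NR AUDIT_ARCH_ARM     /* little-endian ARM */
    #endif

    int main(void)
    {
        /* syscall entry would hand this constant to audit_syscall_entry() */
        printf("audit arch constant: 0x%x\n", (unsigned int)AUDIT_ARCH_NR);
        return 0;
    }

With the constant keyed off __ARMEB__, audit records emitted by a big-endian kernel identify themselves as ARMEB instead of claiming little-endian ARM.
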
4 changes: 2 additions & 2 deletions trunk/arch/arm/kernel/smp.c
@@ -251,6 +251,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();

printk("CPU%u: Booted secondary processor\n", cpu);

/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
@@ -262,8 +264,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
enter_lazy_tlb(mm, current);
local_flush_tlb_all();

printk("CPU%u: Booted secondary processor\n", cpu);

cpu_init();
preempt_disable();
trace_hardirqs_off();
2 changes: 1 addition & 1 deletion trunk/arch/arm/kernel/sys_arm.c
@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
"Ir" (THREAD_START_SP - sizeof(regs)),
"r" (&regs),
"Ir" (sizeof(regs))
: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
: "r0", "r1", "r2", "r3", "ip", "lr", "memory");

out:
return ret;
1 change: 1 addition & 0 deletions trunk/arch/arm/mach-kirkwood/board-dt.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/kexec.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/bridge-regs.h>
2 changes: 1 addition & 1 deletion trunk/arch/ia64/kvm/kvm-ia64.c
@@ -1174,7 +1174,7 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{
return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
7 changes: 7 additions & 0 deletions trunk/arch/powerpc/include/asm/exception-64s.h
@@ -288,6 +288,13 @@ label##_hv: \
/* Exception addition: Hard disable interrupts */
#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)

/* Exception addition: Keep interrupt state */
#define ENABLE_INTS \
ld r11,PACAKMSR(r13); \
ld r12,_MSR(r1); \
rlwimi r11,r12,0,MSR_EE; \
mtmsrd r11,1

#define ADD_NVGPRS \
bl .save_nvgprs

18 changes: 18 additions & 0 deletions trunk/arch/powerpc/kernel/entry_64.S
@@ -767,6 +767,16 @@ do_work:
SOFT_DISABLE_INTS(r3,r4)
1: bl .preempt_schedule_irq

/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13)

/* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r9)
@@ -777,6 +787,14 @@ do_work:
user_work:
#endif /* CONFIG_PREEMPT */

/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
ori r10,r10,MSR_EE
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */

andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
bl .restore_interrupts
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/exceptions-64s.S
@@ -768,8 +768,8 @@ alignment_common:
std r3,_DAR(r1)
std r4,_DSISR(r1)
bl .save_nvgprs
DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .alignment_exception
b .ret_from_except

8 changes: 1 addition & 7 deletions trunk/arch/powerpc/kernel/irq.c
@@ -260,17 +260,11 @@ EXPORT_SYMBOL(arch_local_irq_restore);
* if they are currently disabled. This is typically called before
* schedule() or do_signal() when returning to userspace. We do it
* in C to avoid the burden of dealing with lockdep etc...
*
* NOTE: This is called with interrupts hard disabled but not marked
* as such in paca->irq_happened, so we need to resync this.
*/
void restore_interrupts(void)
{
if (irqs_disabled()) {
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
if (irqs_disabled())
local_irq_enable();
} else
__hard_irq_enable();
}

#endif /* CONFIG_PPC64 */
10 changes: 2 additions & 8 deletions trunk/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
addr, regs->nip, regs->link, code);
}

if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
if (!arch_irq_disabled_regs(regs))
local_irq_enable();

memset(&info, 0, sizeof(info));
@@ -1019,9 +1019,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
}

/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1071,10 +1069,6 @@ void alignment_exception(struct pt_regs *regs)
{
int sig, code, fixed = 0;

/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();

/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
22 changes: 9 additions & 13 deletions trunk/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -258,8 +258,6 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
!(memslot->userspace_addr & (s - 1))) {
start &= ~(s - 1);
pgsize = s;
get_page(hpage);
put_page(page);
page = hpage;
}
}
@@ -283,8 +281,11 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
err = 0;

out:
if (got)
if (got) {
if (PageHuge(page))
page = compound_head(page);
put_page(page);
}
return err;

up_err:
@@ -677,15 +678,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
SetPageDirty(page);

out_put:
if (page) {
/*
* We drop pages[0] here, not page because page might
* have been set to the head page of a compound, but
* we have to drop the reference on the correct tail
* page to match the get inside gup()
*/
put_page(pages[0]);
}
if (page)
put_page(page);
return ret;

out_unlock:
@@ -985,7 +979,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
pa = *physp;
}
page = pfn_to_page(pa >> PAGE_SHIFT);
get_page(page);
} else {
hva = gfn_to_hva_memslot(memslot, gfn);
npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -998,6 +991,8 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
page = compound_head(page);
psize <<= compound_order(page);
}
if (!kvm->arch.using_mmu_notifiers)
get_page(page);
offset = gpa & (psize - 1);
if (nb_ret)
*nb_ret = psize - offset;
@@ -1008,6 +1003,7 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
struct page *page = virt_to_page(va);

page = compound_head(page);
put_page(page);
}

2 changes: 2 additions & 0 deletions trunk/arch/powerpc/kvm/book3s_hv.c
@@ -1192,6 +1192,8 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
continue;
pfn = physp[j] >> PAGE_SHIFT;
page = pfn_to_page(pfn);
if (PageHuge(page))
page = compound_head(page);
SetPageDirty(page);
put_page(page);
}
9 changes: 8 additions & 1 deletion trunk/arch/x86/kernel/kvm.c
@@ -79,6 +79,7 @@ struct kvm_task_sleep_node {
u32 token;
int cpu;
bool halted;
struct mm_struct *mm;
};

static struct kvm_task_sleep_head {
@@ -125,7 +126,9 @@ void kvm_async_pf_task_wait(u32 token)

n.token = token;
n.cpu = smp_processor_id();
n.mm = current->active_mm;
n.halted = idle || preempt_count() > 1;
atomic_inc(&n.mm->mm_count);
init_waitqueue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
spin_unlock(&b->lock);
@@ -158,6 +161,9 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
hlist_del_init(&n->link);
if (!n->mm)
return;
mmdrop(n->mm);
if (n->halted)
smp_send_reschedule(n->cpu);
else if (waitqueue_active(&n->wq))
@@ -201,7 +207,7 @@ void kvm_async_pf_task_wake(u32 token)
* async PF was not yet handled.
* Add dummy entry for the token.
*/
n = kzalloc(sizeof(*n), GFP_ATOMIC);
n = kmalloc(sizeof(*n), GFP_ATOMIC);
if (!n) {
/*
* Allocation failed! Busy wait while other cpu
@@ -213,6 +219,7 @@
}
n->token = token;
n->cpu = smp_processor_id();
n->mm = NULL;
init_waitqueue_head(&n->wq);
hlist_add_head(&n->link, &b->list);
} else
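
The x86/kernel/kvm.c hunk pairs an mm reference taken when a sleep node is queued in kvm_async_pf_task_wait() (atomic_inc(&n.mm->mm_count)) with an mmdrop() in apf_task_wake_one(); the dummy node built in kvm_async_pf_task_wake() sets mm to NULL so the wake path skips the drop. Below is a minimal userspace sketch of that pin-before-publish / unpin-on-wake pattern; every name in it (resource, sleep_node, pin, unpin, wake_one) is made up for illustration and merely stands in for the kernel objects:

    /* Sketch only: the refcount discipline of the async-PF sleep nodes.
     * A node either carries a pinned object, or NULL when it is a dummy
     * entry and nothing was pinned at queue time. */
    #include <assert.h>
    #include <stdio.h>
    #include <stddef.h>

    struct resource { int refs; };                /* stands in for struct mm_struct */
    struct sleep_node { struct resource *res; };  /* stands in for kvm_task_sleep_node */

    static void pin(struct resource *r)   { r->refs++; }  /* cf. atomic_inc(&mm->mm_count) */
    static void unpin(struct resource *r) { r->refs--; }  /* cf. mmdrop(mm) */

    static void wake_one(struct sleep_node *n)
    {
        if (!n->res)        /* dummy entry: nothing to drop */
            return;
        unpin(n->res);      /* balance the pin taken at queue time */
    }

    int main(void)
    {
        struct resource mm = { 1 };

        struct sleep_node waiter = { &mm };
        pin(waiter.res);                      /* queue-time pin */

        struct sleep_node dummy = { NULL };   /* wake arrived before the wait */

        wake_one(&waiter);
        wake_one(&dummy);

        assert(mm.refs == 1);                 /* every pin matched by an unpin */
        printf("refs after wake: %d\n", mm.refs);
        return 0;
    }
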
1 change: 0 additions & 1 deletion trunk/arch/x86/kernel/process_64.c
@@ -423,7 +423,6 @@ void set_personality_ia32(bool x32)
current_thread_info()->status |= TS_COMPAT;
}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

unsigned long get_wchan(struct task_struct *p)
{
14 changes: 1 addition & 13 deletions trunk/arch/x86/kernel/setup_percpu.c
@@ -185,22 +185,10 @@ void __init setup_per_cpu_areas(void)
#endif
rc = -EINVAL;
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
const size_t dyn_size = PERCPU_MODULE_RESERVE +
PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
size_t atom_size;

/*
* On 64bit, use PMD_SIZE for atom_size so that embedded
* percpu areas are aligned to PMD. This, in the future,
* can also allow using PMD mappings in vmalloc area. Use
* PAGE_SIZE on 32bit as vmalloc space is highly contended
* and large vmalloc area allocs can easily fail.
*/
#ifdef CONFIG_X86_64
atom_size = PMD_SIZE;
#else
atom_size = PAGE_SIZE;
#endif
rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
dyn_size, atom_size,
pcpu_cpu_distance,
1 change: 0 additions & 1 deletion trunk/arch/x86/kvm/x86.c
@@ -6581,7 +6581,6 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
kvm_inject_page_fault(vcpu, &fault);
}
vcpu->arch.apf.halted = false;
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)