diff --git a/[refs] b/[refs]
index 1f2aa9a69c6e..80e9ab5bd709 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0e053738b50836e9d1e94b2295ef2942eb471078
+refs/heads/master: 1b7558e457ed0de61023cfc913d2c342c7c3d9f2
diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c
index c048de34d6a1..c8a56e457d61 100644
--- a/trunk/arch/x86/xen/enlighten.c
+++ b/trunk/arch/x86/xen/enlighten.c
@@ -1228,11 +1228,6 @@ asmlinkage void __init xen_start_kernel(void)
 	if (xen_feature(XENFEAT_supervisor_mode_kernel))
 		pv_info.kernel_rpl = 0;
 
-	/* Prevent unwanted bits from being set in PTEs. */
-	__supported_pte_mask &= ~_PAGE_GLOBAL;
-	if (!is_initial_xendomain())
-		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
-
 	/* set the limit of our address space */
 	xen_reserve_top();
 
diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c
index 265601d5a6ae..3525ef523a74 100644
--- a/trunk/arch/x86/xen/mmu.c
+++ b/trunk/arch/x86/xen/mmu.c
@@ -179,54 +179,48 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	preempt_enable();
 }
 
-/* Assume pteval_t is equivalent to all the other *val_t types. */
-static pteval_t pte_mfn_to_pfn(pteval_t val)
-{
-	if (val & _PAGE_PRESENT) {
-		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
-		pteval_t flags = val & ~PTE_MASK;
-		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
-	}
-
-	return val;
-}
-
-static pteval_t pte_pfn_to_mfn(pteval_t val)
+pteval_t xen_pte_val(pte_t pte)
 {
-	if (val & _PAGE_PRESENT) {
-		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
-		pteval_t flags = val & ~PTE_MASK;
-		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
-	}
+	pteval_t ret = pte.pte;
 
-	return val;
-}
+	if (ret & _PAGE_PRESENT)
+		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
 
-pteval_t xen_pte_val(pte_t pte)
-{
-	return pte_mfn_to_pfn(pte.pte);
+	return ret;
 }
 
 pgdval_t xen_pgd_val(pgd_t pgd)
 {
-	return pte_mfn_to_pfn(pgd.pgd);
+	pgdval_t ret = pgd.pgd;
+	if (ret & _PAGE_PRESENT)
+		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
+	return ret;
 }
 
 pte_t xen_make_pte(pteval_t pte)
 {
-	pte = pte_pfn_to_mfn(pte);
-	return native_make_pte(pte);
+	if (pte & _PAGE_PRESENT) {
+		pte = phys_to_machine(XPADDR(pte)).maddr;
+		pte &= ~(_PAGE_PCD | _PAGE_PWT);
+	}
+
+	return (pte_t){ .pte = pte };
 }
 
 pgd_t xen_make_pgd(pgdval_t pgd)
 {
-	pgd = pte_pfn_to_mfn(pgd);
-	return native_make_pgd(pgd);
+	if (pgd & _PAGE_PRESENT)
+		pgd = phys_to_machine(XPADDR(pgd)).maddr;
+
+	return (pgd_t){ pgd };
 }
 
 pmdval_t xen_pmd_val(pmd_t pmd)
 {
-	return pte_mfn_to_pfn(pmd.pmd);
+	pmdval_t ret = native_pmd_val(pmd);
+	if (ret & _PAGE_PRESENT)
+		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
+	return ret;
 }
 #ifdef CONFIG_X86_PAE
 void xen_set_pud(pud_t *ptr, pud_t val)
@@ -273,7 +267,9 @@ void xen_pmd_clear(pmd_t *pmdp)
 
 pmd_t xen_make_pmd(pmdval_t pmd)
 {
-	pmd = pte_pfn_to_mfn(pmd);
+	if (pmd & _PAGE_PRESENT)
+		pmd = phys_to_machine(XPADDR(pmd)).maddr;
+
 	return native_make_pmd(pmd);
 }
 #else  /* !PAE */
diff --git a/trunk/arch/x86/xen/xen-head.S b/trunk/arch/x86/xen/xen-head.S
index 3175e973fd0d..288d587ce73c 100644
--- a/trunk/arch/x86/xen/xen-head.S
+++ b/trunk/arch/x86/xen/xen-head.S
@@ -17,7 +17,7 @@ ENTRY(startup_xen)
 
 	__FINIT
 
-.pushsection .text
+.pushsection .bss.page_aligned
 	.align PAGE_SIZE_asm
 ENTRY(hypercall_page)
 	.skip 0x1000
diff --git a/trunk/drivers/char/tty_ioctl.c b/trunk/drivers/char/tty_ioctl.c
index 8f81139d6194..b1a757a5ee27 100644
--- a/trunk/drivers/char/tty_ioctl.c
+++ b/trunk/drivers/char/tty_ioctl.c
@@ -981,9 +981,16 @@ EXPORT_SYMBOL_GPL(tty_perform_flush);
 int n_tty_ioctl(struct tty_struct *tty, struct file *file,
 		       unsigned int cmd, unsigned long arg)
 {
+	struct tty_struct *real_tty;
 	unsigned long flags;
 	int retval;
 
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_MASTER)
+		real_tty = tty->link;
+	else
+		real_tty = tty;
+
 	switch (cmd) {
 	case TCXONC:
 		retval = tty_check_change(tty);
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_memfree.c b/trunk/drivers/infiniband/hw/mthca/mthca_memfree.c
index d5862e5d99a0..b224079d4e1f 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -109,11 +109,7 @@ static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_m
 {
 	struct page *page;
 
-	/*
-	 * Use __GFP_ZERO because buggy firmware assumes ICM pages are
-	 * cleared, and subtle failures are seen if they aren't.
-	 */
-	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
+	page = alloc_pages(gfp_mask, order);
 	if (!page)
 		return -ENOMEM;
 
diff --git a/trunk/drivers/lguest/x86/core.c b/trunk/drivers/lguest/x86/core.c
index 2e554a4ab337..5126d5d9ea0e 100644
--- a/trunk/drivers/lguest/x86/core.c
+++ b/trunk/drivers/lguest/x86/core.c
@@ -176,7 +176,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	 * we set it now, so we can trap and pass that trap to the Guest if it
 	 * uses the FPU. */
 	if (cpu->ts)
-		unlazy_fpu(current);
+		lguest_set_ts();
 
 	/* SYSENTER is an optimized way of doing system calls.  We can't allow
 	 * it because it always jumps to privilege level 0.  A normal Guest
@@ -196,10 +196,6 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	 * trap made the switcher code come back, and an error code which some
 	 * traps set.  */
 
-	/* Restore SYSENTER if it's supposed to be on. */
-	if (boot_cpu_has(X86_FEATURE_SEP))
-		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-
 	/* If the Guest page faulted, then the cr2 register will tell us the
 	 * bad virtual address.  We have to grab this now, because once we
 	 * re-enable interrupts an interrupt could fault and thus overwrite
@@ -207,12 +203,13 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	if (cpu->regs->trapnum == 14)
 		cpu->arch.last_pagefault = read_cr2();
 	/* Similarly, if we took a trap because the Guest used the FPU,
-	 * we have to restore the FPU it expects to see.
-	 * math_state_restore() may sleep and we may even move off to
-	 * a different CPU. So all the critical stuff should be done
-	 * before this.  */
+	 * we have to restore the FPU it expects to see. */
 	else if (cpu->regs->trapnum == 7)
 		math_state_restore();
+
+	/* Restore SYSENTER if it's supposed to be on. */
+	if (boot_cpu_has(X86_FEATURE_SEP))
+		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
 }
 
 /*H:130 Now we've examined the hypercall code; our Guest can make requests.
diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c
index 76e5b7386af9..4f0f22b020ea 100644
--- a/trunk/drivers/xen/events.c
+++ b/trunk/drivers/xen/events.c
@@ -529,7 +529,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
 		/* Clear master flag /before/ clearing selector flag. */
-		wmb();
+		rmb();
 #endif
 		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
 		while (pending_words != 0) {
diff --git a/trunk/fs/select.c b/trunk/fs/select.c
index da0e88201c3a..8dda969614a9 100644
--- a/trunk/fs/select.c
+++ b/trunk/fs/select.c
@@ -249,6 +249,7 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
 				retval++;
 			}
 		}
+		cond_resched();
 	}
 	if (res_in)
 		*rinp = res_in;
@@ -256,7 +257,6 @@ int do_select(int n, fd_set_bits *fds, s64 *timeout)
 		*routp = res_out;
 	if (res_ex)
 		*rexp = res_ex;
-	cond_resched();
 	}
 	wait = NULL;
 	if (retval || !*timeout || signal_pending(current))
diff --git a/trunk/include/linux/tty_driver.h b/trunk/include/linux/tty_driver.h
index d2a003586761..59f1c0bd8f9c 100644
--- a/trunk/include/linux/tty_driver.h
+++ b/trunk/include/linux/tty_driver.h
@@ -27,7 +27,8 @@
  * This routine is called by the kernel to write a series of
  * characters to the tty device.  The characters may come from
  * user space or kernel space.  This routine will return the
- * number of characters actually accepted for writing.
+ * number of characters actually accepted for writing. This
+ * routine is mandatory.
  *
  * Optional: Required for writable devices.
  *
@@ -133,7 +134,7 @@
  * This routine notifies the tty driver that it should hangup the
  * tty device.
  *
- * Optional:
+ * Required:
  *
  * void (*break_ctl)(struct tty_stuct *tty, int state);
  *
diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c
index 449def8074fe..7d1136e97c14 100644
--- a/trunk/kernel/futex.c
+++ b/trunk/kernel/futex.c
@@ -1096,21 +1096,64 @@ static void unqueue_me_pi(struct futex_q *q)
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *newowner)
+				struct task_struct *newowner,
+				struct rw_semaphore *fshared)
 {
 	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
+	struct task_struct *oldowner = pi_state->owner;
 	u32 uval, curval, newval;
-	int ret;
+	int ret, attempt = 0;
 
 	/* Owner died? */
+	if (!pi_state->owner)
+		newtid |= FUTEX_OWNER_DIED;
+
+	/*
+	 * We are here either because we stole the rtmutex from the
+	 * pending owner or we are the pending owner which failed to
+	 * get the rtmutex. We have to replace the pending owner TID
+	 * in the user space variable. This must be atomic as we have
+	 * to preserve the owner died bit here.
+	 *
+	 * Note: We write the user space value _before_ changing the
+	 * pi_state because we can fault here. Imagine swapped out
+	 * pages or a fork, which was running right before we acquired
+	 * mmap_sem, that marked all the anonymous memory readonly for
+	 * cow.
+	 *
+	 * Modifying pi_state _before_ the user space value would
+	 * leave the pi_state in an inconsistent state when we fault
+	 * here, because we need to drop the hash bucket lock to
+	 * handle the fault. This might be observed in the PID check
+	 * in lookup_pi_state.
+	 */
+retry:
+	if (get_futex_value_locked(&uval, uaddr))
+		goto handle_fault;
+
+	while (1) {
+		newval = (uval & FUTEX_OWNER_DIED) | newtid;
+
+		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+
+		if (curval == -EFAULT)
+			goto handle_fault;
+		if (curval == uval)
+			break;
+		uval = curval;
+	}
+
+	/*
+	 * We fixed up user space. Now we need to fix the pi_state
+	 * itself.
+	 */
 	if (pi_state->owner != NULL) {
 		spin_lock_irq(&pi_state->owner->pi_lock);
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		spin_unlock_irq(&pi_state->owner->pi_lock);
-	} else
-		newtid |= FUTEX_OWNER_DIED;
+	}
 
 	pi_state->owner = newowner;
 
@@ -1118,26 +1161,35 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &newowner->pi_state_list);
 	spin_unlock_irq(&newowner->pi_lock);
+	return 0;
 
 	/*
-	 * We own it, so we have to replace the pending owner
-	 * TID. This must be atomic as we have preserve the
-	 * owner died bit here.
+	 * To handle the page fault we need to drop the hash bucket
+	 * lock here. That gives the other task (either the pending
+	 * owner itself or the task which stole the rtmutex) the
+	 * chance to try the fixup of the pi_state. So once we are
+	 * back from handling the fault we need to check the pi_state
+	 * after reacquiring the hash bucket lock and before trying to
+	 * do another fixup. When the fixup has been done already we
+	 * simply return.
 	 */
-	ret = get_futex_value_locked(&uval, uaddr);
+handle_fault:
+	spin_unlock(q->lock_ptr);
 
-	while (!ret) {
-		newval = (uval & FUTEX_OWNER_DIED) | newtid;
+	ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
 
-		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+	spin_lock(q->lock_ptr);
 
-		if (curval == -EFAULT)
-			ret = -EFAULT;
-		if (curval == uval)
-			break;
-		uval = curval;
-	}
-	return ret;
+	/*
+	 * Check if someone else fixed it for us:
+	 */
+	if (pi_state->owner != oldowner)
+		return 0;
+
+	if (ret)
+		return ret;
+
+	goto retry;
 }
 
 /*
@@ -1507,7 +1559,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * that case:
 		 */
 		if (q.pi_state->owner != curr)
-			ret = fixup_pi_state_owner(uaddr, &q, curr);
+			ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
 	} else {
 		/*
 		 * Catch the rare case, where the lock was released
@@ -1539,7 +1591,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 			int res;
 
 			owner = rt_mutex_owner(&q.pi_state->pi_mutex);
-			res = fixup_pi_state_owner(uaddr, &q, owner);
+			res = fixup_pi_state_owner(uaddr, &q, owner,
+						   fshared);
 
 			/* propagate -EFAULT, if the fixup failed */
 			if (res)
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 3aaa5c8cb421..b048ad8a11af 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -4398,20 +4398,22 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 			     signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			     fatal_signal_pending(current))) {
-				timeout = -ERESTARTSYS;
-				break;
+				__remove_wait_queue(&x->wait, &wait);
+				return -ERESTARTSYS;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-		} while (!x->done && timeout);
+			if (!timeout) {
+				__remove_wait_queue(&x->wait, &wait);
+				return timeout;
+			}
+		} while (!x->done);
 		__remove_wait_queue(&x->wait, &wait);
-		if (!x->done)
-			return timeout;
 	}
 	x->done--;
-	return timeout ?: 1;
+	return timeout;
 }
 
 static long __sched
diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c
index 0f3c19197fa4..1dad5bbb59b6 100644
--- a/trunk/kernel/sched_rt.c
+++ b/trunk/kernel/sched_rt.c
@@ -250,8 +250,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			spin_unlock(&rt_rq->rt_runtime_lock);
-		} else if (rt_rq->rt_nr_running)
-			idle = 0;
+		}
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
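Note on the kernel/futex.c hunk above: fixup_pi_state_owner() now writes the user-space TID first and falls back to a drop-lock/fault/retry sequence, because the user-space access can fault and the fault cannot be handled while the hash-bucket lock is held. After reacquiring the lock it re-checks pi_state->owner, since another task may have completed the fixup in the unlocked window. Below is a minimal self-contained sketch of that pattern; every identifier in it (shared_state, fix_user_value, handle_user_fault, fixup_owner) is a hypothetical stand-in for illustration, not a kernel interface -- only the unlock/fault/relock/recheck shape mirrors the patch. The caller is assumed to hold s->lock, as the futex code holds the hash-bucket lock.

#include <linux/errno.h>
#include <linux/spinlock.h>

/* Hypothetical shared state, for illustration only. */
struct shared_state {
	spinlock_t lock;	/* held by the caller of fixup_owner() */
	int owner;
};

/* Stand-in for the cmpxchg loop on a user-space word; may return -EFAULT. */
static int fix_user_value(struct shared_state *s, int newowner)
{
	return 0;
}

/* Stand-in for fault handling that must run without s->lock held. */
static int handle_user_fault(struct shared_state *s)
{
	return 0;
}

static int fixup_owner(struct shared_state *s, int oldowner, int newowner)
{
	int ret;

retry:
	ret = fix_user_value(s, newowner);
	if (ret != -EFAULT) {
		if (!ret)
			s->owner = newowner;	/* user value fixed; now fix our state */
		return ret;
	}

	spin_unlock(&s->lock);		/* cannot handle a fault under the lock */
	ret = handle_user_fault(s);	/* other tasks may run and do the fixup */
	spin_lock(&s->lock);

	if (s->owner != oldowner)	/* someone else completed the fixup */
		return 0;
	if (ret)
		return ret;
	goto retry;
}

The re-check against oldowner is what makes dropping the lock safe: if ownership changed while the lock was released, the fixup has already been done and must not be repeated.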
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index d14b251a25a6..9aefaae46858 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -1045,26 +1045,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	return page;
 }
 
-/* Can we do the FOLL_ANON optimization? */
-static inline int use_zero_page(struct vm_area_struct *vma)
-{
-	/*
-	 * We don't want to optimize FOLL_ANON for make_pages_present()
-	 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
-	 * we want to get the page from the page tables to make sure
-	 * that we serialize and update with any other user of that
-	 * mapping.
-	 */
-	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
-		return 0;
-	/*
-	 * And if we have a fault or a nopfn routine, it's not an
-	 * anonymous region.
-	 */
-	return !vma->vm_ops ||
-		(!vma->vm_ops->fault && !vma->vm_ops->nopfn);
-}
-
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, int len, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
@@ -1139,7 +1119,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		foll_flags = FOLL_TOUCH;
 		if (pages)
 			foll_flags |= FOLL_GET;
-		if (!write && use_zero_page(vma))
+		if (!write && !(vma->vm_flags & VM_LOCKED) &&
+		    (!vma->vm_ops || !vma->vm_ops->fault))
 			foll_flags |= FOLL_ANON;
 
 		do {
@@ -1785,6 +1766,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (likely(pte_same(*page_table, orig_pte))) {
 		if (old_page) {
+			page_remove_rmap(old_page, vma);
 			if (!PageAnon(old_page)) {
 				dec_mm_counter(mm, file_rss);
 				inc_mm_counter(mm, anon_rss);
@@ -1806,32 +1788,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		lru_cache_add_active(new_page);
 		page_add_new_anon_rmap(new_page, vma, address);
 
-		if (old_page) {
-			/*
-			 * Only after switching the pte to the new page may
-			 * we remove the mapcount here. Otherwise another
-			 * process may come and find the rmap count decremented
-			 * before the pte is switched to the new page, and
-			 * "reuse" the old page writing into it while our pte
-			 * here still points into it and can be read by other
-			 * threads.
-			 *
-			 * The critical issue is to order this
-			 * page_remove_rmap with the ptp_clear_flush above.
-			 * Those stores are ordered by (if nothing else,)
-			 * the barrier present in the atomic_add_negative
-			 * in page_remove_rmap.
-			 *
-			 * Then the TLB flush in ptep_clear_flush ensures that
-			 * no process can access the old page before the
-			 * decremented mapcount is visible. And the old page
-			 * cannot be reused until after the decremented
-			 * mapcount is visible. So transitively, TLBs to
-			 * old page will be flushed before it can be reused.
-			 */
-			page_remove_rmap(old_page, vma);
-		}
-
 		/* Free the old page.. */
 		new_page = old_page;
 		ret |= VM_FAULT_WRITE;
diff --git a/trunk/sound/isa/sb/sb_mixer.c b/trunk/sound/isa/sb/sb_mixer.c
index 73d4572d136b..91d14224f6b3 100644
--- a/trunk/sound/isa/sb/sb_mixer.c
+++ b/trunk/sound/isa/sb/sb_mixer.c
@@ -925,7 +925,7 @@ static unsigned char als4000_saved_regs[] = {
 static void save_mixer(struct snd_sb *chip, unsigned char *regs, int num_regs)
 {
 	unsigned char *val = chip->saved_regs;
-	snd_assert(num_regs <= ARRAY_SIZE(chip->saved_regs), return);
+	snd_assert(num_regs > ARRAY_SIZE(chip->saved_regs), return);
 	for (; num_regs; num_regs--)
 		*val++ = snd_sbmixer_read(chip, *regs++);
 }
@@ -933,7 +933,7 @@ static void save_mixer(struct snd_sb *chip, unsigned char *regs, int num_regs)
 static void restore_mixer(struct snd_sb *chip, unsigned char *regs, int num_regs)
 {
 	unsigned char *val = chip->saved_regs;
-	snd_assert(num_regs <= ARRAY_SIZE(chip->saved_regs), return);
+	snd_assert(num_regs > ARRAY_SIZE(chip->saved_regs), return);
 	for (; num_regs; num_regs--)
 		snd_sbmixer_write(chip, *regs++, *val++);
 }
diff --git a/trunk/sound/pci/aw2/aw2-alsa.c b/trunk/sound/pci/aw2/aw2-alsa.c
index 3f00ddf450f8..56f87cd33c19 100644
--- a/trunk/sound/pci/aw2/aw2-alsa.c
+++ b/trunk/sound/pci/aw2/aw2-alsa.c
@@ -316,8 +316,6 @@ static int __devinit snd_aw2_create(struct snd_card *card,
 		return -ENOMEM;
 	}
 
-	/* (2) initialization of the chip hardware */
-	snd_aw2_saa7146_setup(&chip->saa7146, chip->iobase_virt);
 
 	if (request_irq(pci->irq, snd_aw2_saa7146_interrupt,
 			IRQF_SHARED, "Audiowerk2", chip)) {
@@ -331,6 +329,8 @@ static int __devinit snd_aw2_create(struct snd_card *card,
 	}
 	chip->irq = pci->irq;
 
+	/* (2) initialization of the chip hardware */
+	snd_aw2_saa7146_setup(&chip->saa7146, chip->iobase_virt);
 	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
 	if (err < 0) {
 		free_irq(chip->irq, (void *)chip);
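Note on the sound/pci/aw2/aw2-alsa.c hunk above: snd_aw2_saa7146_setup() now runs only after request_irq() has installed the shared interrupt handler, so any interrupt the chip raises during bring-up finds a registered handler. A minimal sketch of that probe ordering follows; the names in it (aw2_hw, hw_irq_handler, hw_init, aw2_probe_order) are hypothetical illustrations, not the driver's actual API -- only the register-IRQ-before-enabling-hardware order mirrors the patch.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical device state, for illustration only. */
struct aw2_hw {
	void __iomem *iobase;
	int irq;
};

static irqreturn_t hw_irq_handler(int irq, void *dev_id)
{
	/* Must cope with being called before hw_init() has finished. */
	return IRQ_NONE;
}

static void hw_init(struct aw2_hw *hw)
{
	/* Chip bring-up; the device may raise interrupts immediately. */
}

static int aw2_probe_order(struct pci_dev *pci, struct aw2_hw *hw)
{
	/* 1. Install the handler before the chip can interrupt. */
	if (request_irq(pci->irq, hw_irq_handler, IRQF_SHARED,
			"aw2-example", hw))
		return -EBUSY;
	hw->irq = pci->irq;

	/* 2. Only now enable the hardware. */
	hw_init(hw);
	return 0;
}

With IRQF_SHARED the ordering matters twice over: another device on the same line can invoke the handler at any moment after registration, which is why the sketch's handler returns IRQ_NONE for interrupts it cannot yet claim.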