Commit 0a759e0

---
yaml
---
r: 11369
b: refs/heads/master
c: 69b0475
h: refs/heads/master
i:
  11367: 7a75d58
v: v3
Hugh Dickins authored and Linus Torvalds committed Oct 30, 2005
1 parent df3e7bd commit 0a759e0
Showing 4 changed files with 34 additions and 85 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 60ec5585496871345c1a8113d7b60ed9d9474866
+refs/heads/master: 69b0475456ff7ef520e16f69d7a15c0d68b74e64
96 changes: 18 additions & 78 deletions trunk/arch/arm/kernel/signal.c
@@ -139,93 +139,33 @@ struct iwmmxt_sigframe {
 	unsigned long storage[0x98/4];
 };
 
-static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
-{
-	unsigned long addr = (unsigned long)uptr;
-	pgd_t *pgd = pgd_offset(mm, addr);
-	if (pgd_present(*pgd)) {
-		pmd_t *pmd = pmd_offset(pgd, addr);
-		if (pmd_present(*pmd)) {
-			pte_t *pte = pte_offset_map(pmd, addr);
-			return (pte_present(*pte) && (!wr || pte_write(*pte)));
-		}
-	}
-	return 0;
-}
-
-static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
-		       void (*copyfn)(void *, void __user *))
-{
-	unsigned char v, __user *userptr = uptr;
-	int err = 0;
-
-	do {
-		struct mm_struct *mm;
-
-		if (write) {
-			__put_user_error(0, userptr, err);
-			__put_user_error(0, userptr + size - 1, err);
-		} else {
-			__get_user_error(v, userptr, err);
-			__get_user_error(v, userptr + size - 1, err);
-		}
-
-		if (err)
-			break;
-
-		mm = current->mm;
-		spin_lock(&mm->page_table_lock);
-		if (page_present(mm, userptr, write) &&
-		    page_present(mm, userptr + size - 1, write)) {
-			copyfn(kptr, uptr);
-		} else
-			err = 1;
-		spin_unlock(&mm->page_table_lock);
-	} while (err);
-
-	return err;
-}
-
 static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
 {
-	int err = 0;
+	char kbuf[sizeof(*frame) + 8];
+	struct iwmmxt_sigframe *kframe;
 
 	/* the iWMMXt context must be 64 bit aligned */
-	WARN_ON((unsigned long)frame & 7);
-
-	__put_user_error(IWMMXT_MAGIC0, &frame->magic0, err);
-	__put_user_error(IWMMXT_MAGIC1, &frame->magic1, err);
-
-	/*
-	 * iwmmxt_task_copy() doesn't check user permissions.
-	 * Let's do a dummy write on the upper boundary to ensure
-	 * access to user mem is OK all way up.
-	 */
-	err |= copy_locked(&frame->storage, current_thread_info(),
-			   sizeof(frame->storage), 1, iwmmxt_task_copy);
-	return err;
+	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	kframe->magic0 = IWMMXT_MAGIC0;
+	kframe->magic1 = IWMMXT_MAGIC1;
+	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+	return __copy_to_user(frame, kframe, sizeof(*frame));
 }
 
 static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 {
-	unsigned long magic0, magic1;
-	int err = 0;
+	char kbuf[sizeof(*frame) + 8];
+	struct iwmmxt_sigframe *kframe;
 
-	/* the iWMMXt context is 64 bit aligned */
-	WARN_ON((unsigned long)frame & 7);
-
-	/*
-	 * Validate iWMMXt context signature.
-	 * Also, iwmmxt_task_restore() doesn't check user permissions.
-	 * Let's do a dummy write on the upper boundary to ensure
-	 * access to user mem is OK all way up.
-	 */
-	__get_user_error(magic0, &frame->magic0, err);
-	__get_user_error(magic1, &frame->magic1, err);
-	if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
-		err = copy_locked(&frame->storage, current_thread_info(),
-				  sizeof(frame->storage), 0, iwmmxt_task_restore);
-	return err;
+	/* the iWMMXt context must be 64 bit aligned */
+	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	if (__copy_from_user(kframe, frame, sizeof(*frame)))
+		return -1;
+	if (kframe->magic0 != IWMMXT_MAGIC0 ||
+	    kframe->magic1 != IWMMXT_MAGIC1)
+		return -1;
+	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+	return 0;
 }
 
 #endif
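The rewritten preserve/restore paths no longer probe user page tables; they stage the frame in an over-sized kernel stack buffer, align it by hand, and let the ordinary user-copy routines handle any faults. A minimal userspace sketch, not part of the patch, with a hypothetical struct fake_sigframe standing in for struct iwmmxt_sigframe, of why the (kbuf + 8) & ~7 idiom always yields an 8-byte-aligned frame that still fits inside the buffer:

/*
 * Standalone illustration (not kernel code) of the alignment trick
 * used by the new preserve_iwmmxt_context()/restore_iwmmxt_context().
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_sigframe {			/* stand-in for struct iwmmxt_sigframe */
	unsigned long magic0;
	unsigned long magic1;
	unsigned long storage[0x98 / 4];
};

int main(void)
{
	char kbuf[sizeof(struct fake_sigframe) + 8];
	struct fake_sigframe *kframe;

	/* Round (kbuf + 8) down to the previous 8-byte boundary. */
	kframe = (struct fake_sigframe *)((uintptr_t)(kbuf + 8) & ~(uintptr_t)7);

	assert((uintptr_t)kframe % 8 == 0);		/* 64-bit aligned */
	assert((char *)kframe >= kbuf);			/* never before the buffer */
	assert((char *)(kframe + 1) <= kbuf + sizeof(kbuf));	/* frame fits */

	memset(kframe, 0, sizeof(*kframe));
	printf("kbuf=%p kframe=%p\n", (void *)kbuf, (void *)kframe);
	return 0;
}

Rounding kbuf + 8 down can move the pointer back by at most 7 bytes, so the result never falls below kbuf, and the 8 bytes of slack keep the whole frame inside the buffer.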
14 changes: 9 additions & 5 deletions trunk/arch/arm/kernel/traps.c
@@ -483,29 +483,33 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		unsigned long addr = regs->ARM_r2;
 		struct mm_struct *mm = current->mm;
 		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+		spinlock_t *ptl;
 
 		regs->ARM_cpsr &= ~PSR_C_BIT;
-		spin_lock(&mm->page_table_lock);
+		down_read(&mm->mmap_sem);
 		pgd = pgd_offset(mm, addr);
 		if (!pgd_present(*pgd))
 			goto bad_access;
 		pmd = pmd_offset(pgd, addr);
 		if (!pmd_present(*pmd))
 			goto bad_access;
-		pte = pte_offset_map(pmd, addr);
-		if (!pte_present(*pte) || !pte_write(*pte))
+		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+		if (!pte_present(*pte) || !pte_write(*pte)) {
+			pte_unmap_unlock(pte, ptl);
 			goto bad_access;
+		}
 		val = *(unsigned long *)addr;
 		val -= regs->ARM_r0;
 		if (val == 0) {
 			*(unsigned long *)addr = regs->ARM_r1;
 			regs->ARM_cpsr |= PSR_C_BIT;
 		}
-		spin_unlock(&mm->page_table_lock);
+		pte_unmap_unlock(pte, ptl);
+		up_read(&mm->mmap_sem);
 		return val;
 
 	bad_access:
-		spin_unlock(&mm->page_table_lock);
+		up_read(&mm->mmap_sem);
 		/* simulate a write access fault */
 		do_DataAbort(addr, 15 + (1 << 11), regs);
 		return -1;
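For context, the code above is the kernel-assisted cmpxchg helper used on ARM cores without native atomic instructions; the patch changes only its locking (mmap_sem plus the per-page-table pte lock instead of mm->page_table_lock), not its semantics. A standalone sketch of those semantics, with a hypothetical emulate_cmpxchg() and no locking or fault handling, not taken from the kernel:

/*
 * Userspace restatement of the compare-and-swap the handler emulates:
 * compare *addr with oldval (ARM_r0); on a match store newval (ARM_r1)
 * and report success in the carry flag; return *addr - oldval.
 */
#include <stdio.h>

static unsigned long emulate_cmpxchg(unsigned long *addr,
				     unsigned long oldval,	/* ARM_r0 */
				     unsigned long newval,	/* ARM_r1 */
				     int *carry)		/* PSR_C_BIT */
{
	unsigned long val = *addr - oldval;

	*carry = 0;
	if (val == 0) {			/* current value matched oldval */
		*addr = newval;		/* perform the exchange */
		*carry = 1;		/* success reported via carry */
	}
	return val;			/* 0 on success, difference otherwise */
}

int main(void)
{
	unsigned long word = 5;
	unsigned long ret;
	int c;

	ret = emulate_cmpxchg(&word, 5, 9, &c);	/* matches: swaps in 9 */
	printf("ret=%lu carry=%d word=%lu\n", ret, c, word);

	ret = emulate_cmpxchg(&word, 5, 7, &c);	/* stale oldval: no swap */
	printf("ret=%lu carry=%d word=%lu\n", ret, c, word);
	return 0;
}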
7 changes: 6 additions & 1 deletion trunk/arch/arm/mm/fault-armv.c
@@ -26,6 +26,11 @@ static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable. However, we leave the write buffer on.
+ *
+ * Note that the pte lock held when calling update_mmu_cache must also
+ * guard the pte (somewhere else in the same mm) that we modify here.
+ * Therefore those configurations which might call adjust_pte (those
+ * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 {
@@ -127,7 +132,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page);
  * 2. If we have multiple shared mappings of the same space in
  *    an object, we need to deal with the cache aliasing issues.
  *
- * Note that the page_table_lock will be held.
+ * Note that the pte lock will be held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
