---
yaml
---
r: 11368
b: refs/heads/master
c: 60ec558
h: refs/heads/master
v: v3
Hugh Dickins authored and Linus Torvalds committed Oct 30, 2005
1 parent 7a75d58 commit df3e7bd
Showing 4 changed files with 61 additions and 66 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: deceb6cd17e6dfafe4c4f81b1b4153bc41b2cb70
+refs/heads/master: 60ec5585496871345c1a8113d7b60ed9d9474866
17 changes: 7 additions & 10 deletions trunk/arch/i386/kernel/vm86.c
@@ -134,17 +134,16 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 	return ret;
 }
 
-static void mark_screen_rdonly(struct task_struct * tsk)
+static void mark_screen_rdonly(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte, *mapped;
+	pte_t *pte;
+	spinlock_t *ptl;
 	int i;
 
-	preempt_disable();
-	spin_lock(&tsk->mm->page_table_lock);
-	pgd = pgd_offset(tsk->mm, 0xA0000);
+	pgd = pgd_offset(mm, 0xA0000);
 	if (pgd_none_or_clear_bad(pgd))
 		goto out;
 	pud = pud_offset(pgd, 0xA0000);
@@ -153,16 +152,14 @@ static void mark_screen_rdonly(struct task_struct * tsk)
 	pmd = pmd_offset(pud, 0xA0000);
 	if (pmd_none_or_clear_bad(pmd))
 		goto out;
-	pte = mapped = pte_offset_map(pmd, 0xA0000);
+	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
 	for (i = 0; i < 32; i++) {
 		if (pte_present(*pte))
 			set_pte(pte, pte_wrprotect(*pte));
 		pte++;
 	}
-	pte_unmap(mapped);
+	pte_unmap_unlock(pte, ptl);
 out:
-	spin_unlock(&tsk->mm->page_table_lock);
-	preempt_enable();
 	flush_tlb();
 }
 
@@ -306,7 +303,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 	tsk->thread.screen_bitmap = info->screen_bitmap;
 	if (info->flags & VM86_SCREEN_BITMAP)
-		mark_screen_rdonly(tsk);
+		mark_screen_rdonly(tsk->mm);
 	__asm__ __volatile__(
 		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
 		"movl %0,%%esp\n\t"
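The conversion above is this commit's core pattern: instead of taking the mm-wide page_table_lock (with preemption disabled) around a pte_offset_map() walk, a single pte_offset_map_lock() call maps the page table page and takes the lock that guards it. A minimal sketch of the pattern, assuming the 2.6.14-era interfaces from <linux/mm.h>; walk_ptes() is a hypothetical name used only for illustration, not a function in the tree:

    #include <linux/mm.h>

    static void walk_ptes(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
    {
        spinlock_t *ptl;
        pte_t *pte;

        /* Maps the pte page and acquires the lock covering it. */
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

        /* ... examine or modify *pte while the lock is held ... */

        /* Drops the lock, then unmaps the pte page. */
        pte_unmap_unlock(pte, ptl);
    }

With a split page-table lock, this serializes only against other users of the same page table page rather than against the whole mm, which is what the series this commit prepares for is after.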
40 changes: 23 additions & 17 deletions trunk/arch/sh/mm/fault.c
@@ -194,10 +194,13 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 				 unsigned long address)
 {
 	unsigned long addrmax = P4SEG;
-	pgd_t *dir;
+	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	struct mm_struct *mm;
+	spinlock_t *ptl;
+	int ret = 1;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -208,28 +211,28 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	addrmax = P4SEG_STORE_QUE + 0x04000000;
 #endif
 
-	if (address >= P3SEG && address < addrmax)
-		dir = pgd_offset_k(address);
-	else if (address >= TASK_SIZE)
+	if (address >= P3SEG && address < addrmax) {
+		pgd = pgd_offset_k(address);
+		mm = NULL;
+	} else if (address >= TASK_SIZE)
 		return 1;
-	else if (!current->mm)
+	else if (!(mm = current->mm))
 		return 1;
 	else
-		dir = pgd_offset(current->mm, address);
+		pgd = pgd_offset(mm, address);
 
-	pmd = pmd_offset(dir, address);
-	if (pmd_none(*pmd))
-		return 1;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none_or_clear_bad(pmd))
 		return 1;
-	}
-	pte = pte_offset_kernel(pmd, address);
+	if (mm)
+		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	else
+		pte = pte_offset_kernel(pmd, address);
+
 	entry = *pte;
 	if (pte_none(entry) || pte_not_present(entry)
 			|| (writeaccess && !pte_write(entry)))
-		return 1;
+		goto unlock;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
@@ -251,8 +254,11 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
-
-	return 0;
+	ret = 0;
+unlock:
+	if (mm)
+		pte_unmap_unlock(pte, ptl);
+	return ret;
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
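The mm == NULL path above is why the fault handler cannot convert unconditionally: pte_offset_map_lock() needs an mm to find the pte lock, so kernel-segment (P3SEG) lookups keep the unlocked pte_offset_kernel(), and every exit taken after the pte is mapped now funnels through the unlock: label. A condensed sketch of that shape, assuming the same 2.6.14-era API; handle_one() is a hypothetical name, not the actual sh code:

    static int handle_one(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
    {
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 1;                    /* default: report an unhandled fault */

        if (mm)                         /* user mapping: take the pte lock */
            pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        else                            /* kernel segment: no pte lock exists */
            pte = pte_offset_kernel(pmd, address);

        if (!pte_present(*pte))
            goto unlock;                /* single exit once the pte is mapped */

        /* ... update the pte and the TLB here ... */
        ret = 0;
    unlock:
        if (mm)
            pte_unmap_unlock(pte, ptl);
        return ret;
    }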
68 changes: 30 additions & 38 deletions trunk/arch/sh64/mm/cache.c
@@ -584,32 +584,36 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
 	}
 }
 
-static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+				unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	spinlock_t *ptl;
 	unsigned long paddr;
 
-	/* NOTE : all the callers of this have mm->page_table_lock held, so the
-	   following page table traversal is safe even on SMP/pre-emptible. */
-
-	if (!mm) return; /* No way to find physical address of page */
-	pgd = pgd_offset(mm, eaddr);
-	if (pgd_bad(*pgd)) return;
-
-	pmd = pmd_offset(pgd, eaddr);
-	if (pmd_none(*pmd) || pmd_bad(*pmd)) return;
-
-	pte = pte_offset_kernel(pmd, eaddr);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry)) return;
-
-	paddr = pte_val(entry) & PAGE_MASK;
-
-	sh64_dcache_purge_coloured_phy_page(paddr, eaddr);
-
+	if (!mm)
+		return; /* No way to find physical address of page */
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_bad(*pgd))
+		return;
+
+	pmd = pmd_offset(pgd, addr);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		return;
+
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	do {
+		entry = *pte;
+		if (pte_none(entry) || !pte_present(entry))
+			continue;
+		paddr = pte_val(entry) & PAGE_MASK;
+		sh64_dcache_purge_coloured_phy_page(paddr, addr);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(pte - 1, ptl);
 }
 /****************************************************************************/
 
@@ -668,7 +672,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 	int n_pages;
 
 	n_pages = ((end - start) >> PAGE_SHIFT);
-	if (n_pages >= 64) {
+	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
 #if 1
 		sh64_dcache_purge_all();
 #else
@@ -707,20 +711,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 	}
 #endif
 	} else {
-		/* 'Small' range */
-		unsigned long aligned_start;
-		unsigned long eaddr;
-		unsigned long last_page_start;
-
-		aligned_start = start & PAGE_MASK;
-		/* 'end' is 1 byte beyond the end of the range */
-		last_page_start = (end - 1) & PAGE_MASK;
-
-		eaddr = aligned_start;
-		while (eaddr <= last_page_start) {
-			sh64_dcache_purge_user_page(mm, eaddr);
-			eaddr += PAGE_SIZE;
-		}
+		/* Small range, covered by a single page table page */
+		start &= PAGE_MASK;	/* should already be so */
+		end = PAGE_ALIGN(end);	/* should already be so */
+		sh64_dcache_purge_user_pages(mm, start, end);
 	}
 	return;
 }
@@ -880,9 +874,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	   addresses from the user address space specified by mm, after writing
 	   back any dirty data.
 
-	   Note(1), 'end' is 1 byte beyond the end of the range to flush.
-
-	   Note(2), this is called with mm->page_table_lock held.*/
+	   Note, 'end' is 1 byte beyond the end of the range to flush. */
 
 	sh64_dcache_purge_user_range(mm, start, end);
 	sh64_icache_inv_user_page_range(mm, start, end);
@@ -898,7 +890,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned
 	   the I-cache must be searched too in case the page in question is
 	   both writable and being executed from (e.g. stack trampolines.)
 
-	   Note(1), this is called with mm->page_table_lock held.
+	   Note, this is called with pte lock held.
 	*/
 
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
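Two details of the rewritten purge are worth calling out. The loop visits every pte in the range under one pte_offset_map_lock(), which is only safe because the caller's new (start ^ (end - 1)) & PMD_MASK test falls back to a full purge whenever start and end - 1 sit under different page-table pages; and the do/while post-increments pte past the last entry it processed, which is why pte - 1 is what gets handed back to pte_unmap_unlock(). A skeleton of just that walk, under the same one-page-table assumption; purge_range() is a hypothetical wrapper for illustration and the purge body is a placeholder:

    static void purge_range(struct mm_struct *mm, pmd_t *pmd,
                            unsigned long addr, unsigned long end)
    {
        spinlock_t *ptl;
        pte_t *pte;

        /* One map/lock covers the whole walk: the caller guarantees
           that [addr, end) does not cross a page-table page. */
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
            /* ... purge cache lines for *pte if it is present ... */
        } while (pte++, addr += PAGE_SIZE, addr != end);

        /* pte was post-incremented one past the last entry walked. */
        pte_unmap_unlock(pte - 1, ptl);
    }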
