Merge branch 'master'
Jeff Garzik committed Oct 30, 2005
2 parents 0169e28 + 9f75e1e commit 81cfb88
Showing 170 changed files with 3,281 additions and 3,451 deletions.
9 changes: 0 additions & 9 deletions Documentation/cachetlb.txt
@@ -49,9 +49,6 @@ changes occur:
page table operations such as what happens during
fork, and exec.

Platform developers note that generic code will always
invoke this interface without mm->page_table_lock held.

3) void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)

@@ -72,9 +69,6 @@ changes occur:
call flush_tlb_page (see below) for each entry which may be
modified.

Platform developers note that generic code will always
invoke this interface with mm->page_table_lock held.

4) void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)

This time we need to remove the PAGE_SIZE sized translation
@@ -93,9 +87,6 @@ changes occur:

This is used primarily during fault processing.

Platform developers note that generic code will always
invoke this interface with mm->page_table_lock held.

5) void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)

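The flush_tlb_range() interface documented above is invoked by generic mm
code after a batch of PTE updates. A minimal sketch of a typical call site
(illustrative only; zap_range_ptes() is a hypothetical stand-in for the
generic unmap path, while flush_tlb_range() is the interface described
above):

    /* Clear a range of user PTEs, then drop the stale TLB entries. */
    static void example_unmap(struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
    {
            zap_range_ptes(vma->vm_mm, start, end); /* hypothetical helper */
            flush_tlb_range(vma, start, end);
    }
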
2 changes: 0 additions & 2 deletions Documentation/kernel-parameters.txt
@@ -1460,8 +1460,6 @@ running once the system is up.
stifb= [HW]
Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]]

stram_swap= [HW,M68k]

swiotlb= [IA-64] Number of I/O TLB slabs

switches= [HW,M68k]
24 changes: 1 addition & 23 deletions Documentation/m68k/kernel-options.txt
@@ -626,7 +626,7 @@ ignored (others aren't affected).
can be performed in optimal order. Not all SCSI devices support
tagged queuing (:-().

4.6 switches=
4.5 switches=
-------------

Syntax: switches=<list of switches>
@@ -661,28 +661,6 @@ correctly.
earlier initialization ("ov_"-less) takes precedence. But the
switching-off on reset still happens in this case.

4.5) stram_swap=
----------------

Syntax: stram_swap=<do_swap>[,<max_swap>]

This option is available only if the kernel has been compiled with
CONFIG_STRAM_SWAP enabled. Normally, the kernel then determines
dynamically whether to actually use ST-RAM as swap space. (Currently,
the fraction of ST-RAM must be less or equal 1/3 of total memory to
enable this swapping.) You can override the kernel's decision by
specifying this option. 1 for <do_swap> means always enable the swap,
even if you have less alternate RAM. 0 stands for never swap to
ST-RAM, even if it's small enough compared to the rest of memory.

If ST-RAM swapping is enabled, the kernel usually uses all free
ST-RAM as swap "device". If the kernel resides in ST-RAM, the region
allocated by it is obviously never used for swapping :-) You can also
limit this amount by specifying the second parameter, <max_swap>, if
you want to use parts of ST-RAM as normal system memory. <max_swap> is
in kBytes and the number should be a multiple of 4 (otherwise: rounded
down).
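
For illustration only (this commit removes the option), a boot command line
using stram_swap might have looked like:

    stram_swap=1,2048

which would force ST-RAM swapping on and cap the swap region at 2048 kB, a
multiple of 4 as the rounding rule above requires.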

5) Options for Amiga Only:
==========================

3 changes: 3 additions & 0 deletions arch/alpha/mm/numa.c
@@ -371,6 +371,8 @@ show_mem(void)
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_online_node(nid) {
unsigned long flags;
pgdat_resize_lock(NODE_DATA(nid), &flags);
i = node_spanned_pages(nid);
while (i-- > 0) {
struct page *page = nid_page_nr(nid, i);
@@ -384,6 +386,7 @@ show_mem(void)
else
shared += page_count(page) - 1;
}
pgdat_resize_unlock(NODE_DATA(nid), &flags);
}
printk("%ld pages of RAM\n",total);
printk("%ld free pages\n",free);
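The numa.c hunk above wraps the per-node page scan in the node's resize
lock, so that a concurrent memory hot-add cannot change the node's span
mid-scan. The idiom in isolation (a sketch against the pgdat_resize_lock()
API used in the hunk):

    unsigned long flags;
    long i;

    pgdat_resize_lock(NODE_DATA(nid), &flags);  /* pin node_spanned_pages */
    i = node_spanned_pages(nid);
    while (i-- > 0) {
            struct page *page = nid_page_nr(nid, i);
            /* ... classify page as free / reserved / cached / shared ... */
    }
    pgdat_resize_unlock(NODE_DATA(nid), &flags);
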
6 changes: 1 addition & 5 deletions arch/alpha/mm/remap.c
@@ -2,7 +2,6 @@
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>

/* called with the page_table_lock held */
static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
@@ -31,7 +30,6 @@ remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
} while (address && (address < end));
}

/* called with the page_table_lock held */
static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
@@ -46,7 +44,7 @@ remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
if (address >= end)
BUG();
do {
pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
pte_t * pte = pte_alloc_kernel(pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address,
@@ -70,7 +68,6 @@ __alpha_remap_area_pages(unsigned long address, unsigned long phys_addr,
flush_cache_all();
if (address >= end)
BUG();
spin_lock(&init_mm.page_table_lock);
do {
pmd_t *pmd;
pmd = pmd_alloc(&init_mm, dir, address);
@@ -84,7 +81,6 @@ __alpha_remap_area_pages(unsigned long address, unsigned long phys_addr,
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
spin_unlock(&init_mm.page_table_lock);
return error;
}
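
The remap.c hunk reflects the pte_alloc_kernel() interface change carried
by this merge: the function now operates on init_mm implicitly and does any
needed locking itself, so callers drop both the mm argument and the
explicit init_mm.page_table_lock. Distilled from the hunk:

    /* old: caller serializes on init_mm.page_table_lock */
    spin_lock(&init_mm.page_table_lock);
    pte = pte_alloc_kernel(&init_mm, pmd, address);
    /* ... fill in the ptes ... */
    spin_unlock(&init_mm.page_table_lock);

    /* new: no mm argument, no caller-held lock */
    pte = pte_alloc_kernel(pmd, address);
    /* ... fill in the ptes ... */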

96 changes: 18 additions & 78 deletions arch/arm/kernel/signal.c
@@ -139,93 +139,33 @@ struct iwmmxt_sigframe {
unsigned long storage[0x98/4];
};

static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
{
unsigned long addr = (unsigned long)uptr;
pgd_t *pgd = pgd_offset(mm, addr);
if (pgd_present(*pgd)) {
pmd_t *pmd = pmd_offset(pgd, addr);
if (pmd_present(*pmd)) {
pte_t *pte = pte_offset_map(pmd, addr);
return (pte_present(*pte) && (!wr || pte_write(*pte)));
}
}
return 0;
}

static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
void (*copyfn)(void *, void __user *))
{
unsigned char v, __user *userptr = uptr;
int err = 0;

do {
struct mm_struct *mm;

if (write) {
__put_user_error(0, userptr, err);
__put_user_error(0, userptr + size - 1, err);
} else {
__get_user_error(v, userptr, err);
__get_user_error(v, userptr + size - 1, err);
}

if (err)
break;

mm = current->mm;
spin_lock(&mm->page_table_lock);
if (page_present(mm, userptr, write) &&
page_present(mm, userptr + size - 1, write)) {
copyfn(kptr, uptr);
} else
err = 1;
spin_unlock(&mm->page_table_lock);
} while (err);

return err;
}

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
int err = 0;
char kbuf[sizeof(*frame) + 8];
struct iwmmxt_sigframe *kframe;

/* the iWMMXt context must be 64 bit aligned */
WARN_ON((unsigned long)frame & 7);

__put_user_error(IWMMXT_MAGIC0, &frame->magic0, err);
__put_user_error(IWMMXT_MAGIC1, &frame->magic1, err);

/*
* iwmmxt_task_copy() doesn't check user permissions.
* Let's do a dummy write on the upper boundary to ensure
* access to user mem is OK all way up.
*/
err |= copy_locked(&frame->storage, current_thread_info(),
sizeof(frame->storage), 1, iwmmxt_task_copy);
return err;
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
kframe->magic0 = IWMMXT_MAGIC0;
kframe->magic1 = IWMMXT_MAGIC1;
iwmmxt_task_copy(current_thread_info(), &kframe->storage);
return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
unsigned long magic0, magic1;
int err = 0;
char kbuf[sizeof(*frame) + 8];
struct iwmmxt_sigframe *kframe;

/* the iWMMXt context is 64 bit aligned */
WARN_ON((unsigned long)frame & 7);

/*
* Validate iWMMXt context signature.
* Also, iwmmxt_task_restore() doesn't check user permissions.
* Let's do a dummy write on the upper boundary to ensure
* access to user mem is OK all way up.
*/
__get_user_error(magic0, &frame->magic0, err);
__get_user_error(magic1, &frame->magic1, err);
if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
err = copy_locked(&frame->storage, current_thread_info(),
sizeof(frame->storage), 0, iwmmxt_task_restore);
return err;
/* the iWMMXt context must be 64 bit aligned */
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
if (__copy_from_user(kframe, frame, sizeof(*frame)))
return -1;
if (kframe->magic0 != IWMMXT_MAGIC0 ||
kframe->magic1 != IWMMXT_MAGIC1)
return -1;
iwmmxt_task_restore(current_thread_info(), &kframe->storage);
return 0;
}

#endif
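The signal.c rewrite above drops the page_present()/copy_locked() probing
under mm->page_table_lock in favor of staging the iWMMXt frame in an
aligned kernel buffer and letting the ordinary user-copy primitives do the
access checking and faulting. The pattern, reduced to its core (names taken
from the hunk itself):

    char kbuf[sizeof(*frame) + 8];
    struct iwmmxt_sigframe *kframe;

    /* 64-bit-align the kernel-side copy of the frame */
    kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
    iwmmxt_task_copy(current_thread_info(), &kframe->storage);
    /* __copy_to_user() performs the permission checks and fault handling */
    return __copy_to_user(frame, kframe, sizeof(*frame));
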
14 changes: 9 additions & 5 deletions arch/arm/kernel/traps.c
@@ -483,29 +483,33 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
unsigned long addr = regs->ARM_r2;
struct mm_struct *mm = current->mm;
pgd_t *pgd; pmd_t *pmd; pte_t *pte;
spinlock_t *ptl;

regs->ARM_cpsr &= ~PSR_C_BIT;
spin_lock(&mm->page_table_lock);
down_read(&mm->mmap_sem);
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, addr);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map(pmd, addr);
if (!pte_present(*pte) || !pte_write(*pte))
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
if (!pte_present(*pte) || !pte_write(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
val = *(unsigned long *)addr;
val -= regs->ARM_r0;
if (val == 0) {
*(unsigned long *)addr = regs->ARM_r1;
regs->ARM_cpsr |= PSR_C_BIT;
}
spin_unlock(&mm->page_table_lock);
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
return val;

bad_access:
spin_unlock(&mm->page_table_lock);
up_read(&mm->mmap_sem);
/* simulate a write access fault */
do_DataAbort(addr, 15 + (1 << 11), regs);
return -1;
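pte_offset_map_lock()/pte_unmap_unlock(), introduced into traps.c above,
pair the PTE lookup with taking the pte lock, which after this merge may be
a per-page-table split lock rather than mm->page_table_lock. The idiom in
isolation, as used in the hunk:

    pte_t *pte;
    spinlock_t *ptl;

    pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    if (!pte_present(*pte) || !pte_write(*pte)) {
            pte_unmap_unlock(pte, ptl);
            goto bad_access;
    }
    /* ... inspect or modify the page under ptl ... */
    pte_unmap_unlock(pte, ptl);
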
6 changes: 1 addition & 5 deletions arch/arm/mm/consistent.c
@@ -397,8 +397,6 @@ static int __init consistent_init(void)
pte_t *pte;
int ret = 0;

spin_lock(&init_mm.page_table_lock);

do {
pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
@@ -409,7 +407,7 @@ }
}
WARN_ON(!pmd_none(*pmd));

pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
if (!pte) {
printk(KERN_ERR "%s: no pte tables\n", __func__);
ret = -ENOMEM;
@@ -419,8 +417,6 @@
consistent_pte = pte;
} while (0);

spin_unlock(&init_mm.page_table_lock);

return ret;
}

7 changes: 6 additions & 1 deletion arch/arm/mm/fault-armv.c
@@ -26,6 +26,11 @@ static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
/*
* We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on.
*
* Note that the pte lock held when calling update_mmu_cache must also
* guard the pte (somewhere else in the same mm) that we modify here.
* Therefore those configurations which might call adjust_pte (those
* without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
*/
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
@@ -127,7 +132,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page);
* 2. If we have multiple shared mappings of the same space in
* an object, we need to deal with the cache aliasing issues.
*
* Note that the page_table_lock will be held.
* Note that the pte lock will be held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
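The restriction spelled out in the new fault-armv.c comment, that
configurations which may call adjust_pte() cannot use a split page table
lock, is enforced through configuration rather than at runtime. A sketch of
the mm/Kconfig logic from this era (quoted from memory; treat the exact
values as an assumption):

    config SPLIT_PTLOCK_CPUS
            int
            # never split the ptlock on ARM without VIPT caches
            default "4096" if ARM && !CPU_CACHE_VIPT
            default "4"
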
4 changes: 1 addition & 3 deletions arch/arm/mm/ioremap.c
@@ -75,7 +75,7 @@ remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,

pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
do {
pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
pte_t * pte = pte_alloc_kernel(pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
@@ -97,7 +97,6 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
phys_addr -= address;
dir = pgd_offset(&init_mm, address);
BUG_ON(address >= end);
spin_lock(&init_mm.page_table_lock);
do {
pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
if (!pmd) {
Expand All @@ -114,7 +113,6 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
dir++;
} while (address && (address < end));

spin_unlock(&init_mm.page_table_lock);
flush_cache_vmap(start, end);
return err;
}