diff --git a/[refs] b/[refs]
index 2999c4aec1ae..7c67aa397ed5 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7c1c3eb855b5311118bed3b51c79d652d40148ec
+refs/heads/master: 49e31ca8387227898710f99476f2217ea154aab0
diff --git a/trunk/arch/mips/oprofile/common.c b/trunk/arch/mips/oprofile/common.c
index 935dd851f480..49ef8fd338d6 100644
--- a/trunk/arch/mips/oprofile/common.c
+++ b/trunk/arch/mips/oprofile/common.c
@@ -114,5 +114,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
 void oprofile_arch_exit(void)
 {
-        model->exit();
+        if (model)
+                model->exit();
 }
diff --git a/trunk/fs/splice.c b/trunk/fs/splice.c
index a46ddd28561e..447ebc0a37f3 100644
--- a/trunk/fs/splice.c
+++ b/trunk/fs/splice.c
@@ -279,7 +279,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	pgoff_t index, end_index;
 	loff_t isize;
 	size_t total_len;
-	int error, page_nr;
+	int error;
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
@@ -299,74 +299,46 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 	 * read-ahead if this is a non-zero offset (we are likely doing small
 	 * chunk splice and the page is already there) for a single page.
 	 */
-	if (!loff || nr_pages > 1)
-		page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
+	if (!loff || spd.nr_pages > 1)
+		do_page_cache_readahead(mapping, in, index, spd.nr_pages);
 
 	/*
 	 * Now fill in the holes:
 	 */
 	error = 0;
 	total_len = 0;
+	for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
+		unsigned int this_len;
 
-	/*
-	 * Lookup the (hopefully) full range of pages we need.
-	 */
-	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
+		if (!len)
+			break;
 
-	/*
-	 * If find_get_pages_contig() returned fewer pages than we needed,
-	 * allocate the rest.
-	 */
-	index += spd.nr_pages;
-	while (spd.nr_pages < nr_pages) {
 		/*
-		 * Page could be there, find_get_pages_contig() breaks on
-		 * the first hole.
+		 * this_len is the max we'll use from this page
+		 */
+		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+find_page:
+		/*
+		 * lookup the page for this index
 		 */
 		page = find_get_page(mapping, index);
 		if (!page) {
 			/*
-			 * page didn't exist, allocate one.
+			 * page didn't exist, allocate one
 			 */
 			page = page_cache_alloc_cold(mapping);
 			if (!page)
 				break;
 
 			error = add_to_page_cache_lru(page, mapping, index,
-					mapping_gfp_mask(mapping));
+					      mapping_gfp_mask(mapping));
 			if (unlikely(error)) {
 				page_cache_release(page);
 				break;
 			}
-			/*
-			 * add_to_page_cache() locks the page, unlock it
-			 * to avoid convoluting the logic below even more.
-			 */
-			unlock_page(page);
-		}
-
-		pages[spd.nr_pages++] = page;
-		index++;
-	}
-
-	/*
-	 * Now loop over the map and see if we need to start IO on any
-	 * pages, fill in the partial map, etc.
-	 */
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	nr_pages = spd.nr_pages;
-	spd.nr_pages = 0;
-	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
-		unsigned int this_len;
-
-		if (!len)
-			break;
-
-		/*
-		 * this_len is the max we'll use from this page
-		 */
-		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
-		page = pages[page_nr];
+
+			goto readpage;
+		}
 
 		/*
 		 * If the page isn't uptodate, we may need to start io on it
@@ -388,6 +360,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 			 */
 			if (!page->mapping) {
 				unlock_page(page);
+				page_cache_release(page);
 				break;
 			}
 			/*
@@ -398,20 +371,16 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 				goto fill_it;
 			}
 
+readpage:
 			/*
 			 * need to read in the page
 			 */
 			error = mapping->a_ops->readpage(in, page);
+
 			if (unlikely(error)) {
-				/*
-				 * We really should re-lookup the page here,
-				 * but it complicates things a lot. Instead
-				 * lets just do what we already stored, and
-				 * we'll get it the next time we are called.
-				 */
+				page_cache_release(page);
 				if (error == AOP_TRUNCATED_PAGE)
-					error = 0;
-
+					goto find_page;
 				break;
 			}
 
@@ -420,8 +389,10 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 			 */
 			isize = i_size_read(mapping->host);
 			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-			if (unlikely(!isize || index > end_index))
+			if (unlikely(!isize || index > end_index)) {
+				page_cache_release(page);
 				break;
+			}
 
 			/*
 			 * if this is the last page, see if we need to shrink
@@ -429,33 +400,27 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 			 */
 			if (end_index == index) {
 				loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
-				if (total_len + loff > isize)
+				if (total_len + loff > isize) {
+					page_cache_release(page);
 					break;
+				}
 				/*
 				 * force quit after adding this page
 				 */
-				len = this_len;
+				nr_pages = spd.nr_pages;
 				this_len = min(this_len, loff);
 				loff = 0;
 			}
 		}
 fill_it:
-		partial[page_nr].offset = loff;
-		partial[page_nr].len = this_len;
+		pages[spd.nr_pages] = page;
+		partial[spd.nr_pages].offset = loff;
+		partial[spd.nr_pages].len = this_len;
 		len -= this_len;
 		total_len += this_len;
 		loff = 0;
-		spd.nr_pages++;
-		index++;
 	}
 
-	/*
-	 * Release any pages at the end, if we quit early. 'i' is how far
-	 * we got, 'nr_pages' is how many pages are in the map.
-	 */
-	while (page_nr < nr_pages)
-		page_cache_release(pages[page_nr++]);
-
 	if (spd.nr_pages)
 		return splice_to_pipe(pipe, &spd);
 
diff --git a/trunk/include/asm-i386/pgtable-2level.h b/trunk/include/asm-i386/pgtable-2level.h
index 2756d4b04c27..27bde973abc7 100644
--- a/trunk/include/asm-i386/pgtable-2level.h
+++ b/trunk/include/asm-i386/pgtable-2level.h
@@ -18,9 +18,6 @@
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-
 #define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte_low, 0))
 
 #define pte_same(a, b)		((a).pte_low == (b).pte_low)
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
diff --git a/trunk/include/asm-i386/pgtable-3level.h b/trunk/include/asm-i386/pgtable-3level.h
index dccb1b3337ad..36a5aa63cbbf 100644
--- a/trunk/include/asm-i386/pgtable-3level.h
+++ b/trunk/include/asm-i386/pgtable-3level.h
@@ -85,26 +85,6 @@ static inline void pud_clear (pud_t * pud) { }
 #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
 			pmd_index(address))
 
-/*
- * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
- * entry, so clear the bottom half first and enforce ordering with a compiler
- * barrier.
- */
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	ptep->pte_low = 0;
-	smp_wmb();
-	ptep->pte_high = 0;
-}
-
-static inline void pmd_clear(pmd_t *pmd)
-{
-	u32 *tmp = (u32 *)pmd;
-	*tmp = 0;
-	smp_wmb();
-	*(tmp + 1) = 0;
-}
-
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t res;
diff --git a/trunk/include/asm-i386/pgtable.h b/trunk/include/asm-i386/pgtable.h
index 672c3f76b9df..ee056c41a9fb 100644
--- a/trunk/include/asm-i386/pgtable.h
+++ b/trunk/include/asm-i386/pgtable.h
@@ -204,10 +204,12 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 extern unsigned long pg0[];
 
 #define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
 #define pmd_none(x)	(!(unsigned long)pmd_val(x))
 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
 
@@ -266,7 +268,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 	pte_t pte;
 	if (full) {
 		pte = *ptep;
-		pte_clear(mm, addr, ptep);
+		*ptep = __pte(0);
 	} else {
 		pte = ptep_get_and_clear(mm, addr, ptep);
 	}
diff --git a/trunk/include/linux/pagemap.h b/trunk/include/linux/pagemap.h
index 7a1af574dedf..9539efd4f7e6 100644
--- a/trunk/include/linux/pagemap.h
+++ b/trunk/include/linux/pagemap.h
@@ -78,8 +78,6 @@ extern struct page * find_or_create_page(struct address_space *mapping,
 				unsigned long index, gfp_t gfp_mask);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 			unsigned int nr_pages, struct page **pages);
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
-			       unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 			int tag, unsigned int nr_pages, struct page **pages);
 
diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c
index fd57442186cb..3ef20739e725 100644
--- a/trunk/mm/filemap.c
+++ b/trunk/mm/filemap.c
@@ -697,38 +697,6 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	return ret;
 }
 
-/**
- * find_get_pages_contig - gang contiguous pagecache lookup
- * @mapping:	The address_space to search
- * @index:	The starting page index
- * @nr_pages:	The maximum number of pages
- * @pages:	Where the resulting pages are placed
- *
- * find_get_pages_contig() works exactly like find_get_pages(), except
- * that the returned number of pages are guaranteed to be contiguous.
- *
- * find_get_pages_contig() returns the number of pages which were found.
- */
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
-			       unsigned int nr_pages, struct page **pages)
-{
-	unsigned int i;
-	unsigned int ret;
-
-	read_lock_irq(&mapping->tree_lock);
-	ret = radix_tree_gang_lookup(&mapping->page_tree,
-				     (void **)pages, index, nr_pages);
-	for (i = 0; i < ret; i++) {
-		if (pages[i]->mapping == NULL || pages[i]->index != index)
-			break;
-
-		page_cache_get(pages[i]);
-		index++;
-	}
-	read_unlock_irq(&mapping->tree_lock);
-	return i;
-}
-
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'.  We update *index to index the next page for the traversal.
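Note on the fs/splice.c hunks: the loop restored above pins each page with a reference from find_get_page() (or takes one at allocation), so every early exit that follows — a truncated mapping, a short file, an oversized final chunk — now pairs with a page_cache_release(), and a ->readpage() failure of AOP_TRUNCATED_PAGE releases the page and jumps back to find_page to look the index up again rather than keeping a stale reference. What follows is a minimal userspace sketch of that retry-and-release shape, not kernel code; every name in it (struct cpage, cache_lookup, cache_alloc_insert, read_page, cpage_put, get_uptodate_page) is a hypothetical stand-in for the corresponding kernel helper.

/*
 * sketch.c - userspace model of the find_page/readpage retry loop.
 * cache_lookup(), cache_alloc_insert(), read_page() and cpage_put()
 * stand in for find_get_page(), page_cache_alloc_cold() +
 * add_to_page_cache_lru(), ->readpage() and page_cache_release().
 */
#include <stdio.h>
#include <stdlib.h>

#define AOP_TRUNCATED	1	/* models AOP_TRUNCATED_PAGE */

struct cpage {
	int refcount;
	int uptodate;
};

static struct cpage *slot;	/* one-slot stand-in for the page cache */

static struct cpage *cache_lookup(void)	/* lookup returns a held reference */
{
	if (slot)
		slot->refcount++;
	return slot;
}

static struct cpage *cache_alloc_insert(void)	/* allocate + insert, ref held */
{
	slot = calloc(1, sizeof(*slot));
	if (slot)
		slot->refcount = 1;
	return slot;
}

static void cpage_put(struct cpage *p)	/* drop the reference taken at lookup */
{
	if (--p->refcount == 0) {
		if (p == slot)
			slot = NULL;
		free(p);
	}
}

static int read_page(struct cpage *p)	/* models ->readpage(); always succeeds here */
{
	p->uptodate = 1;
	return 0;
}

/*
 * The pattern itself: every failure path drops the reference it holds,
 * and a read that raced with truncation goes back to find_page instead
 * of handing a stale entry to the caller.
 */
static struct cpage *get_uptodate_page(void)
{
	struct cpage *p;
	int err;

find_page:
	p = cache_lookup();
	if (!p) {
		p = cache_alloc_insert();
		if (!p)
			return NULL;
		goto readpage;
	}
	if (p->uptodate)
		return p;

readpage:
	err = read_page(p);
	if (err) {
		cpage_put(p);			/* drop our reference first */
		if (err == AOP_TRUNCATED)
			goto find_page;		/* entry vanished: re-lookup */
		return NULL;
	}
	return p;
}

int main(void)
{
	struct cpage *p = get_uptodate_page();

	if (!p)
		return 1;
	printf("uptodate=%d refcount=%d\n", p->uptodate, p->refcount);
	cpage_put(p);
	return 0;
}

Built with something like cc -Wall sketch.c, this prints uptodate=1 refcount=1: the caller ends up holding exactly one reference to an uptodate entry, which is the invariant the added page_cache_release() calls re-establish on every break path in the patch above.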