mm: remove checks for pte_index
Since pte_index is always defined, we don't need to check whether it's
defined or not.  Delete the slow version that doesn't depend on it and
remove the #define since nobody needs to test for it.

Link: https://lkml.kernel.org/r/20230819031837.3160096-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Christian Dietrich <stettberger@dokucode.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Matthew Wilcox (Oracle) authored and Andrew Morton committed Aug 24, 2023
1 parent 14a405c commit bb7dbaa
Showing 2 changed files with 1 addition and 17 deletions.
1 change: 0 additions & 1 deletion include/linux/pgtable.h
@@ -63,7 +63,6 @@ static inline unsigned long pte_index(unsigned long address)
 {
 	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 }
-#define pte_index pte_index
 
 #ifndef pmd_index
 static inline unsigned long pmd_index(unsigned long address)
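
For context, pte_index() extracts an address's slot within its page table: shift off the in-page offset, then mask with the table size. A minimal userspace sketch of the same arithmetic, assuming illustrative x86-64 constants (PAGE_SHIFT = 12 and PTRS_PER_PTE = 512, i.e. 4 KiB pages and 512 entries per table; neither value is part of this commit):

#include <stdio.h>

/* Illustrative constants (x86-64 with 4 KiB pages); each
 * architecture defines its own values in the kernel.
 */
#define PAGE_SHIFT	12
#define PTRS_PER_PTE	512

/* Same arithmetic as pte_index() in include/linux/pgtable.h. */
static unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	/* 0x201000 >> 12 == 0x201; 0x201 & 0x1ff == 1: PTE slot 1. */
	printf("%lu\n", pte_index(0x201000UL));
	return 0;
}

Since this definition is now provided unconditionally by the generic header, callers such as insert_pages() below no longer need an #ifdef guard.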
17 changes: 1 addition & 16 deletions mm/memory.c
@@ -1869,7 +1869,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	return retval;
 }
 
-#ifdef pte_index
 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 			unsigned long addr, struct page *page, pgprot_t prot)
 {
@@ -1884,7 +1883,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 }
 
 /* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop. Arch *must* define pte_index.
+ * when inserting pages in a loop.
  */
 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num, pgprot_t prot)
@@ -1943,7 +1942,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	*num = remaining_pages_total;
 	return ret;
 }
-#endif /* ifdef pte_index */
 
 /**
  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
@@ -1963,7 +1961,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num)
 {
-#ifdef pte_index
 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
 
 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
@@ -1975,18 +1972,6 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	}
 	/* Defer page refcount checking till we're about to map that page. */
 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
-#else
-	unsigned long idx = 0, pgcount = *num;
-	int err = -EINVAL;
-
-	for (; idx < pgcount; ++idx) {
-		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
-		if (err)
-			break;
-	}
-	*num = pgcount - idx;
-	return err;
-#endif /* ifdef pte_index */
 }
 EXPORT_SYMBOL(vm_insert_pages);
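
With the fallback loop gone, vm_insert_pages() unconditionally takes the batched path. A sketch of how a caller might use it — my_mmap(), buf_pages, and buf_npages are hypothetical names invented for illustration, not part of this commit:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state, for illustration only. */
static struct page **buf_pages;
static unsigned long buf_npages;

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long num = buf_npages;
	int err;

	/*
	 * One call inserts the whole batch; insert_pages() takes the
	 * PTE lock once per PMD-sized chunk rather than once per page.
	 */
	err = vm_insert_pages(vma, vma->vm_start, buf_pages, &num);
	if (err)
		/* On return, num holds the count of pages NOT inserted. */
		pr_warn("inserted %lu of %lu pages (err %d)\n",
			buf_npages - num, buf_npages, err);
	return err;
}

This batching is the spinlock amortization the comment in the diff refers to; previously, architectures without pte_index fell back to a one-page-at-a-time vm_insert_page() loop.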

