sparc64: implement the new page table range API
Add set_ptes(), update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().  Convert the PG_dcache_dirty flag from being
per-page to per-folio.

Link: https://lkml.kernel.org/r/20230802151406.3735276-27-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
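
For readers new to the range API: the short sketch below is not part of this commit; the helper name map_folio_example and its parameters are hypothetical. Under those assumptions it shows roughly how a generic-MM caller is expected to drive the interfaces this patch implements for sparc64: one set_ptes()/update_mmu_cache_range() call per folio instead of one set_pte_at()/update_mmu_cache() call per base page.

/*
 * Illustrative sketch only (not from this commit).  The helper and its
 * parameters are hypothetical; the calls are the range-API entry points
 * added or kept by this conversion.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>

static void map_folio_example(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep, struct folio *folio, pgprot_t prot)
{
	unsigned int nr = folio_nr_pages(folio);
	pte_t pte = mk_pte(&folio->page, prot);

	/* A no-op on sparc64 (see the flush_icache_pages() stub below). */
	flush_icache_pages(vma, &folio->page, nr);

	/* One call installs nr consecutive PTEs; sparc64's set_ptes() in
	 * the diff below advances the address and PFN by one page per
	 * iteration under lazy-MMU batching. */
	set_ptes(vma->vm_mm, addr, ptep, pte, nr);

	/* One cache-preload hook for the whole range, rather than nr
	 * separate update_mmu_cache() calls. */
	update_mmu_cache_range(NULL, vma, addr, ptep, nr);
}

The PG_dcache_dirty conversion mentioned above follows the same per-folio pattern: the dirty flag, and the deferred flush it triggers, now apply to a whole folio rather than to each base page.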
Matthew Wilcox (Oracle) authored and Andrew Morton committed Aug 24, 2023
1 parent 665f640 commit 1a10a44
Showing 5 changed files with 119 additions and 67 deletions.
18 changes: 12 additions & 6 deletions arch/sparc/include/asm/cacheflush_64.h
@@ -35,20 +35,26 @@ void flush_icache_range(unsigned long start, unsigned long end);
void __flush_icache_page(unsigned long);

void __flush_dcache_page(void *addr, int flush_icache);
void flush_dcache_page_impl(struct page *page);
void flush_dcache_folio_impl(struct folio *folio);
#ifdef CONFIG_SMP
void smp_flush_dcache_page_impl(struct page *page, int cpu);
void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
void smp_flush_dcache_folio_impl(struct folio *folio, int cpu);
void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#define smp_flush_dcache_folio_impl(folio, cpu) flush_dcache_folio_impl(folio)
#define flush_dcache_folio_all(mm, folio) flush_dcache_folio_impl(folio)
#endif

void __flush_dcache_range(unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
static inline void flush_dcache_page(struct page *page)
{
flush_dcache_folio(page_folio(page));
}

#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_pages(vma, pg, nr) do { } while(0)

void flush_ptrace_access(struct vm_area_struct *, struct page *,
unsigned long uaddr, void *kaddr,
29 changes: 22 additions & 7 deletions arch/sparc/include/asm/pgtable_64.h
@@ -86,6 +86,7 @@ extern unsigned long VMALLOC_END;
#define vmemmap ((struct page *)VMEMMAP_BASE)

#include <linux/sched.h>
#include <asm/tlbflush.h>

bool kern_addr_valid(unsigned long addr);

@@ -927,8 +928,21 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}

#define set_pte_at(mm,addr,ptep,pte) \
__set_pte_at((mm), (addr), (ptep), (pte), 0)
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
arch_enter_lazy_mmu_mode();
for (;;) {
__set_pte_at(mm, addr, ptep, pte, 0);
if (--nr == 0)
break;
ptep++;
pte_val(pte) += PAGE_SIZE;
addr += PAGE_SIZE;
}
arch_leave_lazy_mmu_mode();
}
#define set_ptes set_ptes

#define pte_clear(mm,addr,ptep) \
set_pte_at((mm), (addr), (ptep), __pte(0UL))
@@ -947,8 +961,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
\
if (pfn_valid(this_pfn) && \
(((old_addr) ^ (new_addr)) & (1 << 13))) \
flush_dcache_page_all(current->mm, \
pfn_to_page(this_pfn)); \
flush_dcache_folio_all(current->mm, \
page_folio(pfn_to_page(this_pfn))); \
} \
newpte; \
})
@@ -963,7 +977,10 @@ struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
void update_mmu_cache_range(struct vm_fault *, struct vm_area_struct *,
unsigned long addr, pte_t *ptep, unsigned int nr);
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
@@ -1121,8 +1138,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
}
#define pte_access_permitted pte_access_permitted

#include <asm/tlbflush.h>

/* We provide our own get_unmapped_area to cope with VA holes and
* SHM area cache aliasing for userland.
*/
56 changes: 36 additions & 20 deletions arch/sparc/kernel/smp_64.c
@@ -921,20 +921,26 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
static inline void __local_flush_dcache_folio(struct folio *folio)
{
unsigned int i, nr = folio_nr_pages(folio);

#ifdef DCACHE_ALIASING_POSSIBLE
__flush_dcache_page(page_address(page),
for (i = 0; i < nr; i++)
__flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
((tlb_type == spitfire) &&
page_mapping_file(page) != NULL));
folio_flush_mapping(folio) != NULL));
#else
if (page_mapping_file(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page_address(page)));
if (folio_flush_mapping(folio) != NULL &&
tlb_type == spitfire) {
unsigned long pfn = folio_pfn(folio);
for (i = 0; i < nr; i++)
__flush_icache_page((pfn + i) * PAGE_SIZE);
}
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
void smp_flush_dcache_folio_impl(struct folio *folio, int cpu)
{
int this_cpu;

@@ -948,33 +954,38 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
this_cpu = get_cpu();

if (cpu == this_cpu) {
__local_flush_dcache_page(page);
__local_flush_dcache_folio(folio);
} else if (cpu_online(cpu)) {
void *pg_addr = page_address(page);
void *pg_addr = folio_address(folio);
u64 data0 = 0;

if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (page_mapping_file(page) != NULL)
if (folio_flush_mapping(folio) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
}
if (data0) {
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpumask_of(cpu));
unsigned int i, nr = folio_nr_pages(folio);

for (i = 0; i < nr; i++) {
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
atomic_inc(&dcpage_flushes_xcall);
#endif
pg_addr += PAGE_SIZE;
}
}
}

put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio)
{
void *pg_addr;
u64 data0;
@@ -988,24 +999,29 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
atomic_inc(&dcpage_flushes);
#endif
data0 = 0;
pg_addr = page_address(page);
pg_addr = folio_address(folio);
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (page_mapping_file(page) != NULL)
if (folio_flush_mapping(folio) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
}
if (data0) {
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpu_online_mask);
unsigned int i, nr = folio_nr_pages(folio);

for (i = 0; i < nr; i++) {
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
atomic_inc(&dcpage_flushes_xcall);
#endif
pg_addr += PAGE_SIZE;
}
}
__local_flush_dcache_page(page);
__local_flush_dcache_folio(folio);

preempt_enable();
}