x86/mm/highmem: Use generic kmap atomic implementation
Convert X86 to the generic kmap atomic implementation and make the
iomap_atomic() naming convention consistent while at it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20201103095857.375127260@linutronix.de
Thomas Gleixner committed Nov 6, 2020
1 parent 389755c commit 157e118
Showing 12 changed files with 31 additions and 161 deletions.
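
The conversion replaces x86's private kmap_atomic machinery with the generic KMAP_LOCAL implementation in mm/highmem.c. For orientation, a minimal sketch of the generic map path this commit switches to (simplified, not verbatim; the helpers kmap_get_pte() and kmap_local_idx_push() are taken from the generic highmem series):

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

	preempt_disable();
	/* Per-CPU stack of fixmap slots, replacing the old KM_TYPE_NR logic. */
	idx = kmap_local_idx_push() + KM_MAX_IDX * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pteval = pfn_pte(pfn, prot);
	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);	/* x86: lazy MMU flush */
	preempt_enable();

	return (void *)vaddr;
}

The arch_kmap_local_post_map() hook used above is one of the two hooks this commit adds to arch/x86/include/asm/highmem.h.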
3 changes: 2 additions & 1 deletion arch/x86/Kconfig
@@ -14,10 +14,11 @@ config X86_32
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select CLKSRC_I8253
 	select CLONE_BACKWARDS
+	select GENERIC_VDSO_32
 	select HAVE_DEBUG_STACKOVERFLOW
+	select KMAP_LOCAL
 	select MODULES_USE_ELF_REL
 	select OLD_SIGACTION
-	select GENERIC_VDSO_32
 
 config X86_64
 	def_bool y
5 changes: 2 additions & 3 deletions arch/x86/include/asm/fixmap.h
@@ -31,7 +31,7 @@
 #include <asm/pgtable_types.h>
 #ifdef CONFIG_X86_32
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
 #else
 #include <uapi/asm/vsyscall.h>
 #endif
@@ -94,7 +94,7 @@ enum fixed_addresses {
 #endif
 #ifdef CONFIG_X86_32
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
-	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
@@ -151,7 +151,6 @@ extern void reserve_top_address(unsigned long reserve);

 extern int fixmaps_set;
 
-extern pte_t *kmap_pte;
 extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
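
The switch from KM_TYPE_NR to KM_MAX_IDX (provided by <asm/kmap_size.h>) keeps the fixmap layout: each CPU owns KM_MAX_IDX consecutive temporary-mapping slots. A hypothetical helper, for illustration only, showing how a (cpu, idx) pair maps to a virtual address:

/* Illustration only; not part of this commit. */
static inline unsigned long kmap_slot_vaddr(unsigned int cpu, int idx)
{
	/* CPU n's slots start KM_MAX_IDX * n entries past FIX_KMAP_BEGIN. */
	return __fix_to_virt(FIX_KMAP_BEGIN + cpu * KM_MAX_IDX + idx);
}

With NR_CPUS CPUs the window therefore covers KM_MAX_IDX * NR_CPUS ptes, which is exactly what the new FIX_KMAP_END expression reserves.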
13 changes: 9 additions & 4 deletions arch/x86/include/asm/highmem.h
@@ -23,7 +23,6 @@

 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/fixmap.h>
@@ -58,11 +57,17 @@ extern unsigned long highstart_pfn, highend_pfn;
 #define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-void *kmap_atomic_pfn(unsigned long pfn);
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
-
 #define flush_cache_kmaps()	do { } while (0)
 
+#define arch_kmap_local_post_map(vaddr, pteval)		\
+	arch_flush_lazy_mmu_mode()
+
+#define arch_kmap_local_post_unmap(vaddr)		\
+	do {						\
+		flush_tlb_one_kernel((vaddr));		\
+		arch_flush_lazy_mmu_mode();		\
+	} while (0)
+
 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
 					      unsigned long end_pfn);

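
The two new macros are the x86 back-ends for the generic code's post-map/post-unmap hook points. A sketch, under the assumption that the generic unmap path clears the pte itself and then defers TLB maintenance to the architecture (not verbatim kernel code):

static void kunmap_local_slot(unsigned long vaddr, pte_t *ptep)
{
	pte_clear(&init_mm, vaddr, ptep);	/* drop the temporary mapping */
	arch_kmap_local_post_unmap(vaddr);	/* x86: flush_tlb_one_kernel() + lazy MMU flush */
	kmap_local_idx_pop();			/* release the per-CPU slot */
}

On the map side only arch_flush_lazy_mmu_mode() is needed, so that batched paravirt pte updates become visible before the new mapping is used.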
18 changes: 10 additions & 8 deletions arch/x86/include/asm/iomap.h
@@ -9,19 +9,21 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
 
-void
-iounmap_atomic(void __iomem *kvaddr);
+static inline void iounmap_atomic(void __iomem *vaddr)
+{
+	kunmap_local_indexed((void __force *)vaddr);
+	pagefault_enable();
+	preempt_enable();
+}
 
-int
-iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
 
-void
-iomap_free(resource_size_t base, unsigned long size);
+void iomap_free(resource_size_t base, unsigned long size);
 
 #endif /* _ASM_X86_IOMAP_H */
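
With iounmap_atomic() now inline, the header shows the whole life cycle of an atomic iomap. A hypothetical usage sketch (base, size and the written value are placeholders; error handling is elided):

static void iomap_atomic_demo(resource_size_t base, unsigned long size)
{
	pgprot_t prot;
	void __iomem *va;

	if (iomap_create_wc(base, size, &prot))	/* reserve a WC memtype for the range */
		return;

	va = iomap_atomic_pfn_prot(PHYS_PFN(base), prot); /* preemption/pagefaults disabled here */
	writel(0x1, va);			/* MMIO access inside the atomic window */
	iounmap_atomic(va);			/* unmaps the slot, re-enables both */

	iomap_free(base, size);
}

Note that the enable order in iounmap_atomic() mirrors the disable order in iomap_atomic_pfn_prot() below.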
13 changes: 0 additions & 13 deletions arch/x86/include/asm/kmap_types.h

This file was deleted.

1 change: 0 additions & 1 deletion arch/x86/include/asm/paravirt_types.h
@@ -41,7 +41,6 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/desc_defs.h>
-#include <asm/kmap_types.h>
 #include <asm/pgtable_types.h>
 #include <asm/nospec-branch.h>

59 changes: 0 additions & 59 deletions arch/x86/mm/highmem_32.c
@@ -4,65 +4,6 @@
 #include <linux/swap.h> /* for totalram_pages */
 #include <linux/memblock.h>
 
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
-	return kmap_atomic_prot_pfn(pfn, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-
-void kunmap_atomic_high(void *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-		arch_flush_lazy_mmu_mode();
-	}
-#ifdef CONFIG_DEBUG_HIGHMEM
-	else {
-		BUG_ON(vaddr < PAGE_OFFSET);
-		BUG_ON(vaddr >= (unsigned long)high_memory);
-	}
-#endif
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
 void __init set_highmem_pages_init(void)
 {
 	struct zone *zone;
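
All of the removed code is subsumed by the generic implementation; callers of the kmap_atomic() API are unaffected. A minimal sketch of a typical (unchanged) user:

static void copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);	/* now serviced by the generic KMAP_LOCAL code */

	memcpy(dst, src, len);
	kunmap_atomic(src);		/* clears the slot pte and flushes the TLB */
}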
15 changes: 0 additions & 15 deletions arch/x86/mm/init_32.c
@@ -394,19 +394,6 @@ kernel_physical_mapping_init(unsigned long start,
 	return last_map_addr;
 }
 
-pte_t *kmap_pte;
-
-static void __init kmap_init(void)
-{
-	unsigned long kmap_vstart;
-
-	/*
-	 * Cache the first kmap pte:
-	 */
-	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-	kmap_pte = virt_to_kpte(kmap_vstart);
-}
-
 #ifdef CONFIG_HIGHMEM
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
@@ -712,8 +699,6 @@ void __init paging_init(void)

 	__flush_tlb_all();
 
-	kmap_init();
-
 	/*
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */
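
kmap_init() and the boot-time kmap_pte cache disappear because the generic code can locate the kmap pte itself. A sketch of how that lookup is presumably done on the generic side (an assumption, modelled on the kmap_init() body deleted above):

/* Assumed generic-side equivalent of the deleted kmap_init(). */
static pte_t *kmap_get_pte(void)
{
	static pte_t *kmap_pte;

	if (!kmap_pte)
		kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return kmap_pte;
}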
59 changes: 5 additions & 54 deletions arch/x86/mm/iomap_32.c
@@ -44,28 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	preempt_disable();
-	pagefault_disable();
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-
-/*
- * Map 'pfn' using protections 'prot'
- */
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -81,36 +60,8 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
-}
-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
-
-void
-iounmap_atomic(void __iomem *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-	}
-
-	pagefault_enable();
-	preempt_enable();
+	preempt_disable();
+	pagefault_disable();
+	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
2 changes: 1 addition & 1 deletion include/linux/highmem.h
@@ -217,7 +217,7 @@ static inline void __kunmap_atomic(void *addr)
 #endif /* CONFIG_HIGHMEM */
 
 #if !defined(CONFIG_KMAP_LOCAL)
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+#if defined(CONFIG_HIGHMEM)
 
 DECLARE_PER_CPU(int, __kmap_atomic_idx);
 
2 changes: 1 addition & 1 deletion include/linux/io-mapping.h
@@ -69,7 +69,7 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,

 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
-	return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
+	return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
 }
 
 static inline void
2 changes: 1 addition & 1 deletion mm/highmem.c
@@ -32,7 +32,7 @@
 #include <linux/vmalloc.h>
 
 #ifndef CONFIG_KMAP_LOCAL
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+#ifdef CONFIG_HIGHMEM
 DEFINE_PER_CPU(int, __kmap_atomic_idx);
 #endif
 #endif
