Commit

Merge branch 'jsgf/x86/unify' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen into x86/headers
Ingo Molnar committed Feb 9, 2009
2 parents d5b5623 + fb08b20 commit 790c7eb
Showing 10 changed files with 280 additions and 294 deletions.
90 changes: 90 additions & 0 deletions arch/x86/include/asm/io.h
@@ -5,6 +5,7 @@

#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
#include <asm/page.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,95 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
#define readq readq
#define writeq writeq

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
        return __pa(address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline void *phys_to_virt(phys_addr_t address)
{
        return __va(address);
}
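
For directly mapped kernel memory these two helpers are exact inverses. A minimal usage sketch, assuming a hypothetical demo function and a kmalloc'd buffer (none of this is part of the commit):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/io.h>

/* Hypothetical illustration: round-trip a kmalloc'd buffer through
 * virt_to_phys()/phys_to_virt(). Only valid for directly mapped
 * (lowmem) addresses, not for vmalloc or highmem mappings. */
static int addr_roundtrip_demo(void)
{
        void *buf = kmalloc(64, GFP_KERNEL);
        phys_addr_t phys;

        if (!buf)
                return -ENOMEM;

        phys = virt_to_phys(buf);               /* CPU physical address */
        WARN_ON(phys_to_virt(phys) != buf);     /* the mapping inverts cleanly */

        kfree(buf);
        return 0;
}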

/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
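
Because page_to_phys() goes through the pfn rather than a virtual address, it also works for pages that have no kernel mapping (e.g. highmem). A hedged sketch, with a hypothetical demo function:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

/* Hypothetical illustration: physical address of a freshly allocated page. */
static void page_phys_demo(void)
{
        struct page *page = alloc_page(GFP_KERNEL);
        dma_addr_t phys;

        if (!page)
                return;
        phys = page_to_phys(page);      /* page_to_pfn(page) << PAGE_SHIFT */
        pr_info("page lives at phys %#llx\n", (unsigned long long)phys);
        __free_page(page);
}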

/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
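
These aliases keep legacy ISA-era drivers building; the warning above means portable PCI drivers must obtain bus addresses from the DMA API, which asks the platform for a real mapping. A hedged contrast, assuming a hypothetical helper that is handed a valid struct device:

#include <linux/dma-mapping.h>

/* Hypothetical contrast between the legacy alias and the portable API.
 * virt_to_bus() is only correct where bus address == physical address
 * (ISA on x86); dma_map_single() works on any platform. */
static dma_addr_t demo_get_bus_addr(struct device *dev, void *buf, size_t len)
{
        /* Legacy, x86-only: return virt_to_bus(buf); */

        /* Portable: the DMA layer sets up or translates the mapping
         * (a real driver would also check dma_mapping_error()). */
        return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}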

/**
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
                                unsigned long prot_val);

/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
        return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);
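
The usual pattern is map, access through the mmio helpers, unmap. A minimal sketch against a hypothetical MMIO region; the base address, size, and register layout below are made up for illustration:

#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_MMIO_BASE  0xfebf0000UL    /* hypothetical bus address */
#define DEMO_MMIO_SIZE  0x1000UL

static int demo_read_reg0(u32 *val)
{
        void __iomem *regs = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);

        if (!regs)
                return -ENOMEM;

        *val = readl(regs);     /* always go through the mmio accessors */
        iounmap(regs);
        return 0;
}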

extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);


#ifdef CONFIG_X86_32
# include "io_32.h"
#else
86 changes: 0 additions & 86 deletions arch/x86/include/asm/io_32.h
@@ -53,92 +53,6 @@
*/
#define xlate_dev_kmem_ptr(p) p

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline unsigned long virt_to_phys(volatile void *address)
{
        return __pa(address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline void *phys_to_virt(unsigned long address)
{
        return __va(address);
}

/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/**
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
                                unsigned long prot_val);

/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
        return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
59 changes: 0 additions & 59 deletions arch/x86/include/asm/io_64.h
@@ -142,67 +142,8 @@ __OUTS(l)

#include <linux/vmalloc.h>

#ifndef __i386__
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
        return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
        return __va(address);
}
#endif

/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

#include <asm-generic/iomap.h>

/*
* This one maps high address device memory and turns off caching for that area.
* It's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
                                unsigned long prot_val);

/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
        return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

void __memcpy_fromio(void *, unsigned long, unsigned);
void __memcpy_toio(unsigned long, const void *, unsigned);

15 changes: 15 additions & 0 deletions arch/x86/include/asm/page.h
@@ -95,6 +95,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd)
        return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
        return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;
@@ -117,6 +122,11 @@ static inline pudval_t native_pud_val(pud_t pud)
}
#endif /* PAGETABLE_LEVELS == 4 */

static inline pudval_t pud_flags(pud_t pud)
{
        return native_pud_val(pud) & PTE_FLAGS_MASK;
}

typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
@@ -128,6 +138,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
{
        return pmd.pmd;
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
        return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}
#else /* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>

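
The new pgd_flags()/pud_flags()/pmd_flags() helpers all mask with PTE_FLAGS_MASK, stripping the pfn and keeping only the attribute bits. A hedged sketch of the kind of predicate this enables (the demo function is illustrative, not something this commit adds):

/* Hypothetical illustration: a presence test written purely against the
 * flag bits, independent of the pfn packed into the same word. */
static inline int demo_pmd_present(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PRESENT;
}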
2 changes: 0 additions & 2 deletions arch/x86/include/asm/pgtable-2level.h
@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#define pte_none(x) (!(x).pte_low)

/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
* split up the 29 bits of offset into this range:
35 changes: 0 additions & 35 deletions arch/x86/include/asm/pgtable-3level.h
@@ -18,21 +18,6 @@
printk("%s:%d: bad pgd %p(%016Lx).\n", \
__FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
        return pud_val(pud) == 0;
}

static inline int pud_bad(pud_t pud)
{
        return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_val(pud) & _PAGE_PRESENT;
}

/* Rules for using set_pte: the pte being assigned *must* be
* either not present or in a state where the hardware will
* not attempt to update the pte. In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
                write_cr3(pgd);
}

#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))


/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
pmd_index(address))

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

static inline int pte_none(pte_t pte)
{
        return !pte.pte_low && !pte.pte_high;
}

/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
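
The helpers deleted from pgtable-3level.h reflect the PAE layout, where a pte is 64 bits stored as two 32-bit words, so tests like pte_none() and pte_same() must inspect both halves; the unification presumably re-expresses them generically in the common headers (those files are not rendered above). A standalone sketch of the layout the removed code was handling, with hypothetical names:

/* Hypothetical mock of a PAE pte: 64 bits split across two 32-bit words. */
struct demo_pae_pte {
        unsigned int pte_low;   /* flags plus the low pfn bits */
        unsigned int pte_high;  /* high pfn bits */
};

/* "none" only if both halves are clear; checking pte_low alone would
 * miss a pfn that lives entirely in the high word. */
static inline int demo_pte_none(struct demo_pae_pte pte)
{
        return !pte.pte_low && !pte.pte_high;
}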
(diffs for the remaining 4 changed files not rendered)
