microblaze/nommu: use the generic uncached segment support
Stop providing our own arch alloc/free hooks for nommu platforms and
just expose the segment offset and use the generic dma-direct
allocator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
Christoph Hellwig authored and Michal Simek committed Aug 30, 2019
1 parent a55aa89 commit d3b9f65
Showing 2 changed files with 43 additions and 52 deletions.
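
[Editor's note: for context, the generic path this commit opts into works roughly as follows. With ARCH_HAS_UNCACHED_SEGMENT selected, the dma-direct allocator grabs ordinary pages, asks the architecture to write back and invalidate any cache lines covering them via arch_dma_prep_coherent(), and then returns the architecture's uncached alias from uncached_kernel_address(). A condensed sketch of that caller — simplified from kernel/dma/direct.c of this era, with error handling and GFP/zone details omitted, so treat it as illustrative rather than the exact upstream code:

void *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        void *ret;

        /* Grab ordinary pages from the page allocator (details elided). */
        page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
        if (!page)
                return NULL;

        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = phys_to_dma(dev, page_to_phys(page));

        if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
            !dev_is_dma_coherent(dev)) {
                /* Write back/invalidate cachelines covering the buffer... */
                arch_dma_prep_coherent(page, size);
                /* ...then hand out the arch's uncached alias. */
                ret = uncached_kernel_address(ret);
        }
        return ret;
}

This is why the arch-specific arch_dma_alloc()/arch_dma_free() below can be deleted for !MMU: the two small hooks are enough.]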
arch/microblaze/Kconfig: 2 additions & 0 deletions
@@ -5,9 +5,11 @@ config MICROBLAZE
        select ARCH_NO_SWAP
        select ARCH_HAS_BINFMT_FLAT if !MMU
        select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
+       select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       select ARCH_HAS_UNCACHED_SEGMENT if !MMU
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
        select ARCH_WANT_IPC_PARSE_VERSION
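[Editor's note: the two new selects commit the architecture to providing matching helpers for the common DMA code. As a reference point, the prototypes as I recall them from include/linux/dma-noncoherent.h of this era — verify against the tree:

void arch_dma_prep_coherent(struct page *page, size_t size);  /* ARCH_HAS_DMA_PREP_COHERENT */
void *uncached_kernel_address(void *addr);                    /* ARCH_HAS_UNCACHED_SEGMENT  */
void *cached_kernel_address(void *addr);                      /* ARCH_HAS_UNCACHED_SEGMENT  */]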
arch/microblaze/mm/consistent.c: 41 additions & 52 deletions
@@ -42,34 +42,58 @@
 #include <asm/cpuinfo.h>
 #include <asm/tlbflush.h>

-#ifndef CONFIG_MMU
-/* I have to use dcache values because I can't rely on ram size */
-# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
-#endif
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+       phys_addr_t paddr = page_to_phys(page);

+       flush_dcache_range(paddr, paddr + size);
+}
+
+#ifndef CONFIG_MMU
 /*
- * Consistent memory allocators. Used for DMA devices that want to
- * share uncached memory with the processor core.
- * My crufty no-MMU approach is simple. In the HW platform we can optionally
- * mirror the DDR up above the processor cacheable region. So, memory accessed
- * in this mirror region will not be cached. It's allocated from the same
- * pool as normal memory, but the handle we return is shifted up into the
- * uncached region. This will no doubt cause big problems if memory allocated
- * here is not also freed properly. -- JW
+ * Consistent memory allocators. Used for DMA devices that want to share
+ * uncached memory with the processor core. My crufty no-MMU approach is
+ * simple. In the HW platform we can optionally mirror the DDR up above the
+ * processor cacheable region. So, memory accessed in this mirror region will
+ * not be cached. It's allocated from the same pool as normal memory, but the
+ * handle we return is shifted up into the uncached region. This will no doubt
+ * cause big problems if memory allocated here is not also freed properly. -- JW
+ *
+ * I have to use dcache values because I can't rely on ram size:
  */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
+#else
+#define UNCACHED_SHADOW_MASK 0
+#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
+
+void *uncached_kernel_address(void *ptr)
+{
+       unsigned long addr = (unsigned long)ptr;
+
+       addr |= UNCACHED_SHADOW_MASK;
+       if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
+               pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
+       return (void *)addr;
+}
+
+void *cached_kernel_address(void *ptr)
+{
+       unsigned long addr = (unsigned long)ptr;
+
+       return (void *)(addr & ~UNCACHED_SHADOW_MASK);
+}
+#else /* CONFIG_MMU */
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
 {
        unsigned long order, vaddr;
        void *ret;
        unsigned int i, err = 0;
        struct page *page, *end;
-
-#ifdef CONFIG_MMU
        phys_addr_t pa;
        struct vm_struct *area;
        unsigned long va;
-#endif

        if (in_interrupt())
                BUG();
@@ -86,26 +110,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
         * we need to ensure that there are no cachelines in use,
         * or worse dirty in this area.
         */
-       flush_dcache_range(virt_to_phys((void *)vaddr),
-                       virt_to_phys((void *)vaddr) + size);
-
-#ifndef CONFIG_MMU
-       ret = (void *)vaddr;
-       /*
-        * Here's the magic! Note if the uncached shadow is not implemented,
-        * it's up to the calling code to also test that condition and make
-        * other arrangements, such as manually flushing the cache and so on.
-        */
-# ifdef CONFIG_XILINX_UNCACHED_SHADOW
-       ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
-# endif
-       if ((unsigned int)ret > cpuinfo.dcache_base &&
-                       (unsigned int)ret < cpuinfo.dcache_high)
-               pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
+       arch_dma_prep_coherent(virt_to_page((unsigned long)vaddr), size);

-       /* dma_handle is same as physical (shadowed) address */
-       *dma_handle = (dma_addr_t)ret;
-#else
        /* Allocate some common virtual space to map the new pages. */
        area = get_vm_area(size, VM_ALLOC);
        if (!area) {
@@ -117,7 +123,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,

        /* This gives us the real physical address of the first page. */
        *dma_handle = pa = __virt_to_phys(vaddr);
-#endif

        /*
         * free wasted pages. We skip the first page since we know
@@ -131,10 +136,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        split_page(page, order);

        for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
-#ifdef CONFIG_MMU
                /* MS: This is the whole magic - use cache inhibit pages */
                err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
-#endif

                SetPageReserved(page);
                page++;
@@ -154,7 +157,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        return ret;
 }

-#ifdef CONFIG_MMU
 static pte_t *consistent_virt_to_pte(void *vaddr)
 {
        unsigned long addr = (unsigned long)vaddr;
@@ -172,7 +174,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,

        return pte_pfn(*ptep);
 }
-#endif

 /*
  * free page(s) as defined by the above mapping.
@@ -187,18 +188,6 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,

        size = PAGE_ALIGN(size);

-#ifndef CONFIG_MMU
-       /* Clear SHADOW_MASK bit in address, and free as per usual */
-# ifdef CONFIG_XILINX_UNCACHED_SHADOW
-       vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
-# endif
-       page = virt_to_page(vaddr);
-
-       do {
-               __free_reserved_page(page);
-               page++;
-       } while (size -= PAGE_SIZE);
-#else
        do {
                pte_t *ptep = consistent_virt_to_pte(vaddr);
                unsigned long pfn;
@@ -216,5 +205,5 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,

        /* flush tlb */
        flush_tlb_all();
-#endif
 }
+#endif /* CONFIG_MMU */
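
[Editor's note: to make the uncached-shadow arithmetic in uncached_kernel_address()/cached_kernel_address() concrete, here is a small self-contained sketch with invented numbers — on real hardware dcache_base/dcache_high come from cpuinfo, i.e. from the FPGA design. Assume 256 MiB of DDR cached at 0x80000000 and mirrored uncached directly above it:

#include <stdio.h>

/* Invented example values; real ones come from cpuinfo / the HW design. */
#define DCACHE_BASE             0x80000000UL    /* start of cacheable DDR */
#define DCACHE_HIGH             0x8fffffffUL    /* end of cacheable DDR   */
#define UNCACHED_SHADOW_MASK    (DCACHE_HIGH - DCACHE_BASE + 1) /* 0x10000000 */

int main(void)
{
        unsigned long cached = 0x80004000UL;    /* some allocated buffer */
        unsigned long uncached = cached | UNCACHED_SHADOW_MASK;

        printf("cached   %#lx\n", cached);      /* 0x80004000 */
        printf("uncached %#lx\n", uncached);    /* 0x90004000, above DCACHE_HIGH */
        printf("back     %#lx\n", uncached & ~UNCACHED_SHADOW_MASK);
        return 0;
}

OR-ing in the region size only works because the DDR base is aligned to a power-of-two boundary at least as large as the region — which is exactly what the optional hardware mirror relies on.]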
