mariux64 / linux
linux / arch / mips / mm / dma-default.c

Latest commit 8d8bb39 · 389 lines (294 loc) · 8.64 KB
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
		           direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
		                                   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);
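For context, a minimal usage sketch of how a driver might consume the two halves of this API: coherent allocations (which this file returns through an uncached UNCAC_ADDR mapping on non-coherent platforms, so no explicit syncs are needed) and streaming mappings (where dma_map_single() performs the cache writeback/invalidate via __dma_sync). The mydev_* names and sizes below are hypothetical illustrations, not part of dma-default.c, and error handling is abbreviated.

/* Hypothetical usage sketch -- not part of dma-default.c. */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define MYDEV_RING_BYTES 4096	/* illustrative descriptor-ring size */

static int mydev_setup_dma(struct device *dev)
{
	dma_addr_t ring_handle, buf_handle;
	void *ring, *buf;

	/*
	 * Coherent allocation: CPU and device see consistent data for the
	 * lifetime of the buffer, suitable for descriptor rings.
	 */
	ring = dma_alloc_coherent(dev, MYDEV_RING_BYTES, &ring_handle,
	                          GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/*
	 * Streaming mapping: dma_map_single() syncs the caches before
	 * handing the bus address to the device.
	 */
	buf = kmalloc(MYDEV_RING_BYTES, GFP_KERNEL);
	if (!buf) {
		dma_free_coherent(dev, MYDEV_RING_BYTES, ring, ring_handle);
		return -ENOMEM;
	}
	buf_handle = dma_map_single(dev, buf, MYDEV_RING_BYTES,
	                            DMA_FROM_DEVICE);

	/* ... program the device with ring_handle / buf_handle ... */

	/* Give the buffer back to the CPU before reading it. */
	dma_unmap_single(dev, buf_handle, MYDEV_RING_BYTES, DMA_FROM_DEVICE);
	kfree(buf);
	dma_free_coherent(dev, MYDEV_RING_BYTES, ring, ring_handle);

	return 0;
}

Note that in this implementation dma_mapping_error() always returns 0 and dma_supported() rejects only masks narrower than 24 bits, so a mapping cannot fail here; a portable driver would still check dma_mapping_error() after each map call.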
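Scatter/gather transfers follow the same pattern through dma_map_sg(), which syncs each entry and fills sg->dma_address. The sketch below is again hypothetical (the pages[] input and mydev_xmit_pages name are assumptions for illustration), built only on calls defined above or in the generic scatterlist API.

/* Hypothetical scatter/gather sketch -- not part of dma-default.c. */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int mydev_xmit_pages(struct device *dev, struct page **pages,
	                    int npages)
{
	struct scatterlist *sgl;
	int i, nents;

	sgl = kcalloc(npages, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;

	sg_init_table(sgl, npages);
	for (i = 0; i < npages; i++)
		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);

	/*
	 * dma_map_sg() syncs each entry for the device and fills in
	 * sg->dma_address; this implementation always returns nents.
	 */
	nents = dma_map_sg(dev, sgl, npages, DMA_TO_DEVICE);

	/* ... hand sg_dma_address()/sg_dma_len() of each entry to the device ... */

	dma_unmap_sg(dev, sgl, npages, DMA_TO_DEVICE);
	kfree(sgl);

	return nents;
}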