mariux64/linux — drivers/gpu/drm/armada/armada_gem.c (commit 31f408c)
613 lines (501 loc) · 14.5 KB
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vma, vmf->address, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);

		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);
	obj->dev_addr = DMA_ERROR_CODE;

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	obj->dev_addr = DMA_ERROR_CODE;

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	obj = armada_gem_object_lookup(file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return -EINVAL;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unref;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

 err_unref:
	drm_gem_object_unreference_unlocked(&obj->obj);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr)
		return -EINVAL;

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.kmap		= armada_gem_dmabuf_no_kmap,
	.kunmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}

	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}
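
For context, the file above is the in-kernel side of the interface; the kernel-facing entry points (armada_gem_dumb_create(), armada_gem_dumb_map_offset(), armada_gem_vm_fault()) are reached from userspace through the standard DRM dumb-buffer UAPI. Below is a minimal userspace sketch exercising that path. It uses only well-known DRM UAPI structures and ioctls (struct drm_mode_create_dumb, DRM_IOCTL_MODE_CREATE_DUMB and friends); the device node path, buffer dimensions, and include path are illustrative assumptions, not part of the driver source.

	/*
	 * Userspace sketch (not part of armada_gem.c): allocate a dumb
	 * buffer, obtain its fake mmap offset, and map it into this
	 * process.  Assumes /dev/dri/card0 is the Armada device and that
	 * the DRM UAPI headers are installed (<drm/drm.h> pulls in
	 * <drm/drm_mode.h>).
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <drm/drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Kernel side: armada_gem_dumb_create() computes the pitch
		 * via armada_pitch() and backs the object with linear memory
		 * through armada_gem_linear_back(). */
		struct drm_mode_create_dumb create = {
			.width = 64, .height = 32, .bpp = 32,
		};
		if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create)) {
			perror("DRM_IOCTL_MODE_CREATE_DUMB");
			return 1;
		}

		/* Kernel side: armada_gem_dumb_map_offset() returns the fake
		 * offset from the object's vma_node. */
		struct drm_mode_map_dumb map = { .handle = create.handle };
		if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map)) {
			perror("DRM_IOCTL_MODE_MAP_DUMB");
			return 1;
		}

		/* Page faults on this mapping are serviced by
		 * armada_gem_vm_fault(), which inserts PFNs from the
		 * object's linear allocation. */
		void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, map.offset);
		if (ptr == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		memset(ptr, 0, create.size);	/* e.g. clear to black */

		munmap(ptr, create.size);
		struct drm_mode_destroy_dumb destroy = { .handle = create.handle };
		ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
		close(fd);
		return 0;
	}

Note that the driver's private ioctls (armada_gem_create_ioctl(), armada_gem_mmap_ioctl(), armada_gem_pwrite_ioctl()) offer a second, shmem-backed path via <drm/armada_drm.h>; the dumb-buffer route shown here is the generic one that works with any KMS tooling.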