mariux64/linux @ 3216c6b
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c · 294 lines (244 loc) · 7.81 KB
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;

struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}

struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = attach->dmabuf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}

static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct device *target_dev,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, target_dev, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * Wait for all shared fences to complete before we switch to future
		 * use of exclusive fence on this prime shared bo.
		 */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							MAX_SCHEDULE_TIMEOUT);
		if (unlikely(r < 0)) {
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			goto error_unreserve;
		}
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}

static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}

struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}

static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_framebuffer_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

static const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_gem_map_attach,
	.detach = amdgpu_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}

struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}
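
For context, a minimal sketch of how these PRIME hooks get registered in the driver's struct drm_driver. This wiring lives in amdgpu_drv.c, not in this file; the struct name and the elided fields here are illustrative assumptions based on the drm_driver layout of this kernel era, not a verbatim copy.

static struct drm_driver example_kms_driver = {
	/* ... non-PRIME driver fields elided (hypothetical sketch) ... */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= amdgpu_gem_prime_export,
	.gem_prime_import	= amdgpu_gem_prime_import,
	.gem_prime_res_obj	= amdgpu_gem_prime_res_obj,
	.gem_prime_get_sg_table	= amdgpu_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
	.gem_prime_vmap		= amdgpu_gem_prime_vmap,
	.gem_prime_vunmap	= amdgpu_gem_prime_vunmap,
	.gem_prime_mmap		= amdgpu_gem_prime_mmap,
};

With this in place, the DRM core's PRIME ioctls route handle-to-fd requests through amdgpu_gem_prime_export() and fd-to-handle requests through amdgpu_gem_prime_import() above.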
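
A hypothetical userspace sketch (not part of this file) showing how these kernel paths are exercised: libdrm's PRIME helpers wrap the DRM_IOCTL_PRIME_HANDLE_TO_FD and DRM_IOCTL_PRIME_FD_TO_HANDLE ioctls, which on an amdgpu device end up in amdgpu_gem_prime_export() and amdgpu_gem_prime_import() above. The function names export_bo/import_bo are illustrative; drm_fd is assumed to be an open render node.

#include <stdint.h>
#include <xf86drm.h>	/* libdrm: drmPrimeHandleToFD(), drmPrimeFDToHandle() */

/* Export: GEM handle -> dma-buf fd that another process or device can
 * import. (Hypothetical helper name; the libdrm calls are real.) */
static int export_bo(int drm_fd, uint32_t gem_handle)
{
	int dmabuf_fd = -1;

	if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC | DRM_RDWR,
			       &dmabuf_fd))
		return -1;
	return dmabuf_fd;
}

/* Import: dma-buf fd -> GEM handle on this device. */
static int import_bo(int drm_fd, int dmabuf_fd, uint32_t *gem_handle)
{
	return drmPrimeFDToHandle(drm_fd, dmabuf_fd, gem_handle);
}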