Skip to content

Commit

Permalink
drm/i915/gem: Calculate object page offset for partial memory mapping
Browse files Browse the repository at this point in the history
To enable partial memory mapping of GPU virtual memory, it's
necessary to introduce a page offset into the object's
backing-store scatterlist (obj->mm.pages). This adjustment
compensates for instances when userspace mappings do not start
from the beginning of the object.

Based on a patch by Chris Wilson.

Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Cc: Chris Wilson <chris.p.wilson@linux.intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240807100521.478266-3-andi.shyti@linux.intel.com
  • Loading branch information
Andi Shyti committed Aug 21, 2024
1 parent 609d8b1 commit 255fc17
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 3 deletions.
4 changes: 3 additions & 1 deletion drivers/gpu/drm/i915/gem/i915_gem_mman.c
Original file line number Diff line number Diff line change
Expand Up @@ -252,6 +252,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
unsigned long obj_offset;
resource_size_t iomap;
int err;

Expand All @@ -273,10 +274,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
iomap -= obj->mm.region->region.start;
}

obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
/* PTEs are revoked in obj->ops->put_pages() */
err = remap_io_sg(area,
area->vm_start, area->vm_end - area->vm_start,
obj->mm.pages->sgl, iomap);
obj->mm.pages->sgl, obj_offset, iomap);

if (area->vm_flags & VM_WRITE) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
Expand Down
12 changes: 11 additions & 1 deletion drivers/gpu/drm/i915/i915_mm.c
Original file line number Diff line number Diff line change
Expand Up @@ -122,13 +122,15 @@ int remap_io_mapping(struct vm_area_struct *vma,
* @addr: target user address to start at
* @size: size of map area
* @sgl: Start sg entry
* @offset: page offset (in PAGE_SIZE units) into the scatterlist at which to start mapping
* @iobase: Use stored dma address offset by this address or pfn if -1
*
* Note: this is only safe if the mm semaphore is held when called.
*/
int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase)
struct scatterlist *sgl, unsigned long offset,
resource_size_t iobase)
{
struct remap_pfn r = {
.mm = vma->vm_mm,
Expand All @@ -141,6 +143,14 @@ int remap_io_sg(struct vm_area_struct *vma,
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

while (offset >= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT) {
offset -= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT;
r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase));
if (!r.sgt.sgp)
return -EINVAL;
}
r.sgt.curr = offset << PAGE_SHIFT;

if (!use_dma(iobase))
flush_cache_range(vma, addr, size);

Expand Down
3 changes: 2 additions & 1 deletion drivers/gpu/drm/i915/i915_mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ int remap_io_mapping(struct vm_area_struct *vma,

int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase);
struct scatterlist *sgl, unsigned long offset,
resource_size_t iobase);

#endif /* __I915_MM_H__ */

0 comments on commit 255fc17

Please sign in to comment.