drm/xe: Introduce helper to populate userptr
Introduce a helper function xe_hmm_userptr_populate_range() to populate
a userptr range. This function calls hmm_range_fault() to read CPU page
tables and populate all pfns/pages of this virtual address range. For
system memory pages, dma-mapping is performed to obtain a dma-address
which can later be used by the GPU to access the pages.

v1: Address review comments:
    separate a npage_in_range function (Matt)
    reparameterize function xe_userptr_populate_range function (Matt)
    move mmu_interval_read_begin() call into while loop (Thomas)
    s/mark_range_accessed/xe_mark_range_accessed (Thomas)
    use set_page_dirty_lock (vs set_page_dirty) (Thomas)
    move a few checks in xe_vma_userptr_pin_pages to hmm.c (Matt)

v2: Remove device private page support. Only support system
    pages for now. Use dma-map-sg rather than dma-map-page (Matt/Thomas)

v3: Address review comments:
    Squash patch "drm/xe: Introduce a helper to free sg table" into the
    current patch (Matt)
    Start and end addresses are already page aligned (Matt)
    Do mmap_read_lock and mmap_read_unlock for hmm_range_fault in case
    of a non system allocator call (Matt)
    Drop kthread_use_mm and kthread_unuse_mm (Matt)
    No need of kernel-doc for static functions (Matt)
    Modify function names (Matt)
    Free sg table in case of dma_map_sgtable failure (Matt)
    Modify loop for hmm_range_fault (Matt)

v4: Remove the dummy function for xe_hmm_userptr_populate_range
    since CONFIG_HMM_MIRROR is needed (Matt)
    Change variable names start/end to userptr_start/userptr_end (Matt)

v5: Remove device private page support info from the commit message,
    since the patch doesn't support device page handling (Thomas)

Signed-off-by: Oak Zeng <oak.zeng@intel.com>
Co-developed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@intel.com>
Cc: Brian Welty <brian.welty@intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240412095237.1048599-2-himal.prasad.ghimiray@intel.com
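For orientation, here is a minimal sketch of how a caller might drive the
new helper. The function my_pin_userptr() and the -EAGAIN retry policy are
illustrative assumptions, not part of this patch; in real code the
notifier_seq check must additionally be serialized against the mmu
notifier, e.g. under a driver lock:

	/* Illustrative caller: pin a userptr range, then validate it. */
	#include <linux/mmu_notifier.h>
	#include "xe_hmm.h"

	static int my_pin_userptr(struct xe_userptr_vma *uvma)
	{
		struct xe_userptr *userptr = &uvma->userptr;
		int err;

		/* Fault in the pages and build the dma-mapped sg table. */
		err = xe_hmm_userptr_populate_range(uvma, false);
		if (err)
			return err;

		/* ... program GPU page tables from userptr->sg ... */

		/*
		 * If the range was invalidated in the meantime, the saved
		 * sequence number no longer matches: repopulate and retry.
		 */
		if (mmu_interval_read_retry(&userptr->notifier,
					    userptr->notifier_seq))
			return -EAGAIN;

		return 0;
	}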
Oak Zeng authored and Thomas Hellström committed on Apr 12, 2024
1 parent a2f3d73 · commit 81e058a
Showing 4 changed files with 267 additions and 0 deletions.
@@ -0,0 +1,253 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/mmu_notifier.h>
#include <linux/dma-mapping.h>
#include <linux/memremap.h>
#include <linux/swap.h>
#include <linux/hmm.h>
#include <linux/mm.h>
#include "xe_hmm.h"
#include "xe_vm.h"
#include "xe_bo.h"

static u64 xe_npages_in_range(unsigned long start, unsigned long end)
{
	return (end - start) >> PAGE_SHIFT;
}

/*
 * xe_mark_range_accessed() - mark a range as accessed, so the core
 * mm has this information for memory eviction or writeback to
 * disk
 *
 * @range: the range to mark
 * @write: if the range was written to, mark the pages in this
 * range as dirty
 */
static void xe_mark_range_accessed(struct hmm_range *range, bool write)
{
	struct page *page;
	u64 i, npages;

	npages = xe_npages_in_range(range->start, range->end);
	for (i = 0; i < npages; i++) {
		page = hmm_pfn_to_page(range->hmm_pfns[i]);
		if (write)
			set_page_dirty_lock(page);

		mark_page_accessed(page);
	}
}

/*
 * xe_build_sg() - build a scatter gather table for all the physical pages/pfns
 * in a hmm_range. dma-map pages if necessary. The dma-address is saved in
 * the sg table and will be used to program the GPU page table later.
 *
 * @xe: the xe device that will access the dma-address in the sg table
 * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
 * has the pfn numbers of the pages that back this hmm address range.
 * @st: pointer to the sg table.
 * @write: whether we write to this range. This decides the dma map direction
 * for system pages. If write we map it bi-directional; otherwise
 * DMA_TO_DEVICE
 *
 * All the contiguous pfns will be collapsed into one entry in
 * the scatter gather table. This is for the purpose of efficiently
 * programming the GPU page table.
 *
 * The dma_address in the sg table will later be used by the GPU to
 * access memory. So if the memory is system memory, we need to
 * do a dma-mapping so it can be accessed by the GPU/DMA.
 *
 * FIXME: This function currently only supports pages in system
 * memory. If the memory is GPU local memory (of the GPU that
 * is going to access it), we need the gpu dpa (device physical
 * address), and there is no need for dma-mapping. This is TBD.
 *
 * FIXME: dma-mapping for a peer gpu device to access a remote gpu's
 * memory. Add this when p2p is supported.
 *
 * This function allocates the storage of the sg table. It is the
 * caller's responsibility to free it by calling sg_free_table.
 *
 * Returns 0 if successful; -ENOMEM if it fails to allocate memory
 */
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
		       struct sg_table *st, bool write)
{
	struct device *dev = xe->drm.dev;
	struct page **pages;
	u64 i, npages;
	int ret;

	npages = xe_npages_in_range(range->start, range->end);
	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
		xe_assert(xe, !is_device_private_page(pages[i]));
	}

	ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
						xe_sg_segment_size(dev), GFP_KERNEL);
	if (ret)
		goto free_pages;

	ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
			      DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
	if (ret) {
		sg_free_table(st);
		st = NULL;
	}

free_pages:
	kvfree(pages);
	return ret;
}

/*
 * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
 *
 * @uvma: the userptr vma which holds the scatter gather table
 *
 * With function xe_hmm_userptr_populate_range, we allocate storage of
 * the userptr sg table. This is a helper function to free this
 * sg table, and dma-unmap the addresses in the table.
 */
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;
	bool write = !xe_vma_read_only(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	struct device *dev = xe->drm.dev;

	xe_assert(xe, userptr->sg);
	dma_unmap_sgtable(dev, userptr->sg,
			  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);

	sg_free_table(userptr->sg);
	userptr->sg = NULL;
}

/**
 * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
 * address range
 *
 * @uvma: userptr vma which has information of the range to populate.
 * @is_mm_mmap_locked: True if the mmap lock is already acquired by the caller.
 *
 * This function populates the physical pages of a virtual
 * address range. The populated physical pages are saved in
 * the userptr's sg table. It is similar to get_user_pages but calls
 * hmm_range_fault.
 *
 * This function also reads the mmu notifier sequence number
 * (mmu_interval_read_begin), for the purpose of later
 * comparison (through mmu_interval_read_retry).
 *
 * If @is_mm_mmap_locked is true, the caller must hold the mmap lock;
 * otherwise this function takes the mmap read lock itself around
 * hmm_range_fault.
 *
 * This function allocates the storage of the userptr sg table.
 * It is the caller's responsibility to free it by calling sg_free_table.
 *
 * returns: 0 for success; negative error code on failure
 */
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
				  bool is_mm_mmap_locked)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
	struct xe_userptr *userptr;
	struct xe_vma *vma = &uvma->vma;
	u64 userptr_start = xe_vma_userptr(vma);
	u64 userptr_end = userptr_start + xe_vma_size(vma);
	struct xe_vm *vm = xe_vma_vm(vma);
	struct hmm_range hmm_range;
	bool write = !xe_vma_read_only(vma);
	unsigned long notifier_seq;
	u64 npages;
	int ret;

	userptr = &uvma->userptr;

	if (is_mm_mmap_locked)
		mmap_assert_locked(userptr->notifier.mm);

	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
	if (notifier_seq == userptr->notifier_seq)
		return 0;

	if (userptr->sg)
		xe_hmm_userptr_free_sg(uvma);

	npages = xe_npages_in_range(userptr_start, userptr_end);
	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (unlikely(!pfns))
		return -ENOMEM;

	if (write)
		flags |= HMM_PFN_REQ_WRITE;

	if (!mmget_not_zero(userptr->notifier.mm)) {
		ret = -EFAULT;
		goto free_pfns;
	}

	hmm_range.default_flags = flags;
	hmm_range.hmm_pfns = pfns;
	hmm_range.notifier = &userptr->notifier;
	hmm_range.start = userptr_start;
	hmm_range.end = userptr_end;
	hmm_range.dev_private_owner = vm->xe;

	while (true) {
		hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);

		if (!is_mm_mmap_locked)
			mmap_read_lock(userptr->notifier.mm);

		ret = hmm_range_fault(&hmm_range);

		if (!is_mm_mmap_locked)
			mmap_read_unlock(userptr->notifier.mm);

		if (ret == -EBUSY) {
			if (time_after(jiffies, timeout))
				break;

			continue;
		}
		break;
	}

	mmput(userptr->notifier.mm);

	if (ret)
		goto free_pfns;

	ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
	if (ret)
		goto free_pfns;

	xe_mark_range_accessed(&hmm_range, write);
	userptr->sg = &userptr->sgt;
	userptr->notifier_seq = hmm_range.notifier_seq;

free_pfns:
	kvfree(pfns);
	return ret;
}
@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: MIT
 *
 * Copyright © 2024 Intel Corporation
 */

#include <linux/types.h>

struct xe_userptr_vma;

int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
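A hedged usage note on pairing these two entry points (the call site below
is illustrative, not taken from this patch): a successful populate call
allocates and dma-maps the userptr sg table, so it must eventually be
balanced by a free, e.g. on vma destruction or before repopulating:

	/* Illustrative teardown, assuming uvma is a valid userptr vma. */
	if (uvma->userptr.sg)
		xe_hmm_userptr_free_sg(uvma);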