diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index a9d32cd69ae9..80076f4dc4b4 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -434,3 +434,18 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 
 	return err;
 }
+
+/**
+ * xe_svm_has_mapping() - SVM has mappings
+ * @vm: The VM.
+ * @start: Start address.
+ * @end: End address.
+ *
+ * Check if an address range has SVM mappings.
+ *
+ * Return: True if address range has a SVM mapping, False otherwise
+ */
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 87cbda5641bb..35e044e492e0 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -57,6 +57,8 @@ void xe_svm_close(struct xe_vm *vm);
 int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 			    struct xe_tile *tile, u64 fault_addr,
 			    bool atomic);
+
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
 #else
 static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
 {
@@ -86,6 +88,12 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 {
 	return 0;
 }
+
+static inline
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+	return false;
+}
 #endif
 
 /**
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d0ed77c80f03..ea56b8379634 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2486,6 +2486,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 			struct xe_vma *old =
 				gpuva_to_vma(op->base.remap.unmap->va);
 			bool skip = xe_vma_is_cpu_addr_mirror(old);
+			u64 start = xe_vma_start(old), end = xe_vma_end(old);
+
+			if (op->base.remap.prev)
+				start = op->base.remap.prev->va.addr +
+					op->base.remap.prev->va.range;
+			if (op->base.remap.next)
+				end = op->base.remap.next->va.addr;
+
+			if (xe_vma_is_cpu_addr_mirror(old) &&
+			    xe_svm_has_mapping(vm, start, end))
+				return -EBUSY;
 
 			op->remap.start = xe_vma_start(old);
 			op->remap.range = xe_vma_size(old);
@@ -2567,6 +2578,11 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 		case DRM_GPUVA_OP_UNMAP:
 			vma = gpuva_to_vma(op->base.unmap.va);
 
+			if (xe_vma_is_cpu_addr_mirror(vma) &&
+			    xe_svm_has_mapping(vm, xe_vma_start(vma),
+					       xe_vma_end(vma)))
+				return -EBUSY;
+
 			if (!xe_vma_is_cpu_addr_mirror(vma))
 				xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
 			break;